diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index df42bed09f25d..53f07fc41b966 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -142,7 +142,7 @@ Description: Raw capacitance measurement from channel Y. Units after application of scale and offset are nanofarads. -What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw +What: /sys/.../iio:deviceX/in_capacitanceY-capacitanceZ_raw KernelVersion: 3.2 Contact: linux-iio@vger.kernel.org Description: diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count new file mode 100644 index 0000000000000..156cca9dbc960 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-oops_count @@ -0,0 +1,6 @@ +What: /sys/kernel/oops_count +Date: November 2022 +KernelVersion: 6.2.0 +Contact: Linux Kernel Hardening List +Description: + Shows how many times the system has Oopsed since last boot. diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count new file mode 100644 index 0000000000000..90a029813717d --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-warn_count @@ -0,0 +1,6 @@ +What: /sys/kernel/warn_count +Date: November 2022 +KernelVersion: 6.2.0 +Contact: Linux Kernel Hardening List +Description: + Shows how many times the system has Warned since last boot. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index f577c29f20930..eb437d659f2c4 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2103,24 +2103,57 @@ ivrs_ioapic [HW,X86-64] Provide an override to the IOAPIC-ID<->DEVICE-ID - mapping provided in the IVRS ACPI table. For - example, to map IOAPIC-ID decimal 10 to - PCI device 00:14.0 write the parameter as: + mapping provided in the IVRS ACPI table. + By default, PCI segment is 0, and can be omitted. + + For example, to map IOAPIC-ID decimal 10 to + PCI segment 0x1 and PCI device 00:14.0, + write the parameter as: + ivrs_ioapic=10@0001:00:14.0 + + Deprecated formats: + * To map IOAPIC-ID decimal 10 to PCI device 00:14.0 + write the parameter as: ivrs_ioapic[10]=00:14.0 + * To map IOAPIC-ID decimal 10 to PCI segment 0x1 and + PCI device 00:14.0 write the parameter as: + ivrs_ioapic[10]=0001:00:14.0 ivrs_hpet [HW,X86-64] Provide an override to the HPET-ID<->DEVICE-ID - mapping provided in the IVRS ACPI table. For - example, to map HPET-ID decimal 0 to - PCI device 00:14.0 write the parameter as: + mapping provided in the IVRS ACPI table. + By default, PCI segment is 0, and can be omitted. + + For example, to map HPET-ID decimal 10 to + PCI segment 0x1 and PCI device 00:14.0, + write the parameter as: + ivrs_hpet=10@0001:00:14.0 + + Deprecated formats: + * To map HPET-ID decimal 0 to PCI device 00:14.0 + write the parameter as: ivrs_hpet[0]=00:14.0 + * To map HPET-ID decimal 10 to PCI segment 0x1 and + PCI device 00:14.0 write the parameter as: + ivrs_hpet[10]=0001:00:14.0 ivrs_acpihid [HW,X86-64] Provide an override to the ACPI-HID:UID<->DEVICE-ID - mapping provided in the IVRS ACPI table. For - example, to map UART-HID:UID AMD0020:0 to - PCI device 00:14.5 write the parameter as: + mapping provided in the IVRS ACPI table. + By default, PCI segment is 0, and can be omitted. 
+ + For example, to map UART-HID:UID AMD0020:0 to + PCI segment 0x1 and PCI device ID 00:14.5, + write the parameter as: + ivrs_acpihid=AMD0020:0@0001:00:14.5 + + Deprecated formats: + * To map UART-HID:UID AMD0020:0 to PCI segment 0 and + PCI device ID 00:14.5, write the parameter as: ivrs_acpihid[00:14.5]=AMD0020:0 + * To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and + PCI device ID 00:14.5, write the parameter as: + ivrs_acpihid[0001:00:14.5]=AMD0020:0 js= [HW,JOY] Analog joystick See Documentation/input/joydev/joystick.rst. diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index a4b1ebc2e70b0..6b0c7b650deaa 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -663,6 +663,15 @@ This is the default behavior. an oops event is detected. +oops_limit +========== + +Number of kernel oopses after which the kernel should panic when +``panic_on_oops`` is not set. Setting this to 0 disables checking +the count. Setting this to 1 has the same effect as setting +``panic_on_oops=1``. The default value is 10000. + + osrelease, ostype & version =========================== @@ -1469,6 +1478,16 @@ entry will default to 2 instead of 0. 2 Unprivileged calls to ``bpf()`` are disabled = ============================================================= + +warn_limit +========== + +Number of kernel warnings after which the kernel should panic when +``panic_on_warn`` is not set. Setting this to 0 disables checking +the warning count. Setting this to 1 has the same effect as setting +``panic_on_warn=1``. The default value is 0. + + watchdog ======== diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index f01eed0ee23ad..4f3206495217c 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -76,10 +76,14 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A57 | #1319537 | ARM64_ERRATUM_1319367 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A72 | #853709 | N/A | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A72 | #1319367 | ARM64_ERRATUM_1319367 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 | @@ -92,6 +96,8 @@ stable kernels. 
+----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt index 8a9f3559335b5..7e14e26676ec9 100644 --- a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt @@ -34,8 +34,8 @@ Example: Use specific request line passing from dma For example, MMC request line is 5 - sdhci: sdhci@98e00000 { - compatible = "moxa,moxart-sdhci"; + mmc: mmc@98e00000 { + compatible = "moxa,moxart-mmc"; reg = <0x98e00000 0x5C>; interrupts = <5 0>; clocks = <&clk_apb>; diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml similarity index 85% rename from Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml rename to Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml index 399ebde454095..ff86c87309a41 100644 --- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml +++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml @@ -2,7 +2,7 @@ # Copyright 2019 BayLibre, SAS %YAML 1.2 --- -$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#" +$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" title: Amlogic G12A USB2 PHY @@ -13,8 +13,8 @@ maintainers: properties: compatible: enum: - - amlogic,meson-g12a-usb2-phy - - amlogic,meson-a1-usb2-phy + - amlogic,g12a-usb2-phy + - amlogic,a1-usb2-phy reg: maxItems: 1 @@ -68,7 +68,7 @@ additionalProperties: false examples: - | phy@36000 { - compatible = "amlogic,meson-g12a-usb2-phy"; + compatible = "amlogic,g12a-usb2-phy"; reg = <0x36000 0x2000>; clocks = <&xtal>; clock-names = "xtal"; diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml similarity index 82% rename from Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml rename to Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml index 453c083cf44cb..84738644e3989 100644 --- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml @@ -2,7 +2,7 @@ # Copyright 2019 BayLibre, SAS %YAML 1.2 --- -$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#" +$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" title: Amlogic G12A USB3 + PCIE Combo PHY @@ -13,7 +13,7 @@ maintainers: properties: compatible: enum: - - amlogic,meson-g12a-usb3-pcie-phy + - amlogic,g12a-usb3-pcie-phy reg: maxItems: 1 @@ -49,7 +49,7 @@ additionalProperties: false examples: - | phy@46000 { - compatible = "amlogic,meson-g12a-usb3-pcie-phy"; + compatible = "amlogic,g12a-usb3-pcie-phy"; 
reg = <0x46000 0x2000>; clocks = <&ref_clk>; clock-names = "ref_clk"; diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt index 5d6ea66a863fe..1f75feec3dec6 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt +++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt @@ -109,7 +109,7 @@ audio-codec@1{ reg = <1 0>; interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "intr2" - reset-gpios = <&msmgpio 64 0>; + reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>; slim-ifc-dev = <&wc9335_ifd>; clock-names = "mclk", "native"; clocks = <&rpmcc RPM_SMD_DIV_CLK1>, diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst index f64cb666498aa..f28887045049d 100644 --- a/Documentation/driver-api/spi.rst +++ b/Documentation/driver-api/spi.rst @@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as a pair of FIFOs connected to dual DMA engines on the other side of the SPI shift register (maximizing throughput). Such drivers bridge between whatever bus they sit on (often the platform bus) and SPI, and expose -the SPI side of their device as a :c:type:`struct spi_master -`. SPI devices are children of that master, +the SPI side of their device as a :c:type:`struct spi_controller +`. SPI devices are children of that master, represented as a :c:type:`struct spi_device ` and manufactured from :c:type:`struct spi_board_info ` descriptors which are usually provided by diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst index 31ecfe44e5b45..47de5006f6456 100644 --- a/Documentation/fault-injection/fault-injection.rst +++ b/Documentation/fault-injection/fault-injection.rst @@ -78,8 +78,8 @@ configuration of fault-injection capabilities. - /sys/kernel/debug/fail*/times: - specifies how many times failures may happen at most. - A value of -1 means "no limit". + specifies how many times failures may happen at most. A value of -1 + means "no limit". - /sys/kernel/debug/fail*/space: @@ -167,11 +167,13 @@ configuration of fault-injection capabilities. - ERRNO: retval must be -1 to -MAX_ERRNO (-4096). - ERR_NULL: retval must be 0 or -1 to -MAX_ERRNO (-4096). -- /sys/kernel/debug/fail_function//retval: +- /sys/kernel/debug/fail_function//retval: - specifies the "error" return value to inject to the given - function for given function. This will be created when - user specifies new injection entry. + specifies the "error" return value to inject to the given function. + This will be created when the user specifies a new injection entry. + Note that this file only accepts unsigned values. 
So, if you want to + use a negative errno, you better use 'printf' instead of 'echo', e.g.: + $ printf %#x -12 > retval Boot option ^^^^^^^^^^^ @@ -336,7 +338,7 @@ Application Examples FAILTYPE=fail_function FAILFUNC=open_ctree echo $FAILFUNC > /sys/kernel/debug/$FAILTYPE/inject - echo -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval + printf %#x -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval echo N > /sys/kernel/debug/$FAILTYPE/task-filter echo 100 > /sys/kernel/debug/$FAILTYPE/probability echo 0 > /sys/kernel/debug/$FAILTYPE/interval diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst index 9746fd76cc581..f38c330c028e5 100644 --- a/Documentation/input/joydev/joystick.rst +++ b/Documentation/input/joydev/joystick.rst @@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes: * AVB Mag Turbo Force * AVB Top Shot Pegasus * AVB Top Shot Force Feedback Racing Wheel +* Boeder Force Feedback Wheel * Logitech WingMan Force * Logitech WingMan Force Wheel * Guillemot Race Leader Force Feedback diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst index e899f14a4ba24..43da2cc2e3b9b 100644 --- a/Documentation/process/code-of-conduct-interpretation.rst +++ b/Documentation/process/code-of-conduct-interpretation.rst @@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're uncertain how to handle situations that come up. It will not be considered a violation report unless you want it to be. If you are uncertain about approaching the TAB or any other maintainers, please -reach out to our conflict mediator, Mishi Choudhary . +reach out to our conflict mediator, Joanna Lee . In the end, "be kind to each other" is really what the end goal is for everybody. We know everyone is human and we all fail at times, but the diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py index eeb394b39e2cc..8b416bfd75ac1 100644 --- a/Documentation/sphinx/load_config.py +++ b/Documentation/sphinx/load_config.py @@ -3,7 +3,7 @@ import os import sys -from sphinx.util.pycompat import execfile_ +from sphinx.util.osutil import fs_encoding # ------------------------------------------------------------------------------ def loadConfig(namespace): @@ -48,7 +48,9 @@ def loadConfig(namespace): sys.stdout.write("load additional sphinx-config: %s\n" % config_file) config = namespace.copy() config['__file__'] = config_file - execfile_(config_file, config) + with open(config_file, 'rb') as f: + code = compile(f.read(), fs_encoding, 'exec') + exec(code, config) del config['__file__'] namespace.update(config) else: diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index f99be8062bc82..a9ffc4f3ee69a 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -39,7 +39,7 @@ Documentation written by Tom Zanussi will use the event's kernel stacktrace as the key. The keywords 'keys' or 'key' can be used to specify keys, and the keywords 'values', 'vals', or 'val' can be used to specify values. Compound - keys consisting of up to two fields can be specified by the 'keys' + keys consisting of up to three fields can be specified by the 'keys' keyword. Hashing a compound key produces a unique entry in the table for each unique combination of component keys, and can be useful for providing more fine-grained summaries of event data. 
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index cd8a585568045..2b4b64797191f 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6398,3 +6398,63 @@ When enabled, KVM will disable paravirtual features provided to the guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf (0x40000001). Otherwise, a guest may use the paravirtual features regardless of what has actually been exposed through the CPUID leaf. + +9. Known KVM API problems +========================= + +In some cases, KVM's API has some inconsistencies or common pitfalls +that userspace needs to be aware of. This section details some of +these issues. + +Most of them are architecture specific, so the section is split by +architecture. + +9.1. x86 +-------- + +``KVM_GET_SUPPORTED_CPUID`` issues +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In general, ``KVM_GET_SUPPORTED_CPUID`` is designed so that it is possible +to take its result and pass it directly to ``KVM_SET_CPUID2``. This section +documents some cases in which that requires some care. + +Local APIC features +~~~~~~~~~~~~~~~~~~~ + +CPU[EAX=1]:ECX[21] (X2APIC) is reported by ``KVM_GET_SUPPORTED_CPUID``, +but it can only be enabled if ``KVM_CREATE_IRQCHIP`` or +``KVM_ENABLE_CAP(KVM_CAP_IRQCHIP_SPLIT)`` are used to enable in-kernel emulation of +the local APIC. + +The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature. + +CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``. +It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel +has enabled in-kernel emulation of the local APIC. + +CPU topology +~~~~~~~~~~~~ + +Several CPUID values include topology information for the host CPU: +0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems. Different +versions of KVM return different values for this information and userspace +should not rely on it. Currently they return all zeroes. + +If userspace wishes to set up a guest topology, it should be careful that +the values of these three leaves differ for each CPU. In particular, +the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX +for 0x8000001e; the latter also encodes the core id and node id in bits +7:0 of EBX and ECX respectively. + +Obsolete ioctls and capabilities +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +KVM_CAP_DISABLE_QUIRKS does not let userspace know which quirks are actually +available. Use ``KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2)`` instead if +available. + +Ordering of KVM_GET_*/KVM_SET_* ioctls +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +TBD diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst index 0aa5b1cfd700c..60acc39e0e937 100644 --- a/Documentation/virt/kvm/devices/vm.rst +++ b/Documentation/virt/kvm/devices/vm.rst @@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT). :Parameters: address of a buffer in user space to store the data (u8) to :Returns: -EFAULT if the given address is not accessible from kernel space; -EINVAL if setting the TOD clock extension to != 0 is not supported + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW ----------------------------------- @@ -224,6 +225,7 @@ the POP (u64). :Parameters: address of a buffer in user space to store the data (u64) to :Returns: -EFAULT if the given address is not accessible from kernel space + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 3.3. 
ATTRIBUTE: KVM_S390_VM_TOD_EXT ----------------------------------- @@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0. (kvm_s390_vm_tod_clock) to :Returns: -EFAULT if the given address is not accessible from kernel space; -EINVAL if setting the TOD clock extension to != 0 is not supported + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 4. GROUP: KVM_S390_VM_CRYPTO ============================ diff --git a/MAINTAINERS b/MAINTAINERS index 4d10e79030a9c..6c5efc4013ab5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3001,7 +3001,7 @@ F: drivers/net/ieee802154/atusb.h AUDIT SUBSYSTEM M: Paul Moore M: Eric Paris -L: linux-audit@redhat.com (moderated for non-subscribers) +L: audit@vger.kernel.org S: Supported W: https://github.com/linux-audit T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git @@ -7280,7 +7280,7 @@ F: Documentation/locking/*futex* F: include/asm-generic/futex.h F: include/linux/futex.h F: include/uapi/linux/futex.h -F: kernel/futex.c +F: kernel/futex/* F: tools/perf/bench/futex* F: tools/testing/selftests/futex/ diff --git a/Makefile b/Makefile index 655fe095459b3..028fca7ec5cf3 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 142 +SUBLEVEL = 170 EXTRAVERSION = NAME = Dare mighty things @@ -465,6 +465,8 @@ LZ4 = lz4c XZ = xz ZSTD = zstd +PAHOLE_FLAGS = $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh) + CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) NOSTDINC_FLAGS := @@ -518,6 +520,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL +export PAHOLE_FLAGS # Files to ignore in find ... statements @@ -816,12 +819,12 @@ endif # Initialize all stack variables with a zero value. ifdef CONFIG_INIT_STACK_ALL_ZERO -# Future support for zero initialization is still being debated, see -# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being -# renamed or dropped. 
KBUILD_CFLAGS += -ftrivial-auto-var-init=zero +ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER +# https://github.com/llvm/llvm-project/issues/44842 KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang endif +endif DEBUG_CFLAGS := @@ -839,7 +842,9 @@ else DEBUG_CFLAGS += -g endif -ifneq ($(LLVM_IAS),1) +ifeq ($(LLVM_IAS),1) +KBUILD_AFLAGS += -g +else KBUILD_AFLAGS += -Wa,-gdwarf-2 endif @@ -1123,7 +1128,7 @@ export MODORDER := $(extmod-prefix)modules.order export MODULES_NSDEPS := $(extmod-prefix)modules.nsdeps ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ io_uring/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ @@ -1420,7 +1425,6 @@ endif PHONY += modules modules: $(if $(KBUILD_BUILTIN),vmlinux) modules_check modules_prepare - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost PHONY += modules_check modules_check: modules.order @@ -1438,12 +1442,9 @@ PHONY += modules_prepare modules_prepare: prepare $(Q)$(MAKE) $(build)=scripts scripts/module.lds -# Target to install modules -PHONY += modules_install -modules_install: _modinst_ _modinst_post - -PHONY += _modinst_ -_modinst_: +modules_install: __modinst_pre +PHONY += __modinst_pre +__modinst_pre: @rm -rf $(MODLIB)/kernel @rm -f $(MODLIB)/source @mkdir -p $(MODLIB)/kernel @@ -1455,14 +1456,6 @@ _modinst_: @sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order @cp -f modules.builtin $(MODLIB)/ @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/ - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst - -# This depmod is only for convenience to give the initial -# boot a modules.dep even before / is mounted read-write. However the -# boot script depmod is the master version. -PHONY += _modinst_post -_modinst_post: _modinst_ - $(call cmd,depmod) ifeq ($(CONFIG_MODULE_SIG), y) PHONY += modules_sign @@ -1470,20 +1463,6 @@ modules_sign: $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modsign endif -else # CONFIG_MODULES - -# Modules not configured -# --------------------------------------------------------------------------- - -PHONY += modules modules_install -modules modules_install: - @echo >&2 - @echo >&2 "The present kernel configuration has modules disabled." - @echo >&2 "Type 'make config' and enable loadable module support." - @echo >&2 "Then build a kernel with module support enabled." 
- @echo >&2 - @exit 1 - endif # CONFIG_MODULES ### @@ -1731,26 +1710,9 @@ KBUILD_BUILTIN := KBUILD_MODULES := 1 build-dirs := $(KBUILD_EXTMOD) -PHONY += modules -modules: $(MODORDER) - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost - $(MODORDER): descend @: -PHONY += modules_install -modules_install: _emodinst_ _emodinst_post - -install-dir := $(if $(INSTALL_MOD_DIR),$(INSTALL_MOD_DIR),extra) -PHONY += _emodinst_ -_emodinst_: - $(Q)mkdir -p $(MODLIB)/$(install-dir) - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst - -PHONY += _emodinst_post -_emodinst_post: _emodinst_ - $(call cmd,depmod) - compile_commands.json: $(extmod-prefix)compile_commands.json PHONY += compile_commands.json @@ -1773,6 +1735,41 @@ PHONY += prepare modules_prepare endif # KBUILD_EXTMOD +# --------------------------------------------------------------------------- +# Modules + +PHONY += modules modules_install + +ifdef CONFIG_MODULES + +modules: $(MODORDER) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + +quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) + cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \ + $(KERNELRELEASE) + +modules_install: + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst + $(call cmd,depmod) + +else # CONFIG_MODULES + +# Modules not configured +# --------------------------------------------------------------------------- + +modules modules_install: + @echo >&2 '***' + @echo >&2 '*** The present kernel configuration has modules disabled.' + @echo >&2 '*** To use the module feature, please run "make menuconfig" etc.' + @echo >&2 '*** to enable CONFIG_MODULES.' + @echo >&2 '***' + @exit 1 + +KBUILD_MODULES := + +endif # CONFIG_MODULES + # Single targets # --------------------------------------------------------------------------- # To build individual files in subdirectories, you can do like this: @@ -1796,18 +1793,12 @@ $(single-ko): single_modpost $(single-no-ko): descend @: -ifeq ($(KBUILD_EXTMOD),) -# For the single build of in-tree modules, use a temporary file to avoid -# the situation of modules_install installing an invalid modules.order. -MODORDER := .modules.tmp -endif - +# Remove MODORDER when done because it is not the real one. PHONY += single_modpost single_modpost: $(single-no-ko) modules_prepare $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER) $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost - -KBUILD_MODULES := 1 + $(Q)rm -f $(MODORDER) export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko)) @@ -1817,10 +1808,6 @@ build-dirs := $(foreach d, $(build-dirs), \ endif -ifndef CONFIG_MODULES -KBUILD_MODULES := -endif - # Handle descending into subdirectories listed in $(build-dirs) # Preset locale variables to speed up the build process. 
Limit locale # tweaks to this spot to avoid wrong language settings when running @@ -1960,11 +1947,6 @@ tools/%: FORCE quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))) cmd_rmfiles = rm -rf $(rm-files) -# Run depmod only if we have System.map and depmod is executable -quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) - cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \ - $(KERNELRELEASE) - # read saved command lines for existing targets existing-targets := $(wildcard $(sort $(targets))) diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 807d7b9a18604..0ce1eee0924b1 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -62,6 +62,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SYSCALL_AUDIT 4 /* syscall audit active */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ #define TIF_MEMDIE 13 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 14 /* idle is polling for TIF_NEED_RESCHED */ @@ -71,11 +72,12 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define _TIF_NEED_RESCHED (1<pcb.ksp = (unsigned long) childstack; childti->pcb.flags = 1; /* set FEN, clear everything else */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(childstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c index 1b1d5963ac550..48ffbfbd06240 100644 --- a/arch/alpha/kernel/rtc.c +++ b/arch/alpha/kernel/rtc.c @@ -80,7 +80,12 @@ init_rtc_epoch(void) static int alpha_rtc_read_time(struct device *dev, struct rtc_time *tm) { - mc146818_get_time(tm); + int ret = mc146818_get_time(tm); + + if (ret < 0) { + dev_err_ratelimited(dev, "unable to read current time\n"); + return ret; + } /* Adjust for non-default epochs. It's easier to depend on the generic __get_rtc_time and adjust the epoch here than create diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 3739efce1ec02..948b89789da82 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -527,7 +527,7 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags, schedule(); } else { local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) { do_signal(regs, r0, r19); r0 = 0; } else { diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 921d4b6e4d956..8b0f81a58b948 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) local_irq_enable(); while (1); } - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } #ifndef CONFIG_MATHEMU @@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", pc, va, opcode, reg); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); got_exception: /* Ok, we caught the exception, but we don't want it. 
Is there @@ -632,7 +632,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, local_irq_enable(); while (1); } - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 09172f017efc0..5d42f94887daf 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -204,7 +204,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); - do_exit(SIGKILL); + make_task_dead(SIGKILL); /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index 8f777d6441a5d..80347382a3800 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -32,7 +32,7 @@ static inline void ioport_unmap(void __iomem *addr) { } -extern void iounmap(const void __iomem *addr); +extern void iounmap(const volatile void __iomem *addr); /* * io{read,write}{16,32}be() macros diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index f9eef0e8f0b77..c0942c24d4015 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -79,6 +79,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_SYSCALL_TRACE 15 /* syscall trace active */ /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -89,11 +90,12 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) #define _TIF_SIGPENDING (1<flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(c_regs, 0, sizeof(struct pt_regs)); c_callee->r13 = kthread_arg; diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 9d5996e014c01..4868bdebf586d 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -405,7 +405,7 @@ void do_signal(struct pt_regs *regs) restart_scall = in_syscall(regs) && syscall_restartable(regs); - if (get_signal(&ksig)) { + if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) { if (restart_scall) { arc_restart_syscall(&ksig.ka, regs); syscall_wont_restart(regs); /* No more restarts */ diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c index 95c649fbc95af..d3b1ea16e9cd3 100644 --- a/arch/arc/mm/ioremap.c +++ b/arch/arc/mm/ioremap.c @@ -93,7 +93,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, EXPORT_SYMBOL(ioremap_prot); -void iounmap(const void __iomem *addr) +void iounmap(const volatile void __iomem *addr) { /* weird double cast to handle phys_addr_t > 32 bits */ if (arc_uncached_addr_space((phys_addr_t)(u32)addr)) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b587ecc6f9493..985ab0b091a6a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1791,7 +1791,6 @@ config CMDLINE choice prompt "Kernel command line type" if CMDLINE != "" default CMDLINE_FROM_BOOTLOADER - depends on ATAGS config CMDLINE_FROM_BOOTLOADER bool "Use bootloader kernel arguments if available" diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi index 6c547c83e5ddf..fc465f0d7e18b 100644 --- a/arch/arm/boot/dts/am335x-pcm-953.dtsi 
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi @@ -12,22 +12,20 @@ compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx"; /* Power */ - regulators { - vcc3v3: fixedregulator@1 { - compatible = "regulator-fixed"; - regulator-name = "vcc3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - }; + vcc3v3: fixedregulator1 { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + }; - vcc1v8: fixedregulator@2 { - compatible = "regulator-fixed"; - regulator-name = "vcc1v8"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-boot-on; - }; + vcc1v8: fixedregulator2 { + compatible = "regulator-fixed"; + regulator-name = "vcc1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; }; /* User IO */ diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 29fafb67cfaad..0d36e9dd14a45 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -1352,8 +1352,7 @@ mmc1: mmc@0 { compatible = "ti,am335-sdhci"; ti,needs-special-reset; - dmas = <&edma_xbar 24 0 0 - &edma_xbar 25 0 0>; + dmas = <&edma 24 0>, <&edma 25 0>; dma-names = "tx", "rx"; interrupts = <64>; reg = <0x0 0x1000>; diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 46e6d3ed8f35a..c042c416a94a3 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -74,7 +74,7 @@ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index 9805e507c695c..d117fc4ae6d93 100644 --- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -582,7 +582,7 @@ pcie1: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi index cff1269f3fbfd..7146cc8f082af 100644 --- a/arch/arm/boot/dts/armada-380.dtsi +++ b/arch/arm/boot/dts/armada-380.dtsi @@ -79,7 +79,7 @@ /* x1 port */ pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -98,7 +98,7 @@ /* x1 port */ pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts index fde4c302f08ec..320c759b4090f 100644 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts @@ -22,6 +22,12 @@ stdout-path = &uart0; }; + aliases { + ethernet0 = ð0; + ethernet1 = ð1; + ethernet2 = ð2; + }; + memory { device_type = "memory"; reg = <0x00000000 0x40000000>; /* 1024 MB */ @@ -291,7 +297,17 @@ }; }; - /* port 6 is connected to eth0 */ + ports@6 { + reg = <6>; + label = "cpu"; + ethernet = <ð0>; + 
phy-mode = "rgmii-id"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; }; }; }; @@ -307,7 +323,7 @@ marvell,function = "spi0"; }; - spi0cs1_pins: spi0cs1-pins { + spi0cs2_pins: spi0cs2-pins { marvell,pins = "mpp26"; marvell,function = "spi0"; }; @@ -342,7 +358,7 @@ }; }; - /* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */ + /* MISO, MOSI, SCLK and CS2 are routed to pin header CN11 */ }; &uart0 { diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi index f0022d10c7159..f081f7cb66e5f 100644 --- a/arch/arm/boot/dts/armada-385.dtsi +++ b/arch/arm/boot/dts/armada-385.dtsi @@ -84,7 +84,7 @@ /* x1 port */ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -103,7 +103,7 @@ /* x1 port */ pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -125,7 +125,7 @@ */ pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi index e0b7c20998312..9525e7b7f4360 100644 --- a/arch/arm/boot/dts/armada-39x.dtsi +++ b/arch/arm/boot/dts/armada-39x.dtsi @@ -453,7 +453,7 @@ /* x1 port */ pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -472,7 +472,7 @@ /* x1 port */ pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -494,7 +494,7 @@ */ pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi index 8558bf6bb54c6..d55fe162fc7f0 100644 --- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi @@ -97,7 +97,7 @@ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -115,7 +115,7 @@ pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -133,7 +133,7 @@ pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -151,7 +151,7 @@ pcie5: pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi 
b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 2d85fe8ac3272..fdcc818199401 100644 --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -112,7 +112,7 @@ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -130,7 +130,7 @@ pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -148,7 +148,7 @@ pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -166,7 +166,7 @@ pcie5: pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -184,7 +184,7 @@ pcie6: pcie@6,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x84000 0 0x2000>; + assigned-addresses = <0x82003000 0 0x84000 0 0x2000>; reg = <0x3000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -202,7 +202,7 @@ pcie7: pcie@7,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x88000 0 0x2000>; + assigned-addresses = <0x82003800 0 0x88000 0 0x2000>; reg = <0x3800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -220,7 +220,7 @@ pcie8: pcie@8,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>; + assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>; reg = <0x4000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -238,7 +238,7 @@ pcie9: pcie@9,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; + assigned-addresses = <0x82004800 0 0x42000 0 0x2000>; reg = <0x4800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi index a06700e53e4c3..9c8b3eb49ea30 100644 --- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi +++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi @@ -62,8 +62,8 @@ regulators { vdd_3v3: VDD_IO { regulator-name = "VDD_IO"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -81,8 +81,8 @@ vddio_ddr: VDD_DDR { regulator-name = "VDD_DDR"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -104,8 +104,8 @@ vdd_core: VDD_CORE { regulator-name = "VDD_CORE"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1250000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -146,8 +146,8 @@ LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-always-on; regulator-state-standby { @@ -161,9 +161,8 @@ LDO2 { regulator-name = 
"LDO2"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; - regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; regulator-state-standby { regulator-on-in-suspend; diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts index 634411d13b4aa..00b9e88ff5451 100644 --- a/arch/arm/boot/dts/at91-sama5d2_icp.dts +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts @@ -195,8 +195,8 @@ regulators { vdd_io_reg: VDD_IO { regulator-name = "VDD_IO"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -214,8 +214,8 @@ VDD_DDR { regulator-name = "VDD_DDR"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -233,8 +233,8 @@ VDD_CORE { regulator-name = "VDD_CORE"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1250000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -256,7 +256,6 @@ regulator-max-microvolt = <1850000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; - regulator-always-on; regulator-state-standby { regulator-on-in-suspend; @@ -271,8 +270,8 @@ LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; regulator-always-on; regulator-state-standby { @@ -286,8 +285,8 @@ LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-always-on; regulator-state-standby { diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi index d1181ead18e5a..21344fbc89e5e 100644 --- a/arch/arm/boot/dts/at91rm9200.dtsi +++ b/arch/arm/boot/dts/at91rm9200.dtsi @@ -660,7 +660,7 @@ compatible = "atmel,at91rm9200-udc"; reg = <0xfffb0000 0x4000>; interrupts = <11 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 2>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 1>; clock-names = "pclk", "hclk"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index ca03685f0f086..4783e657b4cb6 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -39,6 +39,13 @@ }; + usb1 { + pinctrl_usb1_vbus_gpio: usb1_vbus_gpio { + atmel,pins = + ; /* PC5 GPIO */ + }; + }; + mmc0_slot1 { pinctrl_board_mmc0_slot1: mmc0_slot1-board { atmel,pins = @@ -84,6 +91,8 @@ }; usb1: gadget@fffa4000 { + pinctrl-0 = <&pinctrl_usb1_vbus_gpio>; + pinctrl-names = "default"; atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 89e0bdaf3a85f..726d353eda686 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -129,7 +129,7 @@ pcie1: pcie@2 { device_type = "pci"; status = "disabled"; - assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; + 
assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; reg = <0x1000 0 0 0 0>; clocks = <&gate_clk 5>; marvell,pcie-port = <1>; diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi index 06450066b1787..255a13666edcd 100644 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi @@ -588,7 +588,7 @@ clocks = <&camera 1>; clock-names = "extclk"; samsung,camclk-out = <1>; - gpios = <&gpm1 6 GPIO_ACTIVE_HIGH>; + gpios = <&gpm1 6 GPIO_ACTIVE_LOW>; port { is_s5k6a3_ep: endpoint { diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts index c2e793b69e7d9..e2d76ea4404e8 100644 --- a/arch/arm/boot/dts/exynos4412-origen.dts +++ b/arch/arm/boot/dts/exynos4412-origen.dts @@ -95,7 +95,7 @@ }; &ehci { - samsung,vbus-gpio = <&gpx3 5 1>; + samsung,vbus-gpio = <&gpx3 5 GPIO_ACTIVE_HIGH>; status = "okay"; phys = <&exynos_usbphy 2>, <&exynos_usbphy 3>; phy-names = "hsic0", "hsic1"; diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 7e2b0f198dfad..1053b7c584d81 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts @@ -129,7 +129,7 @@ pinctrl-0 = <&spi2_pins_a>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "sst,sst25vf016b", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index f3bddc5ada4b8..13acdc7916b9b 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts @@ -33,7 +33,7 @@ pinctrl-0 = <&spi2_pins_a>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "m25p80", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index 43be7a6a769bc..90928db0df701 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts @@ -51,7 +51,7 @@ pinctrl-0 = <&spi2_pins_a>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "everspin,mr25h256", "mr25h256"; diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index 006fbd7f54322..54e39db447c4d 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts @@ -487,7 +487,7 @@ scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; status = "okay"; - i2c-switch@70 { + i2c-mux@70 { compatible = "nxp,pca9547"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/imx6dl-rex-basic.dts b/arch/arm/boot/dts/imx6dl-rex-basic.dts index 0f1616bfa9a80..b72f8ea1e6f6c 100644 --- a/arch/arm/boot/dts/imx6dl-rex-basic.dts +++ b/arch/arm/boot/dts/imx6dl-rex-basic.dts @@ -19,7 +19,7 @@ }; &ecspi3 { - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf016b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi index fdd81fdc3f357..cd3183c36488a 100644 --- a/arch/arm/boot/dts/imx6dl.dtsi +++ b/arch/arm/boot/dts/imx6dl.dtsi @@ -84,6 +84,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi index e4578ed3371ef..133991ca8c633 100644 --- a/arch/arm/boot/dts/imx6q-ba16.dtsi +++ b/arch/arm/boot/dts/imx6q-ba16.dtsi @@ -139,7 +139,7 
@@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: n25q032@0 { + flash: flash@0 { compatible = "jedec,spi-nor"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi index 2a98cc657595f..66be04299cbf8 100644 --- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi +++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi @@ -160,7 +160,7 @@ pinctrl-0 = <&pinctrl_ecspi5>; status = "okay"; - m25_eeprom: m25p80@0 { + m25_eeprom: flash@0 { compatible = "atmel,at25"; spi-max-frequency = <10000000>; size = <0x8000>; diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts index bfb530f29d9de..1ad41c944b4b9 100644 --- a/arch/arm/boot/dts/imx6q-cm-fx6.dts +++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts @@ -260,7 +260,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - m25p80@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts index fa2307d8ce861..4dee1b22d5c17 100644 --- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts +++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts @@ -102,7 +102,7 @@ cs-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "m25p80", "jedec,spi-nor"; spi-max-frequency = <40000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q-dms-ba16.dts b/arch/arm/boot/dts/imx6q-dms-ba16.dts index 48fb47e715f6d..137db38f0d27b 100644 --- a/arch/arm/boot/dts/imx6q-dms-ba16.dts +++ b/arch/arm/boot/dts/imx6q-dms-ba16.dts @@ -47,7 +47,7 @@ pinctrl-0 = <&pinctrl_ecspi5>; status = "okay"; - m25_eeprom: m25p80@0 { + m25_eeprom: flash@0 { compatible = "atmel,at25256B", "atmel,at25"; spi-max-frequency = <20000000>; size = <0x8000>; diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts index 4cde45d5c90c8..e894faba571f9 100644 --- a/arch/arm/boot/dts/imx6q-gw5400-a.dts +++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts @@ -137,7 +137,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,w25q256", "jedec,spi-nor"; spi-max-frequency = <30000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts index 05ee283882290..cc18010023942 100644 --- a/arch/arm/boot/dts/imx6q-marsboard.dts +++ b/arch/arm/boot/dts/imx6q-marsboard.dts @@ -100,7 +100,7 @@ cs-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; status = "okay"; - m25p80@0 { + flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q-prti6q.dts b/arch/arm/boot/dts/imx6q-prti6q.dts index b4605edfd2ab8..d8fa83effd638 100644 --- a/arch/arm/boot/dts/imx6q-prti6q.dts +++ b/arch/arm/boot/dts/imx6q-prti6q.dts @@ -364,8 +364,8 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_wifi>; interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>; - ref-clock-frequency = "38400000"; - tcxo-clock-frequency = "19200000"; + ref-clock-frequency = <38400000>; + tcxo-clock-frequency = <19200000>; }; }; diff --git a/arch/arm/boot/dts/imx6q-rex-pro.dts b/arch/arm/boot/dts/imx6q-rex-pro.dts index 1767e1a3cd53a..271f4b2d9b9f0 100644 --- a/arch/arm/boot/dts/imx6q-rex-pro.dts +++ b/arch/arm/boot/dts/imx6q-rex-pro.dts @@ -19,7 +19,7 @@ }; &ecspi3 { - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf032b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q.dtsi 
b/arch/arm/boot/dts/imx6q.dtsi index 5277e39032912..afec1677e6baf 100644 --- a/arch/arm/boot/dts/imx6q.dtsi +++ b/arch/arm/boot/dts/imx6q.dtsi @@ -163,6 +163,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x40000>; + ranges = <0 0x00900000 0x40000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi index e21f6ac864e54..baa197c90060e 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi @@ -96,7 +96,7 @@ pinctrl-0 = <&pinctrl_ecspi4>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q128a11", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi index ead7ba27e1053..ff8cb47fb9fdb 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi @@ -131,7 +131,7 @@ pinctrl-0 = <&pinctrl_ecspi4>; status = "okay"; - flash: m25p80@1 { + flash: flash@1 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q128a11", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi index 648f5fcb72e65..2c1d6f28e6950 100644 --- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi +++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi @@ -35,7 +35,7 @@ pinctrl-0 = <&pinctrl_ecspi3>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "sst,sst25vf040b", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi index 093a219a77ae1..f520e337698af 100644 --- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi @@ -634,7 +634,6 @@ &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; - uart-has-rtscts; rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi index ed4e222599594..852601d5ab6b8 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi @@ -31,7 +31,7 @@ user-pb { label = "user_pb"; - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; linux,code = ; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi index 4cd7d290f5b28..7a2628fdd1424 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi @@ -28,7 +28,7 @@ user-pb { label = "user_pb"; - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; linux,code = ; }; diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi index 92f9977d14822..37d94aa45a8b7 100644 --- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi @@ -51,16 +51,6 @@ vin-supply = <®_3p3v_s5>; }; - reg_3p3v_s0: regulator-3p3v-s0 { - compatible = "regulator-fixed"; - regulator-name = "V_3V3_S0"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-always-on; - regulator-boot-on; - vin-supply = <®_3p3v_s5>; - }; - reg_3p3v_s5: regulator-3p3v-s5 { compatible = "regulator-fixed"; regulator-name = "V_3V3_S5"; @@ -258,8 +248,8 @@ status = "okay"; /* default boot source: workaround #1 for errata 
ERR006282 */ - smarc_flash: spi-flash@0 { - compatible = "winbond,w25q16dw", "jedec,spi-nor"; + smarc_flash: flash@0 { + compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <20000000>; }; diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi index d526f01a2c520..b7e74d859a962 100644 --- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi @@ -179,7 +179,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi index a0917823c244f..a88323ac6c696 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi @@ -321,7 +321,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi index 92d09a3ebe0ee..ee7e2371f94bd 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi @@ -252,7 +252,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi index 1243677b5f977..5adeb7aed2204 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi @@ -237,7 +237,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf016b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi index afe477f329846..17535bf12516d 100644 --- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi @@ -272,7 +272,7 @@ pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1_cs>; status = "disabled"; /* pin conflict with WEIM NOR */ - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p32", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi index fdc3aa9d544d3..0aa1a0a28de0c 100644 --- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi @@ -313,7 +313,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf016b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index f824c9abd11a3..758c62fb9cac1 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -194,7 +194,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p32", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi index b310f13a53f22..4d23c92aa8a6b 100644 --- a/arch/arm/boot/dts/imx6qp.dtsi +++ b/arch/arm/boot/dts/imx6qp.dtsi @@ -9,12 +9,18 @@ ocram2: sram@940000 { compatible = "mmio-sram"; reg = <0x00940000 0x20000>; + ranges = 
<0 0x00940000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; ocram3: sram@960000 { compatible = "mmio-sram"; reg = <0x00960000 0x20000>; + ranges = <0 0x00960000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 25f6f2fb1555e..f16c830f1e918 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -137,7 +137,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p32", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 91a8c54d5e113..c184a6d5bc420 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -114,6 +114,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6SL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index 0b622201a1f3d..bf5b262b91f91 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -115,6 +115,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; }; intc: interrupt-controller@a01000 { diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts index 66af78e83b701..a2c79bcf9a11c 100644 --- a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts +++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts @@ -107,7 +107,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts index dce5dcf96c255..7dda42553f4bc 100644 --- a/arch/arm/boot/dts/imx6sx-sdb-reva.dts +++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts @@ -123,7 +123,7 @@ pinctrl-0 = <&pinctrl_qspi2>; status = "okay"; - flash0: s25fl128s@0 { + flash0: flash@0 { reg = <0>; #address-cells = <1>; #size-cells = <1>; @@ -133,7 +133,7 @@ spi-tx-bus-width = <4>; }; - flash1: s25fl128s@2 { + flash1: flash@2 { reg = <2>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index 5a63ca6157229..1b808563a536a 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts @@ -108,7 +108,7 @@ pinctrl-0 = <&pinctrl_qspi2>; status = "okay"; - flash0: n25q256a@0 { + flash0: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q256a", "jedec,spi-nor"; @@ -118,7 +118,7 @@ reg = <0>; }; - flash1: n25q256a@2 { + flash1: flash@2 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q256a", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index dfdca1804f9f8..c399919943c34 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -161,12 +161,18 @@ ocram_s: sram@8f8000 { compatible = "mmio-sram"; reg = <0x008f8000 0x4000>; + ranges = <0 0x008f8000 0x4000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6SX_CLK_OCRAM_S>; }; ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = 
<1>; + #size-cells = <1>; clocks = <&clks IMX6SX_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index 64c2d1e9f7fce..71d3c7e05e08f 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -239,7 +239,7 @@ pinctrl-0 = <&pinctrl_qspi>; status = "okay"; - flash0: n25q256a@0 { + flash0: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q256a", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi index 47d3ce5d255fa..acd936540d898 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi @@ -19,7 +19,7 @@ }; &qspi { - spi-flash@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "spi-nand"; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi index a095a7654ac65..29ed38dce5802 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi @@ -18,7 +18,7 @@ }; &qspi { - spi-flash@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "spi-nand"; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi index 2a449a3c1ae27..09a83dbdf6510 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi @@ -19,7 +19,7 @@ pinctrl-0 = <&pinctrl_ecspi2>; status = "okay"; - spi-flash@0 { + flash@0 { compatible = "mxicy,mx25v8035f", "jedec,spi-nor"; spi-max-frequency = <50000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts index 162dc259edc8c..5a74c7f68eb62 100644 --- a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts +++ b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts @@ -32,7 +32,7 @@ }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi b/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi index b7e984284e1ad..d000606c07049 100644 --- a/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi +++ b/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi @@ -18,7 +18,7 @@ }; &qspi { - spi-flash@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "spi-nand"; diff --git a/arch/arm/boot/dts/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/imx7d-pico-dwarf.dts index 5162fe227d1ea..fdc10563f1473 100644 --- a/arch/arm/boot/dts/imx7d-pico-dwarf.dts +++ b/arch/arm/boot/dts/imx7d-pico-dwarf.dts @@ -32,7 +32,7 @@ }; &i2c1 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; @@ -52,7 +52,7 @@ }; &i2c4 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; diff --git a/arch/arm/boot/dts/imx7d-pico-nymph.dts b/arch/arm/boot/dts/imx7d-pico-nymph.dts index 104a85254adbb..5afb1674e0125 100644 --- a/arch/arm/boot/dts/imx7d-pico-nymph.dts +++ b/arch/arm/boot/dts/imx7d-pico-nymph.dts @@ -43,7 +43,7 @@ }; &i2c1 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; @@ -64,7 +64,7 @@ }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names 
= "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 6823b9f1a2a32..6d562ebe90295 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -199,12 +199,7 @@ interrupt-parent = <&gpio2>; interrupts = <29 0>; pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>; - ti,x-min = /bits/ 16 <0>; - ti,x-max = /bits/ 16 <0>; - ti,y-min = /bits/ 16 <0>; - ti,y-max = /bits/ 16 <0>; - ti,pressure-max = /bits/ 16 <0>; - ti,x-plate-ohms = /bits/ 16 <400>; + touchscreen-max-pressure = <255>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts index 67d1f9b24a52f..8600c0548525e 100644 --- a/arch/arm/boot/dts/integratorap.dts +++ b/arch/arm/boot/dts/integratorap.dts @@ -153,6 +153,7 @@ pci: pciv3@62000000 { compatible = "arm,integrator-ap-pci", "v3,v360epc-pci"; + device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; #address-cells = <3>; diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi index 7b151acb99846..88b70ba1c8fee 100644 --- a/arch/arm/boot/dts/kirkwood-lsxl.dtsi +++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi @@ -10,6 +10,11 @@ ocp@f1000000 { pinctrl: pin-controller@10000 { + /* Non-default UART pins */ + pmx_uart0: pmx-uart0 { + marvell,pins = "mpp4", "mpp5"; + }; + pmx_power_hdd: pmx-power-hdd { marvell,pins = "mpp10"; marvell,function = "gpo"; @@ -213,22 +218,11 @@ &mdio { status = "okay"; - ethphy0: ethernet-phy@0 { - reg = <0>; - }; - ethphy1: ethernet-phy@8 { reg = <8>; }; }; -ð0 { - status = "okay"; - ethernet0-port@0 { - phy-handle = <ðphy0>; - }; -}; - ð1 { status = "okay"; ethernet1-port@0 { diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts index eb5291b0ee3aa..e07b807b4cec5 100644 --- a/arch/arm/boot/dts/moxart-uc7112lx.dts +++ b/arch/arm/boot/dts/moxart-uc7112lx.dts @@ -79,7 +79,7 @@ clocks = <&ref12>; }; -&sdhci { +&mmc { status = "okay"; }; diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi index f5f070a874823..764832ddfa78a 100644 --- a/arch/arm/boot/dts/moxart.dtsi +++ b/arch/arm/boot/dts/moxart.dtsi @@ -93,8 +93,8 @@ clock-names = "PCLK"; }; - sdhci: sdhci@98e00000 { - compatible = "moxa,moxart-sdhci"; + mmc: mmc@98e00000 { + compatible = "moxa,moxart-mmc"; reg = <0x98e00000 0x5C>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk_apb>; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index 72c4a9fc41a20..fb25ede1ce9f9 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -1571,7 +1571,7 @@ }; etb@1a01000 { - compatible = "coresight-etb10", "arm,primecell"; + compatible = "arm,coresight-etb10", "arm,primecell"; reg = <0x1a01000 0x1000>; clocks = <&rpmcc RPM_QDSS_CLK>; diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts index 2a7e6624efb93..ea23ba98625e7 100644 --- a/arch/arm/boot/dts/rk3036-evb.dts +++ b/arch/arm/boot/dts/rk3036-evb.dts @@ -31,7 +31,7 @@ &i2c1 { status = "okay"; - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts index b0fef82c0a71b..39b913f8d7018 100644 --- a/arch/arm/boot/dts/rk3188-radxarock.dts +++ b/arch/arm/boot/dts/rk3188-radxarock.dts @@ -67,7 +67,7 @@ #sound-dai-cells = <0>; }; - ir_recv: gpio-ir-receiver { + ir_recv: ir-receiver { 
compatible = "gpio-ir-receiver"; gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index b6bde9d12c2be..ddf23748ead4c 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi @@ -402,7 +402,7 @@ rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>; }; - lcdc1_rgb24: ldcd1-rgb24 { + lcdc1_rgb24: lcdc1-rgb24 { rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>, <2 RK_PA1 1 &pcfg_pull_none>, <2 RK_PA2 1 &pcfg_pull_none>, @@ -630,7 +630,6 @@ &global_timer { interrupts = ; - status = "disabled"; }; &local_timer { diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts index be695b8c1f672..8a635c2431274 100644 --- a/arch/arm/boot/dts/rk3288-evb-act8846.dts +++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts @@ -54,7 +54,7 @@ vin-supply = <&vcc_sys>; }; - hym8563@51 { + rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi index 7fb582302b326..c560afe3af780 100644 --- a/arch/arm/boot/dts/rk3288-firefly.dtsi +++ b/arch/arm/boot/dts/rk3288-firefly.dtsi @@ -233,7 +233,7 @@ vin-supply = <&vcc_sys>; }; - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts index cf54d5ffff2f9..fe265a834e8ea 100644 --- a/arch/arm/boot/dts/rk3288-miqi.dts +++ b/arch/arm/boot/dts/rk3288-miqi.dts @@ -157,7 +157,7 @@ vin-supply = <&vcc_sys>; }; - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts index c4d1d142d8c68..d5ef99ebbddce 100644 --- a/arch/arm/boot/dts/rk3288-rock2-square.dts +++ b/arch/arm/boot/dts/rk3288-rock2-square.dts @@ -165,7 +165,7 @@ }; &i2c0 { - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi index 859a7477909f1..5edc46a5585cb 100644 --- a/arch/arm/boot/dts/rk3xxx.dtsi +++ b/arch/arm/boot/dts/rk3xxx.dtsi @@ -111,6 +111,13 @@ reg = <0x1013c200 0x20>; interrupts = ; clocks = <&cru CORE_PERI>; + status = "disabled"; + /* The clock source and the sched_clock provided by the arm_global_timer + * on Rockchip rk3066a/rk3188 are quite unstable because their rates + * depend on the CPU frequency. + * Keep the arm_global_timer disabled in order to have the + * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default. 
+ */ }; local_timer: local-timer@1013c600 { diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi index ec45ced3cde68..e1e0dec8cc1f2 100644 --- a/arch/arm/boot/dts/sam9x60.dtsi +++ b/arch/arm/boot/dts/sam9x60.dtsi @@ -567,7 +567,7 @@ mpddrc: mpddrc@ffffe800 { compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc"; reg = <0xffffe800 0x200>; - clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>; + clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>; clock-names = "ddrck", "mpddr"; }; diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index fd41243a0b2c0..9d5a04a46b14e 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -47,7 +47,7 @@ compatible = "arm,pl110", "arm,primecell"; reg = <0xfc200000 0x1000>; interrupt-parent = <&vic1>; - interrupts = <12>; + interrupts = <13>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts index 2e3c9fbb4eb36..275167f26fd9d 100644 --- a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts +++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts @@ -13,7 +13,6 @@ /dts-v1/; #include "stm32mp157.dtsi" -#include "stm32mp15xc.dtsi" #include "stm32mp15xx-dhcor-som.dtsi" #include "stm32mp15xx-dhcor-avenger96.dtsi" diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi index f3e0c790a4b19..723b39bb2129c 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi @@ -100,7 +100,7 @@ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>; + gpio = <&gpioz 3 GPIO_ACTIVE_HIGH>; enable-active-high; }; }; diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts index 6f1e0f0d4f0ae..073f5d196ca9b 100644 --- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts +++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts @@ -345,7 +345,7 @@ }; &i2c2 { - tca9548@70 { + i2c-mux@70 { compatible = "nxp,pca9548"; pinctrl-0 = <&pinctrl_i2c_mux_reset>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts index de79dcfd32e62..ba2001f373158 100644 --- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts +++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts @@ -340,7 +340,7 @@ }; &i2c2 { - tca9548@70 { + i2c-mux@70 { compatible = "nxp,pca9548"; pinctrl-0 = <&pinctrl_i2c_mux_reset>; pinctrl-names = "default"; diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index fe87397c3d8c6..bdbc1e590891e 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_arch_fetch_caller_regs(regs, __ip) { \ (regs)->ARM_pc = (__ip); \ - (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \ + frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \ (regs)->ARM_sp = current_stack_pointer; \ (regs)->ARM_cpsr = SVC_MODE; \ } diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index d16aba48fa0a4..090011394477f 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -44,12 +44,6 @@ typedef pte_t *pte_addr_t; -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. 
- */ -#define ZERO_PAGE(vaddr) (virt_to_page(0)) - /* * Mark the prot value as uncacheable and unbufferable. */ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index c02f24400369b..d38d503493cb2 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -10,6 +10,15 @@ #include #include +#ifndef __ASSEMBLY__ +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern struct page *empty_zero_page; +#define ZERO_PAGE(vaddr) (empty_zero_page) +#endif + #ifndef CONFIG_MMU #include @@ -156,13 +165,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define __S111 __PAGE_SHARED_EXEC #ifndef __ASSEMBLY__ -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -extern struct page *empty_zero_page; -#define ZERO_PAGE(vaddr) (empty_zero_page) - extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 536b6b979f634..fcccf35f5cf96 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -126,19 +126,23 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, * thread information flags: * TIF_USEDFPU - FPU was used by this task this quantum (SMP) * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED + * + * Any bit in the range of 0..15 will cause do_work_pending() to be invoked. */ #define TIF_SIGPENDING 0 /* signal pending */ #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_UPROBE 3 /* breakpointed or singlestepping */ -#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ -#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ +#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ -#define TIF_RESTORE_SIGMASK 20 +#define TIF_RESTORE_SIGMASK 19 +#define TIF_SYSCALL_TRACE 20 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 21 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 23 /* seccomp syscall filtering active */ + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -148,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) /* Checks for any syscall work in entry-common.S */ @@ -158,7 +163,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, * Change these and you break ASM code in entry-common.S */ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NOTIFY_SIGNAL) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index bd619da73c84e..9b3c737575e91 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -53,7 +53,7 @@ __ret_fast_syscall: cmp r2, 
#TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + movs r1, r1, lsl #16 bne fast_work_pending @@ -90,7 +90,7 @@ __ret_fast_syscall: cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + movs r1, r1, lsl #16 beq no_work_pending UNWIND(.fnend ) ENDPROC(ret_fast_syscall) @@ -131,7 +131,7 @@ ENTRY(ret_to_user_from_irq) cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] - tst r1, #_TIF_WORK_MASK + movs r1, r1, lsl #16 bne slow_work_pending no_work_pending: asm_trace_hardirqs_on save = 0 diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index de1f20624be15..d0e898608d303 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -59,7 +59,7 @@ __irq_entry: get_thread_info tsk ldr r2, [tsk, #TI_FLAGS] - tst r2, #_TIF_WORK_MASK + movs r2, r2, lsl #16 beq 2f @ no work pending mov r0, #V7M_SCB_ICSR_PENDSVSET str r0, [r1, V7M_SCB_ICSR] @ raise PendSV diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 9f199b1e83839..2647e48c537e6 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -243,7 +243,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, thread->cpu_domain = get_domain(); #endif - if (likely(!(p->flags & PF_KTHREAD))) { + if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 2f81d3af5f9af..a3a38d0a4c853 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -655,7 +655,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) if (unlikely(!user_mode(regs))) return 0; local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { /* diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index a531afad87fdb..7878c33e188d7 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -348,7 +348,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) if (panic_on_oops) panic("Fatal exception"); if (signr) - do_exit(signr); + make_task_dead(signr); } /* diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c index b2e1963f473de..2ee2d2813d577 100644 --- a/arch/arm/mach-imx/cpu-imx25.c +++ b/arch/arm/mach-imx/cpu-imx25.c @@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim"); iim_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!iim_base); rev = readl(iim_base + MXC_IIMSREV); iounmap(iim_base); diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c index bf70e13bbe9ee..1d28939083683 100644 --- a/arch/arm/mach-imx/cpu-imx27.c +++ b/arch/arm/mach-imx/cpu-imx27.c @@ -28,6 +28,7 @@ static int mx27_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm"); ccm_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!ccm_base); /* * now we have access to the IO registers. 
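These of_node_put() additions (repeated below for imx31, imx35 and imx5) all plug the same leak: of_find_compatible_node() returns the matched node with its reference count raised, and the early SoC revision helpers never dropped that reference. A sketch of the corrected pattern, with an illustrative helper name, follows; only the node reference is released, the iomap itself stays valid.

```c
/*
 * Sketch of the corrected lookup pattern used by the cpu-imx* fixes.
 * of_find_compatible_node() takes a reference on the returned node;
 * once of_iomap() has created the mapping, the node is no longer
 * needed, so the reference is dropped immediately. Callers in the
 * patch then BUG_ON()/WARN_ON() a NULL mapping as before.
 */
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *example_iomap_compatible(const char *compat)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, compat);
	base = of_iomap(np, 0);
	of_node_put(np);	/* mapping remains valid after this */
	return base;
}
```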
As we need diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c index b9c24b851d1ab..35c544924e509 100644 --- a/arch/arm/mach-imx/cpu-imx31.c +++ b/arch/arm/mach-imx/cpu-imx31.c @@ -39,6 +39,7 @@ static int mx31_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim"); iim_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!iim_base); /* read SREV register from IIM module */ diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c index 80e7d8ab9f1bb..1fe75b39c2d99 100644 --- a/arch/arm/mach-imx/cpu-imx35.c +++ b/arch/arm/mach-imx/cpu-imx35.c @@ -21,6 +21,7 @@ static int mx35_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim"); iim_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!iim_base); rev = imx_readl(iim_base + MXC_IIMSREV); diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c index ad56263778f93..a67c89bf155dd 100644 --- a/arch/arm/mach-imx/cpu-imx5.c +++ b/arch/arm/mach-imx/cpu-imx5.c @@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat) np = of_find_compatible_node(NULL, NULL, compat); iim_base = of_iomap(np, 0); + of_node_put(np); WARN_ON(!iim_base); srev = readl(iim_base + IIM_SREV) & 0xff; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 41b2e8abc9e69..708816caf859c 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -43,18 +43,21 @@ static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE; /* - * FIXME: the timer needs some delay to stablize the counter capture + * Read the timer through the CVWR register. Delay is required after requesting + * a read. The CR register cannot be directly read due to metastability issues + * documented in the PXA168 software manual. */ static inline uint32_t timer_read(void) { - int delay = 100; + uint32_t val; + int delay = 3; __raw_writel(1, mmp_timer_base + TMR_CVWR(1)); while (delay--) - cpu_relax(); + val = __raw_readl(mmp_timer_base + TMR_CVWR(1)); - return __raw_readl(mmp_timer_base + TMR_CVWR(1)); + return val; } static u64 notrace mmp_read_sched_clock(void) diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index c109f47e9cbca..a687e83ad6048 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -387,8 +387,10 @@ static void __init mxs_machine_init(void) root = of_find_node_by_path("/"); ret = of_property_read_string(root, "model", &soc_dev_attr->machine); - if (ret) + if (ret) { + kfree(soc_dev_attr); return; + } soc_dev_attr->family = "Freescale MXS Family"; soc_dev_attr->soc_id = mxs_get_soc_id(); diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index c18d23a5e5f12..9b9023a92d464 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -342,7 +342,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) addr = start + i * PMD_SIZE; domain = get_domain_name(pmd); if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) - note_page(st, addr, 3, pmd_val(*pmd), domain); + note_page(st, addr, 4, pmd_val(*pmd), domain); else walk_pte(st, pmd, addr, domain); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index efa4020250315..af5177801fb10 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -125,7 +125,7 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, show_pte(KERN_ALERT, mm, addr); die("Oops", regs, fsr); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } /* diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 
86f213f1b44b8..0d0c3bf239143 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -300,7 +300,11 @@ static struct mem_type mem_types[] __ro_after_init = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN | L_PTE_RDONLY, .prot_l1 = PMD_TYPE_TABLE, +#ifdef CONFIG_ARM_LPAE + .prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2, +#else .prot_sect = PMD_TYPE_SECT, +#endif .domain = DOMAIN_KERNEL, }, [MT_ROM] = { diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 8b3d7191e2b88..c4d5b3bacf644 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -26,6 +26,13 @@ unsigned long vectors_base; +/* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. + */ +struct page *empty_zero_page; +EXPORT_SYMBOL(empty_zero_page); + #ifdef CONFIG_ARM_MPU struct mpu_rgn_info mpu_rgn_info; #endif @@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void) */ void __init paging_init(const struct machine_desc *mdesc) { + void *zero_page; + early_trap_init((void *)vectors_base); mpu_setup(); + + /* allocate the zero page. */ + zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + bootmem_init(); + + empty_zero_page = virt_to_page(zero_page); + flush_dcache_page(empty_zero_page); } /* diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile index 303400fa2cdf7..2aec85ab1e8b9 100644 --- a/arch/arm/nwfpe/Makefile +++ b/arch/arm/nwfpe/Makefile @@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \ entry.o nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o + +# Try really hard to avoid generating calls to __aeabi_uldivmod() from +# float64_rem() due to loop elision. +ifdef CONFIG_CC_IS_CLANG +CFLAGS_softfloat.o += -mllvm -replexitval=never +endif diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7c7906e9dafda..34bd4cba81e66 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -481,6 +481,22 @@ config ARM64_ERRATUM_834220 If unsure, say Y. +config ARM64_ERRATUM_1742098 + bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence" + depends on COMPAT + default y + help + This option removes the AES hwcap for aarch32 user-space to + workaround erratum 1742098 on Cortex-A57 and Cortex-A72. + + Affected parts may corrupt the AES state if an interrupt is + taken between a pair of AES instructions. These instructions + are only present if the cryptography extensions are present. + All software should have a fallback implementation for CPUs + that don't implement the cryptography extensions. + + If unsure, say Y. + config ARM64_ERRATUM_845719 bool "Cortex-A53: 845719: a load might read incorrect data" depends on COMPAT @@ -657,6 +673,24 @@ config ARM64_ERRATUM_1508412 If unsure, say Y. +config ARM64_ERRATUM_2457168 + bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly" + depends on ARM64_AMU_EXTN + default y + help + This option adds the workaround for ARM Cortex-A510 erratum 2457168. + + The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate + as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments + incorrectly giving a significantly higher output value. + + Work around this problem by keeping the reference values of affected counters + to 0 thus signaling an error case. 
This effect is the same as firmware disabling + affected counters, in which case 0 will be returned when reading the disabled + counters. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -1636,7 +1670,10 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 - depends on !(CC_IS_CLANG && GCOV_KERNEL) + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671 + depends on !CC_IS_GCC + # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 + depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help Build the kernel with Branch Target Identification annotations diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index fae48efae83e9..5c75fbf0d4709 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -1754,7 +1754,7 @@ sd_emmc_b: sd@5000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0x5000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_B>, <&clkc CLKID_SD_EMMC_B_CLK0>, @@ -1766,7 +1766,7 @@ sd_emmc_c: mmc@7000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0x7000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_C>, <&clkc CLKID_SD_EMMC_C_CLK0>, diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index 075153a4d49fc..2091db7c9b8af 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -2317,7 +2317,7 @@ sd_emmc_a: sd@ffe03000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0xffe03000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_A>, <&clkc CLKID_SD_EMMC_A_CLK0>, @@ -2329,7 +2329,7 @@ sd_emmc_b: sd@ffe05000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0xffe05000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_B>, <&clkc CLKID_SD_EMMC_B_CLK0>, @@ -2341,7 +2341,7 @@ sd_emmc_c: mmc@ffe07000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0xffe07000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_C>, <&clkc CLKID_SD_EMMC_C_CLK0>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 47cbb0a1eb183..88a7db5c55a07 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -595,21 +595,21 @@ sd_emmc_a: mmc@70000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; reg = <0x0 0x70000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; }; sd_emmc_b: mmc@72000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; reg = <0x0 0x72000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; }; sd_emmc_c: mmc@74000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; reg = <0x0 0x74000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; }; }; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 2c0161125ece4..cb45a2f0537a9 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -595,12 +595,26 @@ polling-delay
= <1000>; polling-delay-passive = <100>; thermal-sensors = <&scpi_sensors0 0>; + trips { + pmic_crit0: trip0 { + temperature = <90000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; soc { polling-delay = <1000>; polling-delay-passive = <100>; thermal-sensors = <&scpi_sensors0 3>; + trips { + soc_crit0: trip0 { + temperature = <80000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; big_cluster_thermal_zone: big-cluster { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi index 5667009aae13a..674a0ab8a5396 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi @@ -70,7 +70,7 @@ &ecspi2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_espi2>; - cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; status = "okay"; eeprom@0 { @@ -187,7 +187,7 @@ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82 MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82 MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82 - MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41 + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h index a003e6af33533..56271abfb7e09 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -601,7 +601,7 @@ #define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0 #define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0 #define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0 -#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0 +#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1 #define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0 #define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0 #define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index f4d7bb75707df..3490619a9ba96 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -939,10 +939,10 @@ clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>; }; - gpmi: nand-controller@33002000{ + gpmi: nand-controller@33002000 { compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand"; #address-cells = <1>; - #size-cells = <1>; + #size-cells = <0>; reg = <0x33002000 0x2000>, <0x33004000 0x4000>; reg-names = "gpmi-nand", "bch"; interrupts = ; diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index aea723eb2ba3f..7dba83041264c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -809,7 +809,7 @@ gpmi: nand-controller@33002000 { compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand"; #address-cells = <1>; - #size-cells = <1>; + #size-cells = <0>; reg = <0x33002000 0x2000>, <0x33004000 0x4000>; reg-names = "gpmi-nand", "bch"; interrupts = ; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi index e3c6d1272198f..325ea100969a8 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi @@ -899,6 +899,7 @@ interrupts = <20 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_gauge>; + power-supplies = <&bq25895>; 
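The new power-supplies phandle on the Librem 5 fuel gauge declares that the gauge is fed by the bq25895 charger, so charger state changes can be propagated to it. Roughly, a consumer driver opts into such notifications through the power-supply core's external_power_changed hook; the sketch below is illustrative and is not the actual gauge or charger driver code.

```c
/*
 * Illustrative consumer: when a supplier listed in its "power-supplies"
 * DT property (here, the charger) reports a change, the power-supply
 * core invokes this hook, and the gauge can re-evaluate its state and
 * notify userspace in turn. Names are hypothetical.
 */
#include <linux/power_supply.h>

static void example_external_power_changed(struct power_supply *psy)
{
	power_supply_changed(psy);	/* refresh properties, emit uevent */
}

static const struct power_supply_desc example_gauge_desc = {
	.name			= "example-gauge",
	.type			= POWER_SUPPLY_TYPE_BATTERY,
	.external_power_changed	= example_external_power_changed,
};
```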
maxim,over-heat-temp = <700>; maxim,over-volt = <4500>; maxim,rsns-microohm = <5000>; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts index 5d5aa6537225f..6e6182709d220 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts @@ -339,7 +339,7 @@ bus-width = <4>; non-removable; no-sd; - no-emmc; + no-mmc; status = "okay"; brcmf: wifi@1 { @@ -359,7 +359,7 @@ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; bus-width = <4>; no-sdio; - no-emmc; + no-mmc; disable-wp; status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index 00e5dbf4b8236..eea8d23683dc1 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -124,9 +124,12 @@ /delete-property/ mrvl,i2c-fast-mode; status = "okay"; + /* MCP7940MT-I/MNY RTC */ rtc@6f { compatible = "microchip,mcp7940x"; reg = <0x6f>; + interrupt-parent = <&gpiosb>; + interrupts = <5 0>; /* GPIO2_5 */ }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts index 7d369fdd3117f..9d20cabf4f699 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts @@ -26,14 +26,14 @@ stdout-path = "serial0:921600n8"; }; - cpus_fixed_vproc0: fixedregulator@0 { + cpus_fixed_vproc0: regulator-vproc-buck0 { compatible = "regulator-fixed"; regulator-name = "vproc_buck0"; regulator-min-microvolt = <1000000>; regulator-max-microvolt = <1000000>; }; - cpus_fixed_vproc1: fixedregulator@1 { + cpus_fixed_vproc1: regulator-vproc-buck1 { compatible = "regulator-fixed"; regulator-name = "vproc_buck1"; regulator-min-microvolt = <1000000>; @@ -50,7 +50,7 @@ id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>; }; - usb_p0_vbus: regulator@2 { + usb_p0_vbus: regulator-usb-p0-vbus { compatible = "regulator-fixed"; regulator-name = "p0_vbus"; regulator-min-microvolt = <5000000>; @@ -59,7 +59,7 @@ enable-active-high; }; - usb_p1_vbus: regulator@3 { + usb_p1_vbus: regulator-usb-p1-vbus { compatible = "regulator-fixed"; regulator-name = "p1_vbus"; regulator-min-microvolt = <5000000>; @@ -68,7 +68,7 @@ enable-active-high; }; - usb_p2_vbus: regulator@4 { + usb_p2_vbus: regulator-usb-p2-vbus { compatible = "regulator-fixed"; regulator-name = "p2_vbus"; regulator-min-microvolt = <5000000>; @@ -77,7 +77,7 @@ enable-active-high; }; - usb_p3_vbus: regulator@5 { + usb_p3_vbus: regulator-usb-p3-vbus { compatible = "regulator-fixed"; regulator-name = "p3_vbus"; regulator-min-microvolt = <5000000>; diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi index db17d0a4ed57b..cc3d1c99517d1 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi @@ -160,70 +160,70 @@ #clock-cells = <0>; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; clock-output-names = "clk26m"; }; - clk32k: oscillator@1 { + clk32k: oscillator-32k { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32768>; clock-output-names = "clk32k"; }; - clkfpc: oscillator@2 { + clkfpc: oscillator-50m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; clock-output-names = "clkfpc"; }; - clkaud_ext_i_0: oscillator@3 { + clkaud_ext_i_0: oscillator-aud0 { compatible = "fixed-clock"; 
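These MediaTek renames (regulator@N to regulator-*, oscillator@N to oscillator-*) drop unit addresses from nodes that have no reg property, as the devicetree spec and dtc warnings require. Consumers are unaffected because they bind through phandles and compatible strings, never through node names; a minimal sketch of such a consumer, with illustrative naming, is shown below.

```c
/*
 * Minimal sketch: a driver obtains one of these fixed clocks via its
 * "clocks" phandle, so renaming oscillator@0 to oscillator-26m is
 * invisible here. Error handling trimmed to the essentials.
 */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* For a fixed-clock this reports the clock-frequency property. */
	dev_info(&pdev->dev, "reference clock: %lu Hz\n", clk_get_rate(clk));
	return 0;
}
```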
#clock-cells = <0>; clock-frequency = <6500000>; clock-output-names = "clkaud_ext_i_0"; }; - clkaud_ext_i_1: oscillator@4 { + clkaud_ext_i_1: oscillator-aud1 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <196608000>; clock-output-names = "clkaud_ext_i_1"; }; - clkaud_ext_i_2: oscillator@5 { + clkaud_ext_i_2: oscillator-aud2 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <180633600>; clock-output-names = "clkaud_ext_i_2"; }; - clki2si0_mck_i: oscillator@6 { + clki2si0_mck_i: oscillator-i2s0 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si0_mck_i"; }; - clki2si1_mck_i: oscillator@7 { + clki2si1_mck_i: oscillator-i2s1 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si1_mck_i"; }; - clki2si2_mck_i: oscillator@8 { + clki2si2_mck_i: oscillator-i2s2 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si2_mck_i"; }; - clktdmin_mclk_i: oscillator@9 { + clktdmin_mclk_i: oscillator-mclk { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; @@ -266,7 +266,7 @@ reg = <0 0x10005000 0 0x1000>; }; - pio: pinctrl@10005000 { + pio: pinctrl@1000b000 { compatible = "mediatek,mt2712-pinctrl"; reg = <0 0x1000b000 0 0x1000>; mediatek,pctl-regmap = <&syscfg_pctl_a>; diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi index 15616231022a2..c3677d77e0a45 100644 --- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi @@ -95,7 +95,7 @@ }; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi index 99c2d6fd6304a..d5059735c5940 100644 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi @@ -17,7 +17,7 @@ }; firmware { - optee: optee@4fd00000 { + optee: optee { compatible = "linaro,optee-tz"; method = "smc"; }; @@ -209,7 +209,7 @@ }; }; - i2c0_pins_a: i2c0@0 { + i2c0_pins_a: i2c0 { pins1 { pinmux = , ; @@ -217,7 +217,7 @@ }; }; - i2c2_pins_a: i2c2@0 { + i2c2_pins_a: i2c2 { pins1 { pinmux = , ; diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts index e8eaa958c1992..b867506bc7e1c 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts +++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts @@ -37,6 +37,8 @@ &spi_0 { cs-select = <0>; + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; status = "okay"; m25p80@0 { diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 291276a38d7cd..c32e4a3833f23 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1249,7 +1249,7 @@ }; mpss: remoteproc@4080000 { - compatible = "qcom,msm8916-mss-pil", "qcom,q6v5-pil"; + compatible = "qcom,msm8916-mss-pil"; reg = <0x04080000 0x100>, <0x04020000 0x040>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index ef5d03a150693..bc140269e4cc5 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -651,17 +651,17 @@ compatible ="operating-points-v2"; /* - * 624Mhz and 560Mhz are only available on speed - * bin (1 << 0). 
All the rest are available on - * all bins of the hardware + * 624Mhz is only available on speed bins 0 and 3. + * 560Mhz is only available on speed bins 0, 2 and 3. + * All the rest are available on all bins of the hardware. */ opp-624000000 { opp-hz = /bits/ 64 <624000000>; - opp-supported-hw = <0x01>; + opp-supported-hw = <0x09>; }; opp-560000000 { opp-hz = /bits/ 64 <560000000>; - opp-supported-hw = <0x01>; + opp-supported-hw = <0x0d>; }; opp-510000000 { opp-hz = /bits/ 64 <510000000>; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi index 44956e3165a16..469aad4e5948c 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi @@ -9,6 +9,10 @@ label = "proximity-wifi-lte"; }; +&mpss_mem { + reg = <0x0 0x86000000 0x0 0x8c00000>; +}; + &remoteproc_mpss { firmware-name = "qcom/sc7180-trogdor/modem/mba.mbn", "qcom/sc7180-trogdor/modem/qdsp6sw.mbn"; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index 5b2a616c6257b..cb2c47f13a8a4 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -39,7 +39,7 @@ }; mpss_mem: memory@86000000 { - reg = <0x0 0x86000000 0x0 0x8c00000>; + reg = <0x0 0x86000000 0x0 0x2000000>; no-map; }; diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi index f87054575ce7f..79d260c2b3c32 100644 --- a/arch/arm64/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi @@ -593,7 +593,7 @@ pins = "gpio17", "gpio18", "gpio19"; function = "gpio"; drive-strength = <2>; - bias-no-pull; + bias-disable; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi index 64fc1bfd66fad..26f6f193bd1b6 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi @@ -1292,7 +1292,7 @@ ap_ts_i2c: &i2c14 { config { pins = "gpio126"; function = "gpio"; - bias-no-pull; + bias-disable; drive-strength = <2>; output-low; }; @@ -1302,7 +1302,7 @@ ap_ts_i2c: &i2c14 { config { pins = "gpio126"; function = "gpio"; - bias-no-pull; + bias-disable; drive-strength = <2>; output-high; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index 96d36b38f2696..c6691bdc81002 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -1045,7 +1045,10 @@ /* PINCTRL - additions to nodes defined in sdm845.dtsi */ &qup_spi2_default { - drive-strength = <16>; + pinconf { + pins = "gpio27", "gpio28", "gpio29", "gpio30"; + drive-strength = <16>; + }; }; &qup_uart3_default{ diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index e080c317b5e3d..4d67f49827388 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -322,8 +322,10 @@ }; &qup_i2c12_default { - drive-strength = <2>; - bias-disable; + pinmux { + drive-strength = <2>; + bias-disable; + }; }; &qup_uart6_default { diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts index fbcb9531cc70d..213c0759c4b85 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts @@ -13,7 +13,7 @@ stdout-path = "serial2:1500000n8"; }; - ir_rx { + 
ir-receiver { compatible = "gpio-ir-receiver"; gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index e6c1c94c8d69c..07737b65d7a3d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -87,3 +87,8 @@ }; }; }; + +&wlan_host_wake_l { + /* Kevin has an external pull up, but Bob does not. */ + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index 1384dabbdf406..739937f70f8d0 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -237,6 +237,14 @@ &edp { status = "okay"; + /* + * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only + * set this here, because rk3399-gru.dtsi ensures we can generate this + * off GPLL=600MHz, whereas some other RK3399 boards may not. + */ + assigned-clocks = <&cru PCLK_EDP>; + assigned-clock-rates = <24000000>; + ports { edp_out: port@1 { reg = <1>; @@ -395,6 +403,7 @@ ap_i2c_tp: &i2c5 { }; wlan_host_wake_l: wlan-host-wake-l { + /* Kevin has an external pull up, but Bob does not */ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index a8d363568fd62..3fc761c8d550a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts @@ -203,7 +203,7 @@ cap-sd-highspeed; cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; disable-wp; - max-frequency = <150000000>; + max-frequency = <40000000>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; vmmc-supply = <&vcc3v3_baseboard>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 544110aaffc56..95bc7a5f61dd5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -102,7 +102,6 @@ vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; - enable-active-low; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; regulator-name = "vcc5v0_host"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index f121203081b97..64df643391194 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -448,7 +448,6 @@ &i2s1 { rockchip,playback-channels = <2>; rockchip,capture-channels = <2>; - status = "okay"; }; &i2s2 { diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index d04189771c773..4265f627ca167 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -127,7 +127,6 @@ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>; dma-names = "tx", "rx1", "rx2"; - dma-coherent; rng: rng@4e10000 { compatible = "inside-secure,safexcel-eip76"; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 0350ddfe2c723..691d73f0f1e07 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -367,7 +367,6 @@ dmas 
= <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>; dma-names = "tx", "rx1", "rx2"; - dma-coherent; rng: rng@4e10000 { compatible = "inside-secure,safexcel-eip76"; diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 13869b76b58cd..abd302e521c06 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -12,19 +12,6 @@ #include -#ifdef CONFIG_ARM64_LSE_ATOMICS -#define __LL_SC_FALLBACK(asm_ops) \ -" b 3f\n" \ -" .subsection 1\n" \ -"3:\n" \ -asm_ops "\n" \ -" b 4f\n" \ -" .previous\n" \ -"4:\n" -#else -#define __LL_SC_FALLBACK(asm_ops) asm_ops -#endif - #ifndef CONFIG_CC_HAS_K_CONSTRAINT #define K #endif @@ -43,12 +30,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \ int result; \ \ asm volatile("// atomic_" #op "\n" \ - __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ldxr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" stxr %w1, %w0, %2\n" \ -" cbnz %w1, 1b\n") \ + " prfm pstl1strm, %2\n" \ + "1: ldxr %w0, %2\n" \ + " " #asm_op " %w0, %w0, %w3\n" \ + " stxr %w1, %w0, %2\n" \ + " cbnz %w1, 1b\n" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -61,13 +47,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ int result; \ \ asm volatile("// atomic_" #op "_return" #name "\n" \ - __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ld" #acq "xr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" st" #rel "xr %w1, %w0, %2\n" \ -" cbnz %w1, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %2\n" \ + "1: ld" #acq "xr %w0, %2\n" \ + " " #asm_op " %w0, %w0, %w3\n" \ + " st" #rel "xr %w1, %w0, %2\n" \ + " cbnz %w1, 1b\n" \ + " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -83,13 +68,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ int val, result; \ \ asm volatile("// atomic_fetch_" #op #name "\n" \ - __LL_SC_FALLBACK( \ -" prfm pstl1strm, %3\n" \ -"1: ld" #acq "xr %w0, %3\n" \ -" " #asm_op " %w1, %w0, %w4\n" \ -" st" #rel "xr %w2, %w1, %3\n" \ -" cbnz %w2, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %3\n" \ + "1: ld" #acq "xr %w0, %3\n" \ + " " #asm_op " %w1, %w0, %w4\n" \ + " st" #rel "xr %w2, %w1, %3\n" \ + " cbnz %w2, 1b\n" \ + " " #mb \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -142,12 +126,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "\n" \ - __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ldxr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" stxr %w1, %0, %2\n" \ -" cbnz %w1, 1b") \ + " prfm pstl1strm, %2\n" \ + "1: ldxr %0, %2\n" \ + " " #asm_op " %0, %0, %3\n" \ + " stxr %w1, %0, %2\n" \ + " cbnz %w1, 1b" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -160,13 +143,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "_return" #name "\n" \ - __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ld" #acq "xr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" st" #rel "xr %w1, %0, %2\n" \ -" cbnz %w1, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %2\n" \ + "1: ld" #acq "xr %0, %2\n" \ + " " #asm_op " %0, %0, %3\n" \ + " st" #rel "xr %w1, %0, %2\n" \ + " cbnz %w1, 1b\n" \ + " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -176,19 +158,18 @@ __ll_sc_atomic64_##op##_return##name(s64 i, 
atomic64_t *v) \ #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ static inline long \ -__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ +__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ { \ s64 result, val; \ unsigned long tmp; \ \ asm volatile("// atomic64_fetch_" #op #name "\n" \ - __LL_SC_FALLBACK( \ -" prfm pstl1strm, %3\n" \ -"1: ld" #acq "xr %0, %3\n" \ -" " #asm_op " %1, %0, %4\n" \ -" st" #rel "xr %w2, %1, %3\n" \ -" cbnz %w2, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %3\n" \ + "1: ld" #acq "xr %0, %3\n" \ + " " #asm_op " %1, %0, %4\n" \ + " st" #rel "xr %w2, %1, %3\n" \ + " cbnz %w2, 1b\n" \ + " " #mb \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -240,15 +221,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" - __LL_SC_FALLBACK( -" prfm pstl1strm, %2\n" -"1: ldxr %0, %2\n" -" subs %0, %0, #1\n" -" b.lt 2f\n" -" stlxr %w1, %0, %2\n" -" cbnz %w1, 1b\n" -" dmb ish\n" -"2:") + " prfm pstl1strm, %2\n" + "1: ldxr %0, %2\n" + " subs %0, %0, #1\n" + " b.lt 2f\n" + " stlxr %w1, %0, %2\n" + " cbnz %w1, 1b\n" + " dmb ish\n" + "2:" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : : "cc", "memory"); @@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ old = (u##sz)old; \ \ asm volatile( \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %[v]\n" \ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ @@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ " cbnz %w[tmp], 1b\n" \ " " #mb "\n" \ - "2:") \ + "2:" \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ [v] "+Q" (*(u##sz *)ptr) \ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \ @@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ unsigned long tmp, ret; \ \ asm volatile("// __cmpxchg_double" #name "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxp %0, %1, %2\n" \ " eor %0, %0, %3\n" \ @@ -336,8 +314,8 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ " st" #rel "xp %w0, %5, %6, %2\n" \ " cbnz %w0, 1b\n" \ " " #mb "\n" \ - "2:") \ - : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ + "2:" \ + : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ : cl); \ \ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index da3280f639cd7..28e96118c1e5a 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -11,11 +11,11 @@ #define __ASM_ATOMIC_LSE_H #define ATOMIC_OP(op, asm_op) \ -static inline void __lse_atomic_##op(int i, atomic_t *v) \ +static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op " %w[i], %[v]\n" \ + " " #asm_op " %w[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ } @@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op #mb " %w[i], %w[i], %[v]" \ + " " #asm_op #mb " %w[i], %w[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ : cl); \ @@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ " add %w[i], %w[i], %w[tmp]" \ : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ : "r" (v) \ - : cl); \ + : cl); \ \ return i; \ } @@ 
-168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op " %[i], %[v]\n" \ + " " #asm_op " %[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ } @@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op #mb " %[i], %[i], %[v]" \ + " " #asm_op #mb " %[i], %[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ : cl); \ @@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) } #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ -static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\ { \ unsigned long tmp; \ \ @@ -403,7 +403,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \ " eor %[old2], %[old2], %[oldval2]\n" \ " orr %[old1], %[old1], %[old2]" \ : [old1] "+&r" (x0), [old2] "+&r" (x1), \ - [v] "+Q" (*(unsigned long *)ptr) \ + [v] "+Q" (*(__uint128_t *)ptr) \ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ : cl); \ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index f42fd0a2e81c8..d2080a41f6e6f 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -67,7 +67,9 @@ #define ARM64_MTE 57 #define ARM64_WORKAROUND_1508412 58 #define ARM64_SPECTRE_BHB 59 +#define ARM64_WORKAROUND_2457168 60 +#define ARM64_WORKAROUND_1742098 61 -#define ARM64_NCAPS 60 +#define ARM64_NCAPS 62 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 423f9b40e4d95..31ba0ac7db630 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -648,7 +648,8 @@ static inline bool system_supports_4kb_granule(void) val = cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN4_SHIFT); - return val == ID_AA64MMFR0_TGRAN4_SUPPORTED; + return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX); } static inline bool system_supports_64kb_granule(void) @@ -660,7 +661,8 @@ static inline bool system_supports_64kb_granule(void) val = cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN64_SHIFT); - return val == ID_AA64MMFR0_TGRAN64_SUPPORTED; + return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX); } static inline bool system_supports_16kb_granule(void) @@ -672,7 +674,8 @@ static inline bool system_supports_16kb_granule(void) val = cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN16_SHIFT); - return val == ID_AA64MMFR0_TGRAN16_SUPPORTED; + return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX); } static inline bool system_supports_mixed_endian_el0(void) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 39f5c1672f480..9cf5d9551e991 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -41,7 +41,7 @@ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) #define MIDR_CPU_MODEL(imp, partnum) \ - (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ + ((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \ (0xf << MIDR_ARCHITECTURE_SHIFT) | \ ((partnum) << MIDR_PARTNUM_SHIFT)) @@ -60,6 +60,7 @@ #define ARM_CPU_IMP_FUJITSU 0x46 #define ARM_CPU_IMP_HISI 0x48 #define ARM_CPU_IMP_APPLE 0x61 +#define 
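Editor's note: the two cmpxchg_double constraint fixes above make the same point in different files — the inline asm reads and writes a full 16-byte pair, so the "+Q" output operand must name a __uint128_t object rather than an unsigned long, or the compiler is free to assume the second word is untouched. A minimal standalone sketch of the same idea using the GCC/Clang 128-bit atomic builtins (helper name hypothetical; assumes the toolchain provides __uint128_t, and may lower to a libatomic call on some targets):

    #include <stdbool.h>

    /* compare-and-swap where the whole 16-byte object is visible to the
     * compiler as a single access -- the property the patch restores by
     * casting ptr to __uint128_t in the asm constraints */
    static bool cas16(__uint128_t *ptr, __uint128_t old, __uint128_t desired)
    {
            return __atomic_compare_exchange_n(ptr, &old, desired, false,
                                               __ATOMIC_SEQ_CST,
                                               __ATOMIC_SEQ_CST);
    }
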
ARM_CPU_IMP_AMPERE 0xC0 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -112,6 +113,8 @@ #define APPLE_CPU_PART_M1_ICESTORM 0x022 #define APPLE_CPU_PART_M1_FIRESTORM 0x023 +#define AMPERE_CPU_PART_AMPERE1 0xAC3 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -151,6 +154,7 @@ #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) +#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 973b144152711..16892f0d05ad6 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -25,6 +25,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); ({ \ efi_virtmap_load(); \ __efi_fpsimd_begin(); \ + spin_lock(&efi_rt_lock); \ }) #define arch_efi_call_virt(p, f, args...) \ @@ -36,10 +37,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); #define arch_efi_call_virt_teardown() \ ({ \ + spin_unlock(&efi_rt_lock); \ __efi_fpsimd_end(); \ efi_virtmap_unload(); \ }) +extern spinlock_t efi_rt_lock; efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 472122d731b0d..4bca3d10ab0ef 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -382,8 +382,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) { - if (kvm_vcpu_abt_iss1tw(vcpu)) - return true; + if (kvm_vcpu_abt_iss1tw(vcpu)) { + /* + * Only a permission fault on a S1PTW should be + * considered as a write. Otherwise, page tables baked + * in a read-only memslot will result in an exception + * being delivered in the guest. + * + * The drawback is that we end-up faulting twice if the + * guest is using any of HW AF/DB: a translation fault + * to map the page containing the PT (read only at + * first), then a permission fault to allow the flags + * to be set. 
+ */ + switch (kvm_vcpu_trap_get_fault_type(vcpu)) { + case ESR_ELx_FSC_PERM: + return true; + default: + return false; + } + } if (kvm_vcpu_trap_is_iabt(vcpu)) return false; diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 7c546c3487c9f..c628d8e3a403a 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -230,13 +230,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, } #endif -static inline bool is_ttbr0_addr(unsigned long addr) +static __always_inline bool is_ttbr0_addr(unsigned long addr) { /* entry assembly clears tags for TTBR0 addrs */ return addr < TASK_SIZE; } -static inline bool is_ttbr1_addr(unsigned long addr) +static __always_inline bool is_ttbr1_addr(unsigned long addr) { /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index b383b4802a7bd..d30217c21eff7 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -8,7 +8,7 @@ #ifndef __ASM_SYSCALL_WRAPPER_H #define __ASM_SYSCALL_WRAPPER_H -struct pt_regs; +#include #define SC_ARM64_REGS_TO_ARGS(x, ...) \ __MAP(x,__SC_ARGS \ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 1f2209ad2cca1..06755fad38304 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -786,15 +786,24 @@ #define ID_AA64MMFR0_ASID_SHIFT 4 #define ID_AA64MMFR0_PARANGE_SHIFT 0 -#define ID_AA64MMFR0_TGRAN4_NI 0xf -#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0 -#define ID_AA64MMFR0_TGRAN64_NI 0xf -#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0 -#define ID_AA64MMFR0_TGRAN16_NI 0x0 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1 +#define ID_AA64MMFR0_TGRAN4_NI 0xf +#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_TGRAN64_NI 0xf +#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_TGRAN16_NI 0x0 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf + #define ID_AA64MMFR0_PARANGE_48 0x5 #define ID_AA64MMFR0_PARANGE_52 0x6 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7 + #ifdef CONFIG_ARM64_PA_BITS_52 #define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52 #else @@ -955,14 +964,17 @@ #define ID_PFR1_PROGMOD_SHIFT 0 #if defined(CONFIG_ARM64_4K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX #elif defined(CONFIG_ARM64_16K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX #elif defined(CONFIG_ARM64_64K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT -#define 
ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX #endif #define MVFR2_FPMISC_SHIFT 4 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 1fbab854a51b0..cdcf307764aad 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -68,6 +68,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ #define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -98,10 +99,12 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ - _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT) + _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT | \ + _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 587543c6c51cb..97c42be71338a 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, int init_cache_level(unsigned int cpu) { - unsigned int ctype, level, leaves, fw_level; + unsigned int ctype, level, leaves; + int fw_level; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { @@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu) else fw_level = acpi_find_last_cache_level(cpu); + if (fw_level < 0) + return fw_level; + if (level < fw_level) { /* * some external caches not specified in CLIDR_EL1 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 78263dadd00da..5d6f19bc628c2 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -356,6 +356,14 @@ static const struct midr_range erratum_1463225[] = { }; #endif +#ifdef CONFIG_ARM64_ERRATUM_1742098 +static struct midr_range broken_aarch32_aes[] = { + MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + {}, +}; +#endif + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -545,6 +553,23 @@ const struct arm64_cpu_capabilities arm64_errata[] = { 0, 0, 1, 0), }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2457168 + { + .desc = "ARM erratum 2457168", + .capability = ARM64_WORKAROUND_2457168, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + /* Cortex-A510 r0p0-r1p1 */ + CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1) + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1742098 + { + .desc = "ARM erratum 1742098", + .capability = ARM64_WORKAROUND_1742098, + CAP_MIDR_RANGE_LIST(broken_aarch32_aes), + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + }, #endif { } diff --git 
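Editor's note: the TGRAN rework above replaces single-value equality tests with SUPPORTED_MIN/SUPPORTED_MAX range checks, because later revisions of ID_AA64MMFR0_EL1 advertise granule support with values other than 0 (for the 4K field, 0x1 signals 52-bit address support, while 0xf still means "not implemented"). A small sketch of the resulting check (helper names hypothetical; the shift is the architectural position of the TGran4 field):

    #include <stdbool.h>
    #include <stdint.h>

    #define TGRAN4_SHIFT          28   /* ID_AA64MMFR0_EL1.TGran4, bits [31:28] */
    #define TGRAN4_SUPPORTED_MIN  0x0
    #define TGRAN4_SUPPORTED_MAX  0x7

    /* mirrors cpuid_feature_extract_unsigned_field() for one 4-bit field */
    static inline unsigned int field4(uint64_t reg, unsigned int shift)
    {
            return (unsigned int)(reg >> shift) & 0xf;
    }

    /* 0xf ("not implemented") fails the upper bound, so the range check
     * accepts exactly the values that advertise 4K granule support */
    static bool tgran4_supported(uint64_t mmfr0)
    {
            unsigned int val = field4(mmfr0, TGRAN4_SHIFT);

            return val >= TGRAN4_SUPPORTED_MIN && val <= TGRAN4_SUPPORTED_MAX;
    }

The __enable_mmu hunk later in this patch applies the same two bounds in assembly with a b.lt/b.gt pair.
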
a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4087e2d1f39e2..f3767c1445933 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -76,6 +76,7 @@ #include #include #include +#include #include #include #include @@ -1559,7 +1560,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", smp_processor_id()); cpumask_set_cpu(smp_processor_id(), &amu_cpus); - init_cpu_freq_invariance_counters(); + + /* 0 reference values signal broken/disabled counters */ + if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) + init_cpu_freq_invariance_counters(); } } @@ -1727,6 +1731,14 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) } #endif /* CONFIG_ARM64_MTE */ +static void elf_hwcap_fixup(void) +{ +#ifdef CONFIG_ARM64_ERRATUM_1742098 + if (cpus_have_const_cap(ARM64_WORKAROUND_1742098)) + compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES; +#endif /* ARM64_ERRATUM_1742098 */ +} + /* Internal helper functions to match cpu capability type */ static bool cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) @@ -2732,8 +2744,10 @@ void __init setup_cpu_features(void) setup_system_capabilities(); setup_elf_hwcaps(arm64_elf_hwcaps); - if (system_supports_32bit_el0()) + if (system_supports_32bit_el0()) { setup_elf_hwcaps(compat_elf_hwcaps); + elf_hwcap_fixup(); + } if (system_uses_ttbr0_pan()) pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S index 75691a2641c1c..2d3c4b02393e4 100644 --- a/arch/arm64/kernel/efi-rt-wrapper.S +++ b/arch/arm64/kernel/efi-rt-wrapper.S @@ -4,6 +4,7 @@ */ #include +#include SYM_FUNC_START(__efi_rt_asm_wrapper) stp x29, x30, [sp, #-32]! @@ -16,6 +17,12 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) */ stp x1, x18, [sp, #16] + ldr_l x16, efi_rt_stack_top + mov sp, x16 +#ifdef CONFIG_SHADOW_CALL_STACK + str x18, [sp, #-16]! +#endif + /* * We are lucky enough that no EFI runtime services take more than * 5 arguments, so all are passed in registers rather than via the @@ -29,6 +36,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) mov x4, x6 blr x8 + mov sp, x29 ldp x1, x2, [sp, #16] cmp x2, x18 ldp x29, x30, [sp], #32 @@ -42,6 +50,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) * called with preemption disabled and a separate shadow stack is used * for interrupts. 
*/ - mov x18, x2 +#ifdef CONFIG_SHADOW_CALL_STACK + ldr_l x18, efi_rt_stack_top + ldr x18, [x18, #-16] +#endif + b efi_handle_corrupted_x18 // tail call SYM_FUNC_END(__efi_rt_asm_wrapper) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index fa02efb28e88e..72f432d23ec5c 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -12,6 +12,14 @@ #include +static bool region_is_misaligned(const efi_memory_desc_t *md) +{ + if (PAGE_SIZE == EFI_PAGE_SIZE) + return false; + return !PAGE_ALIGNED(md->phys_addr) || + !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT); +} + /* * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be * executable, everything else can be mapped with the XN bits @@ -25,14 +33,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md) if (type == EFI_MEMORY_MAPPED_IO) return PROT_DEVICE_nGnRE; - if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr), - "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?")) + if (region_is_misaligned(md)) { + static bool __initdata code_is_misaligned; + /* - * If the region is not aligned to the page size of the OS, we - * can not use strict permissions, since that would also affect - * the mapping attributes of the adjacent regions. + * Regions that are not aligned to the OS page size cannot be + * mapped with strict permissions, as those might interfere + * with the permissions that are needed by the adjacent + * region's mapping. However, if we haven't encountered any + * misaligned runtime code regions so far, we can safely use + * non-executable permissions for non-code regions. */ - return pgprot_val(PAGE_KERNEL_EXEC); + code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE); + + return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC) + : pgprot_val(PAGE_KERNEL); + } /* R-- */ if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) == @@ -62,19 +78,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE || md->type == EFI_RUNTIME_SERVICES_DATA); - if (!PAGE_ALIGNED(md->phys_addr) || - !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) { - /* - * If the end address of this region is not aligned to page - * size, the mapping is rounded up, and may end up sharing a - * page frame with the next UEFI memory region. If we create - * a block entry now, we may need to split it again when mapping - * the next region, and support for that is going to be removed - * from the MMU routines. So avoid block mappings altogether in - * that case. - */ + /* + * If this region is not aligned to the page size used by the OS, the + * mapping will be rounded outwards, and may end up sharing a page + * frame with an adjacent runtime memory region. Given that the page + * table descriptor covering the shared page will be rewritten when the + * adjacent region gets mapped, we must avoid block mappings here so we + * don't have to worry about splitting them when that happens. + */ + if (region_is_misaligned(md)) page_mappings_only = true; - } create_pgd_mapping(mm, md->phys_addr, md->virt_addr, md->num_pages << EFI_PAGE_SHIFT, @@ -101,6 +114,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm, BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE && md->type != EFI_RUNTIME_SERVICES_DATA); + if (region_is_misaligned(md)) + return 0; + /* * Calling apply_to_page_range() is only safe on regions that are * guaranteed to be mapped down to pages. 
Since we are only called @@ -127,3 +143,30 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f); return s; } + +DEFINE_SPINLOCK(efi_rt_lock); + +asmlinkage u64 *efi_rt_stack_top __ro_after_init; + +/* EFI requires 8 KiB of stack space for runtime services */ +static_assert(THREAD_SIZE >= SZ_8K); + +static int __init arm64_efi_rt_init(void) +{ + void *p; + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + + p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL, + NUMA_NO_NODE, &&l); +l: if (!p) { + pr_warn("Failed to allocate EFI runtime stack\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return -ENOMEM; + } + + efi_rt_stack_top = p + THREAD_SIZE; + return 0; +} +core_initcall(arm64_efi_rt_init); diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 3724bab278b28..402a24f845b9e 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -216,11 +216,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long pc = rec->ip; u32 old = 0, new; + new = aarch64_insn_gen_nop(); + + /* + * When using mcount, callsites in modules may have been initalized to + * call an arbitrary module PLT (which redirects to the _mcount stub) + * rather than the ftrace PLT we'll use at runtime (which redirects to + * the ftrace trampoline). We can ignore the old PLT when initializing + * the callsite. + * + * Note: 'mod' is only set at module load time. + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && + IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) { + return aarch64_insn_patch_text_nosync((void *)pc, new); + } + if (!ftrace_find_callable_addr(rec, mod, &addr)) return -EINVAL; old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); - new = aarch64_insn_gen_nop(); return ftrace_modify_code(pc, old, new, true); } diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index f9119eea735e2..e1c25fa3b8e6c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -797,8 +797,10 @@ SYM_FUNC_END(__secondary_too_slow) SYM_FUNC_START(__enable_mmu) mrs x2, ID_AA64MMFR0_EL1 ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4 - cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED - b.ne __no_granule_support + cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN + b.lt __no_granule_support + cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX + b.gt __no_granule_support update_early_cpu_boot_status 0, x2, x3 adrp x2, idmap_pg_dir phys_to_ttbr x1, x1 diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 22275d8518eb3..3696dbcbfa80c 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ptrauth_thread_init_kernel(p); - if (likely(!(p->flags & PF_KTHREAD))) { + if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 6ae53d8cd576f..faa8a6bf2376e 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -876,6 +876,10 @@ u8 spectre_bhb_loop_affected(int scope) MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), {}, }; + static const struct midr_range spectre_bhb_k11_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + {}, + }; static const struct midr_range spectre_bhb_k8_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), @@ -886,6 +890,8 @@ u8 
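Editor's note: the EFI changes above work as a pair — __efi_rt_asm_wrapper now moves SP onto the dedicated stack allocated by arm64_efi_rt_init(), and the efi_rt_lock taken in arch_efi_call_virt_setup() serializes callers, since a single shared stack cannot host two runtime-service calls at once. A sketch of that invariant in isolation (names other than efi_rt_lock are hypothetical; the real wrapper also performs the SP switch in assembly):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(rt_lock);        /* stands in for efi_rt_lock */

    /* every call borrows the same preallocated stack, so the lock must
     * be held for the whole duration of the service call */
    static long call_on_shared_stack(long (*fn)(void *), void *arg)
    {
            long ret;

            spin_lock(&rt_lock);            /* arch_efi_call_virt_setup() */
            ret = fn(arg);                  /* runs on the shared stack */
            spin_unlock(&rt_lock);          /* arch_efi_call_virt_teardown() */

            return ret;
    }
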
spectre_bhb_loop_affected(int scope) k = 32; else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) + k = 11; else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) k = 8; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0dab5679a97d5..b6fbbd527dd79 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -938,7 +938,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, (void __user *)NULL, current); } - if (thread_flags & _TIF_SIGPENDING) + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 543c67cae02ff..f35af19b70555 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -22,46 +22,6 @@ #include #include -void store_cpu_topology(unsigned int cpuid) -{ - struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; - u64 mpidr; - - if (cpuid_topo->package_id != -1) - goto topology_populated; - - mpidr = read_cpuid_mpidr(); - - /* Uniprocessor systems can rely on default topology values */ - if (mpidr & MPIDR_UP_BITMASK) - return; - - /* - * This would be the place to create cpu topology based on MPIDR. - * - * However, it cannot be trusted to depict the actual topology; some - * pieces of the architecture enforce an artificial cap on Aff0 values - * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an - * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up - * having absolutely no relationship to the actual underlying system - * topology, and cannot be reasonably used as core / package ID. - * - * If the MT bit is set, Aff0 *could* be used to define a thread ID, but - * we still wouldn't be able to obtain a sane core ID. This means we - * need to entirely ignore MPIDR for any topology deduction. 
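Editor's note: for reference alongside the store_cpu_topology() removal in progress here, this is how MPIDR_EL1's affinity fields are laid out (field positions per the Arm ARM; macro names hypothetical). The point of the deletion is that although the fields exist, their values cannot be trusted as core/package IDs — GICv3's ICC_SGI1R_EL1, for instance, caps usable Aff0 at 15, forcing artificial cycling of the higher fields:

    #include <stdint.h>

    /* Aff0 [7:0], Aff1 [15:8], Aff2 [23:16], Aff3 [39:32] */
    #define MPIDR_AFF0(m)   ((unsigned int)((m) >>  0) & 0xff)
    #define MPIDR_AFF1(m)   ((unsigned int)((m) >>  8) & 0xff)
    #define MPIDR_AFF2(m)   ((unsigned int)((m) >> 16) & 0xff)
    #define MPIDR_AFF3(m)   ((unsigned int)((m) >> 32) & 0xff)
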
- */ - cpuid_topo->thread_id = -1; - cpuid_topo->core_id = cpuid; - cpuid_topo->package_id = cpu_to_node(cpuid); - - pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", - cpuid, cpuid_topo->package_id, cpuid_topo->core_id, - cpuid_topo->thread_id, mpidr); - -topology_populated: - update_siblings_masks(cpuid); -} - #ifdef CONFIG_ACPI static bool __init acpi_cpu_is_threaded(int cpu) { @@ -158,7 +118,7 @@ static int validate_cpu_freq_invariance_counters(int cpu) } /* Convert maximum frequency from KHz to Hz and validate */ - max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000; + max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000ULL; if (unlikely(!max_freq_hz)) { pr_debug("CPU%d: invalid maximum frequency.\n", cpu); return -EINVAL; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 2059d8f43f55f..2cdd53425509d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -144,7 +144,7 @@ void die(const char *str, struct pt_regs *regs, int err) raw_spin_unlock_irqrestore(&die_lock, flags); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } static void arm64_show_signal(int signo, const char *str) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 204c62debf06e..6f85c1821c3fb 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -397,16 +397,18 @@ int kvm_set_ipa_limit(void) } switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) { - default: - case 1: + case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE: kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n"); return -EINVAL; - case 0: + case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT: kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n"); break; - case 2: + case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX: kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n"); break; + default: + kvm_err("Unsupported value for TGRAN_2, giving up\n"); + return -EINVAL; } kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange); diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index b9518f94bd435..23710bf5a86b6 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2096,7 +2096,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, memset(entry, 0, esz); - while (len > 0) { + while (true) { int next_offset; size_t byte_offset; @@ -2109,6 +2109,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, return next_offset; byte_offset = next_offset * esz; + if (byte_offset >= len) + break; + id += next_offset; gpa += byte_offset; len -= byte_offset; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 795d224f184ff..2be856731e817 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -293,7 +293,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, show_pte(addr); die("Oops", regs, esr); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } static void __do_kernel_fault(unsigned long addr, unsigned int esr, diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h index f70382844b965..dd8913d57189d 100644 --- a/arch/c6x/include/asm/thread_info.h +++ b/arch/c6x/include/asm/thread_info.h @@ -82,6 +82,7 @@ struct thread_info *current_thread_info(void) #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ +#define TIF_NOTIFY_SIGNAL 
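Editor's note: the 1000ULL change in topology.c above fixes a 32-bit overflow — cpufreq_get_hw_max_freq() returns kHz in an unsigned int, so the plain multiply wraps for clocks at or above roughly 4.295 GHz. A standalone demonstration (the frequency value is invented for illustration):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int khz = 4400000;         /* a 4.4 GHz part, in kHz */
            uint64_t wraps = khz * 1000;        /* 32-bit multiply, wraps */
            uint64_t fixed = khz * 1000ULL;     /* widened before multiplying */

            /* prints 105032704 vs 4400000000 */
            printf("%" PRIu64 " vs %" PRIu64 "\n", wraps, fixed);
            return 0;
    }
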
5 /* signal notifications exist */ #define TIF_MEMDIE 17 /* OOM killer killed process */ diff --git a/arch/c6x/kernel/asm-offsets.c b/arch/c6x/kernel/asm-offsets.c index 0f8fde494875e..4a264ef87dcb8 100644 --- a/arch/c6x/kernel/asm-offsets.c +++ b/arch/c6x/kernel/asm-offsets.c @@ -116,6 +116,7 @@ void foo(void) DEFINE(_TIF_NOTIFY_RESUME, (1< #include +#include #include #include @@ -313,7 +314,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags, int syscall) { /* deal with pending signal delivery */ - if (thread_info_flags & (1 << TIF_SIGPENDING)) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, syscall); if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c index cb2a0d94a144d..2df115d0e2105 100644 --- a/arch/csky/abiv1/alignment.c +++ b/arch/csky/abiv1/alignment.c @@ -294,7 +294,7 @@ void csky_alignment(struct pt_regs *regs) __func__, opcode, rz, rx, imm, addr); show_regs(regs); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h index 68e7a1227170c..21456a3737c2a 100644 --- a/arch/csky/include/asm/thread_info.h +++ b/arch/csky/include/asm/thread_info.h @@ -64,6 +64,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ #define TIF_SYSCALL_TRACEPOINT 5 /* syscall tracepoint instrumentation */ #define TIF_SYSCALL_AUDIT 6 /* syscall auditing */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ @@ -75,6 +76,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_MEMDIE (1 << TIF_MEMDIE) @@ -82,7 +84,8 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index 69af6bc87e647..3d0ca22cd0e2e 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -49,7 +49,7 @@ int copy_thread(unsigned long clone_flags, /* setup thread.sp for switch_to !!! 
*/ p->thread.sp = (unsigned long)childstack; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); childstack->r15 = (unsigned long) ret_from_kernel_thread; childstack->r10 = kthread_arg; diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 243228b0aa075..f7c1677e59719 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -261,7 +261,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, uprobe_notify_resume(regs); /* Handle pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index 22721468a04b2..15711efa14a4e 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -111,7 +111,7 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h index 0cdaa302d3d28..a518214d4ddd8 100644 --- a/arch/h8300/include/asm/thread_info.h +++ b/arch/h8300/include/asm/thread_info.h @@ -73,6 +73,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_NOTIFY_SIGNAL 10 /* signal notifications exist */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) @@ -83,6 +84,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) /* work to do in syscall trace */ #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ @@ -92,7 +94,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ - _TIF_SYSCALL_TRACEPOINT) + _TIF_SYSCALL_TRACEPOINT | _TIF_NOTIFY_SIGNAL) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index bc1364db58feb..46b1342ce515b 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); childregs->retpc = (unsigned long) ret_from_kernel_thread; childregs->er4 = topstk; /* arg */ diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index 75d9b7e626b2f..75a1c36b105a1 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -279,7 +279,7 @@ static void do_signal(struct pt_regs *regs) asmlinkage void do_notify_resume(struct pt_regs *regs, u32 
thread_info_flags) { - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 5d8b969cd8f34..cf23ccb50c17e 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -110,7 +111,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err) dump(fp); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } static int kstack_depth_to_print = 24; diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c index d4bc9c16f2df9..b465441f490df 100644 --- a/arch/h8300/mm/fault.c +++ b/arch/h8300/mm/fault.c @@ -51,7 +51,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, printk(" at virtual address %08lx\n", address); if (!user_mode(regs)) die("Oops", regs, error_code); - do_exit(SIGKILL); + make_task_dead(SIGKILL); return 1; } diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h index 563da19864645..535976665bf00 100644 --- a/arch/hexagon/include/asm/thread_info.h +++ b/arch/hexagon/include/asm/thread_info.h @@ -95,6 +95,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG); #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* restore ss @ return to usr mode */ #define TIF_RESTORE_SIGMASK 6 /* restore sig mask in do_signal() */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 17 /* OOM killer killed process */ @@ -103,6 +104,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG); #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) /* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */ #define _TIF_WORK_MASK (0x0000FFFF & ~_TIF_SYSCALL_TRACE) diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 67767c5ed98c3..c61165c99ae0b 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -73,7 +73,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, sizeof(*ss)); ss->lr = (unsigned long)ret_from_fork; p->thread.switch_sp = ss; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); /* r24 <- fn, r25 <- arg */ ss->r24 = usp; @@ -174,7 +174,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) return 1; } - if (thread_info_flags & _TIF_SIGPENDING) { + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { do_signal(regs); return 1; } diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 904134b37232f..b334e80717099 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -218,7 +218,7 @@ int die(const char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - do_exit(err); + make_task_dead(err); return 0; } diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 39b25a5a591b3..1d0579bc9d655 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -361,7 +361,7 @@ config ARCH_PROC_KCORE_TEXT depends on PROC_KCORE config IA64_MCA_RECOVERY - 
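Editor's note: the arm64, c6x, csky and h8300 hunks above (and more below) all converge on the same two changes per architecture — a new TIF_NOTIFY_SIGNAL bit folded into the work mask, and a widened test in the return-to-user path. The shared shape, sketched with illustrative bit numbers (they vary per architecture):

    #include <stdbool.h>

    #define _TIF_SIGPENDING     (1u << 2)   /* bit numbers vary per arch */
    #define _TIF_NOTIFY_SIGNAL  (1u << 7)

    /* TIF_NOTIFY_SIGNAL must reach do_signal() exactly like a pending
     * signal, or task_work queued via signal notification never runs */
    static bool signal_work_pending(unsigned int thread_flags)
    {
            return thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL);
    }
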
tristate "MCA recovery from errors other than TLB." + bool "MCA recovery from errors other than TLB." config IA64_PALINFO tristate "/proc/pal support" diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 64a1011f68121..51d20cb377062 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -103,6 +103,7 @@ struct thread_info { #define TIF_SYSCALL_TRACE 2 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notification exist */ #define TIF_NOTIFY_RESUME 6 /* resumption notification requested */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ @@ -115,6 +116,7 @@ struct thread_info { #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) @@ -124,7 +126,7 @@ struct thread_info { /* "work to do on user-return" bits */ #define TIF_ALLWORK_MASK (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\ - _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE) + _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_NOTIFY_SIGNAL) /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 2a40268c3d494..d9ee3b186249d 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -176,7 +176,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) spin_unlock(&mca_bh_lock); /* This process is about to be killed itself */ - do_exit(SIGKILL); + make_task_dead(SIGKILL); } /** diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index c9ff8796b5097..8159b7af5509f 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -171,7 +171,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) } /* deal with pending signal delivery */ - if (test_thread_flag(TIF_SIGPENDING)) { + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) { local_irq_enable(); /* force interrupt enable */ ia64_do_signal(scr, in_syscall); } @@ -337,7 +338,7 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base, ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { if (unlikely(!user_stack_base)) { /* fork_idle() called us */ return 0; diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index e67b22fc3c60b..c1b299760bf7a 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -341,7 +341,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) * need to push through a forced SIGSEGV. 
*/ while (1) { - get_signal(&ksig); + if (!get_signal(&ksig)) + break; /* * get_signal() may have run a debugger (via notify_parent()) diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index e13cb905930fb..753642366e12e 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); return 0; } diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index cd9766d2b6e0e..829198180ca6f 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -274,7 +274,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re regs = NULL; bust_spinlocks(0); if (regs) - do_exit(SIGKILL); + make_task_dead(SIGKILL); return; out_of_memory: diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index f34964271101a..6cd002e8163db 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -106,5 +106,6 @@ int memory_add_physaddr_to_nid(u64 addr) return 0; return nid; } +EXPORT_SYMBOL(memory_add_physaddr_to_nid); #endif #endif diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 3689c6718c883..15a757073fa58 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h @@ -60,6 +60,7 @@ static inline struct thread_info *current_thread_info(void) * bits 0-7 are tested at every exception exit * bits 8-15 are also tested at syscall exit */ +#define TIF_NOTIFY_SIGNAL 4 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ #define TIF_SIGPENDING 6 /* signal pending */ #define TIF_NEED_RESCHED 7 /* rescheduling necessary */ diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 08359a6e058f6..da83cc83e7912 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, */ p->thread.fs = get_fs().seg; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(frame, 0, sizeof(struct fork_frame)); frame->regs.sr = PS_S; diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index fd916844a683f..5d12736b4b281 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -1129,7 +1129,8 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || + test_thread_flag(TIF_SIGPENDING)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 9e1261462bcc5..b2a31afb998c2 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1136,7 +1136,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) pr_crit("%s: %08x\n", str, nr); show_registers(fp); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } asmlinkage void set_esp0(unsigned long ssp) diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index ef46e77e97a5b..fcb3a0d8421c5 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs) pr_alert("Unable to handle kernel access"); pr_cont(" at virtual address %p\n", addr); die_if_kernel("Oops", regs, 0 /*error_code*/); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } return 1; diff --git 
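Editor's note: the ia64 loop above previously called get_signal() and ignored its return value; since get_signal() returns false once nothing is left to deliver, the loop must stop rather than act on a stale ksignal. Sketched in isolation (declarations stubbed out, so this is the shape of the fix, not the real ia64 code):

    #include <stdbool.h>

    struct ksignal;                         /* opaque here */
    bool get_signal(struct ksignal *ksig);  /* kernel API shape */
    void deliver(struct ksignal *ksig);     /* hypothetical stand-in */

    static void signal_delivery_loop(struct ksignal *ksig)
    {
            while (1) {
                    if (!get_signal(ksig))
                            break;          /* nothing pending any more */
                    deliver(ksig);
            }
    }
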
a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index ad8e8fcb90d3b..44f5ca3318625 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -107,6 +107,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ /* restore singlestep on return to user mode */ #define TIF_SINGLESTEP 4 +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_MEMDIE 6 /* is terminating due to OOM killer */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ @@ -119,6 +120,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index cf99c411503e3..6d3a6a6442205 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c @@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err) pr_warn("Oops: %s, sig: %ld\n", str, err); show_regs(fp); spin_unlock_irq(&die_lock); - /* do_exit() should take care of panic'ing from an interrupt + /* make_task_dead() should take care of panic'ing from an interrupt * context so we don't handle it here */ - do_exit(err); + make_task_dead(err); } /* for user application debugging */ diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index f99860771ff48..ee000ae17e395 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -59,7 +59,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct pt_regs *childregs = task_pt_regs(p); struct thread_info *ti = task_thread_info(p); - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* if we're creating a new kernel thread then just zeroing all * the registers. 
That's OK for a brand new thread.*/ memset(childregs, 0, sizeof(struct pt_regs)); diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index f11a0ccccabc4..5a8d173d7b75c 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -313,7 +313,8 @@ static void do_signal(struct pt_regs *regs, int in_syscall) asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c index 3e2a8166377f5..22509b5fab74f 100644 --- a/arch/mips/bcm47xx/prom.c +++ b/arch/mips/bcm47xx/prom.c @@ -86,7 +86,7 @@ static __init void prom_init_mem(void) pr_debug("Assume 128MB RAM\n"); break; } - if (!memcmp(prom_init, prom_init + mem, 32)) + if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32)) break; } lowmem = mem; @@ -163,7 +163,7 @@ void __init bcm47xx_prom_highmem_init(void) off = EXTVBASE + __pa(off); for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) { - if (!memcmp(prom_init, (void *)(off + extmem), 16)) + if (!memcmp((void *)prom_init, (void *)(off + extmem), 16)) break; } extmem -= lowmem; diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index dcfa0ea912fe1..f183c45503ce1 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -361,6 +361,8 @@ static struct clk clk_periph = { */ int clk_enable(struct clk *clk) { + if (!clk) + return 0; mutex_lock(&clocks_mutex); clk_enable_unlocked(clk); mutex_unlock(&clocks_mutex); diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index abd11b7af22f2..9b791ccf874f9 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -211,7 +211,7 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port) { union cvmx_helper_link_info result; - WARN(!octeon_is_simulation(), + WARN_ONCE(!octeon_is_simulation(), "Using deprecated link status - please update your DT"); /* Unless we fix it later, all links are defaulted to down */ diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 6044ff4710022..a18ad2daf0052 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -1100,7 +1100,7 @@ union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port) if (index == 0) result = __cvmx_helper_rgmii_link_get(ipd_port); else { - WARN(1, "Using deprecated link status - please update your DT"); + WARN_ONCE(1, "Using deprecated link status - please update your DT"); result.s.full_duplex = 1; result.s.link_up = 1; result.s.speed = 1000; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 6501a842c41a5..191bcaf565138 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -127,6 +127,16 @@ static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, int irq, int line, int bit) { + struct device_node *of_node; + int ret; + + of_node = irq_domain_get_of_node(domain); + if (!of_node) + return -EINVAL; + ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node)); + if (ret < 0) + return ret; + return 
irq_domain_associate(domain, irq, line << 6 | bit); } diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h index d0ef8b4892bbe..d0494ce4b3373 100644 --- a/arch/mips/include/asm/fw/fw.h +++ b/arch/mips/include/asm/fw/fw.h @@ -26,6 +26,6 @@ extern char *fw_getcmdline(void); extern void fw_meminit(void); extern char *fw_getenv(char *name); extern unsigned long fw_getenvl(char *name); -extern void fw_init_early_console(char port); +extern void fw_init_early_console(void); #endif /* __ASM_FW_H_ */ diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index ee26f9a4575df..e2c352da3877a 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -115,6 +115,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SECCOMP 4 /* secure computing */ #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ #define TIF_UPROBE 6 /* breakpointed or singlestepping */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ @@ -139,6 +140,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SECCOMP (1<= BIT(25)) || + WARN_ON((offset >= (long)BIT(25)) || (offset < -(long)BIT(25))); insn.j_format.opcode = bc6_op; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 75ebd8d7bd5d0..98ecaf6f3edb0 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -135,7 +135,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Put the stack after the struct pt_regs. */ childksp = (unsigned long) childregs; p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ unsigned long status = p->thread.cp0_status; memset(childregs, 0, sizeof(struct pt_regs)); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 50d0515bea21f..f1e985109da01 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -903,7 +903,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, uprobe_notify_resume(regs); /* deal with pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index b1fe4518bd221..ebd0101f0958d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -413,7 +413,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) if (regs && kexec_should_crash(current)) crash_kexec(regs); - do_exit(sig); + make_task_dead(sig); } extern struct exception_table_entry __start___dbe_table[]; diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c index 9268ebc0f61e6..903c07bdc92d9 100644 --- a/arch/mips/kernel/vpe-cmp.c +++ b/arch/mips/kernel/vpe-cmp.c @@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { - kfree(cd); } static struct class vpe_class = { @@ -157,6 +156,7 @@ int __init vpe_module_init(void) device_del(&vpe_device); out_class: + put_device(&vpe_device); class_unregister(&vpe_class); out_chrdev: @@ -169,7 +169,7 @@ void __exit vpe_module_exit(void) { struct 
vpe *v, *n; - device_del(&vpe_device); + device_unregister(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index 2e003b11a098f..9fd7cd48ea1d2 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { - kfree(cd); } static struct class vpe_class = { @@ -497,6 +496,7 @@ int __init vpe_module_init(void) device_del(&vpe_device); out_class: + put_device(&vpe_device); class_unregister(&vpe_class); out_chrdev: @@ -509,7 +509,7 @@ void __exit vpe_module_exit(void) { struct vpe *v, *n; - device_del(&vpe_device); + device_unregister(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 7a623684d9b5e..2d5a0bcb0cec1 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -50,6 +50,7 @@ struct clk *clk_get_io(void) { return &cpu_clk_generic[2]; } +EXPORT_SYMBOL_GPL(clk_get_io); struct clk *clk_get_ppe(void) { diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c index 794c96c2a4cdd..311dc1580bbde 100644 --- a/arch/mips/loongson32/common/platform.c +++ b/arch/mips/loongson32/common/platform.c @@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) if (plat_dat->bus_id) { __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 | GMAC1_USE_UART0, LS1X_MUX_CTRL0); - switch (plat_dat->interface) { + switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23); break; @@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) break; default: pr_err("unsupported mii mode %d\n", - plat_dat->interface); + plat_dat->phy_interface); return -ENOTSUPP; } val &= ~GMAC1_SHUT; } else { - switch (plat_dat->interface) { + switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01); break; @@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) break; default: pr_err("unsupported mii mode %d\n", - plat_dat->interface); + plat_dat->phy_interface); return -ENOTSUPP; } val &= ~GMAC0_SHUT; @@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) plat_dat = dev_get_platdata(&pdev->dev); val &= ~PHY_INTF_SELI; - if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) + if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) val |= 0x4 << PHY_INTF_SELI_SHIFT; __raw_writel(val, LS1X_MUX_CTRL1); @@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = { .bus_id = 0, .phy_addr = -1, #if defined(CONFIG_LOONGSON1_LS1B) - .interface = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, #elif defined(CONFIG_LOONGSON1_LS1C) - .interface = PHY_INTERFACE_MODE_RMII, + .phy_interface = PHY_INTERFACE_MODE_RMII, #endif .mdio_bus_data = &ls1x_mdio_bus_data, .dma_cfg = &ls1x_eth_dma_cfg, @@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = { static struct plat_stmmacenet_data ls1x_eth1_pdata = { .bus_id = 1, .phy_addr = -1, - .interface = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, .mdio_bus_data = &ls1x_mdio_bus_data, .dma_cfg = &ls1x_eth_dma_cfg, .has_gmac = 1, diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c index e9de6da0ce51f..9dcfe9de55b0a 100644 --- 
a/arch/mips/loongson32/ls1c/board.c +++ b/arch/mips/loongson32/ls1c/board.c @@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = { static int __init ls1c_platform_init(void) { ls1x_serial_set_uartclk(&ls1x_uart_pdev); - ls1x_rtc_set_extclk(&ls1x_rtc_pdev); return platform_add_devices(ls1c_platform_devices, ARRAY_SIZE(ls1c_platform_devices)); diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c index 25372e62783b5..3cd1b408fa1cb 100644 --- a/arch/mips/pic32/pic32mzda/early_console.c +++ b/arch/mips/pic32/pic32mzda/early_console.c @@ -27,7 +27,7 @@ #define U_BRG(x) (UART_BASE(x) + 0x40) static void __iomem *uart_base; -static char console_port = -1; +static int console_port = -1; static int __init configure_uart_pins(int port) { @@ -47,7 +47,7 @@ static int __init configure_uart_pins(int port) return 0; } -static void __init configure_uart(char port, int baud) +static void __init configure_uart(int port, int baud) { u32 pbclk; @@ -60,7 +60,7 @@ static void __init configure_uart(char port, int baud) uart_base + PIC32_SET(U_STA(port))); } -static void __init setup_early_console(char port, int baud) +static void __init setup_early_console(int port, int baud) { if (configure_uart_pins(port)) return; @@ -130,16 +130,15 @@ static int __init get_baud_from_cmdline(char *arch_cmdline) return baud; } -void __init fw_init_early_console(char port) +void __init fw_init_early_console(void) { char *arch_cmdline = pic32_getcmdline(); - int baud = -1; + int baud, port; uart_base = ioremap(PIC32_BASE_UART, 0xc00); baud = get_baud_from_cmdline(arch_cmdline); - if (port == -1) - port = get_port_from_cmdline(arch_cmdline); + port = get_port_from_cmdline(arch_cmdline); if (port == -1) port = EARLY_CONSOLE_PORT; diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c index f232c77ff5265..488c0bee7ebf5 100644 --- a/arch/mips/pic32/pic32mzda/init.c +++ b/arch/mips/pic32/pic32mzda/init.c @@ -60,7 +60,7 @@ void __init plat_mem_setup(void) strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); #ifdef CONFIG_EARLY_PRINTK - fw_init_early_console(-1); + fw_init_early_console(); #endif pic32_config_init(); } diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c index 000ede156bdc0..5143d1cf8984c 100644 --- a/arch/mips/sgi-ip27/ip27-xtalk.c +++ b/arch/mips/sgi-ip27/ip27-xtalk.c @@ -27,15 +27,18 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) { struct xtalk_bridge_platform_data *bd; struct sgi_w1_platform_data *wd; - struct platform_device *pdev; + struct platform_device *pdev_wd; + struct platform_device *pdev_bd; struct resource w1_res; unsigned long offset; offset = NODE_OFFSET(nasid); wd = kzalloc(sizeof(*wd), GFP_KERNEL); - if (!wd) - goto no_mem; + if (!wd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + return; + } snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx", offset + (widget << SWIN_SIZE_BITS)); @@ -46,22 +49,35 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) w1_res.end = w1_res.start + 3; w1_res.flags = IORESOURCE_MEM; - pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO); - if (!pdev) { - kfree(wd); - goto no_mem; + pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO); + if (!pdev_wd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + goto err_kfree_wd; + } + if (platform_device_add_resources(pdev_wd, &w1_res, 1)) { + 
pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget); + goto err_put_pdev_wd; + } + if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) { + pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget); + goto err_put_pdev_wd; + } + if (platform_device_add(pdev_wd)) { + pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget); + goto err_put_pdev_wd; } - platform_device_add_resources(pdev, &w1_res, 1); - platform_device_add_data(pdev, wd, sizeof(*wd)); - platform_device_add(pdev); + /* platform_device_add_data() duplicates the data */ + kfree(wd); bd = kzalloc(sizeof(*bd), GFP_KERNEL); - if (!bd) - goto no_mem; - pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO); - if (!pdev) { - kfree(bd); - goto no_mem; + if (!bd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + goto err_unregister_pdev_wd; + } + pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO); + if (!pdev_bd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + goto err_kfree_bd; } @@ -82,13 +98,31 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) bd->io.flags = IORESOURCE_IO; bd->io_offset = offset; - platform_device_add_data(pdev, bd, sizeof(*bd)); - platform_device_add(pdev); + if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) { + pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget); + goto err_put_pdev_bd; + } + if (platform_device_add(pdev_bd)) { + pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget); + goto err_put_pdev_bd; + } + /* platform_device_add_data() duplicates the data */ + kfree(bd); pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget); return; -no_mem: - pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); +err_put_pdev_bd: + platform_device_put(pdev_bd); +err_kfree_bd: + kfree(bd); +err_unregister_pdev_wd: + platform_device_unregister(pdev_wd); + return; +err_put_pdev_wd: + platform_device_put(pdev_wd); +err_kfree_wd: + kfree(wd); + return; } static int probe_one_port(nasid_t nasid, int widget, int masterwid) diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h index c135111ec44eb..d3967ad184f03 100644 --- a/arch/nds32/include/asm/thread_info.h +++ b/arch/nds32/include/asm/thread_info.h @@ -48,6 +48,7 @@ struct thread_info { #define TIF_NEED_RESCHED 2 #define TIF_SINGLESTEP 3 #define TIF_NOTIFY_RESUME 4 /* callback before returning to user */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_SYSCALL_TRACE 8 #define TIF_POLLING_NRFLAG 17 #define TIF_MEMDIE 18 @@ -57,6 +58,7 @@ struct thread_info { #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S index 6a2966c2d8c8f..b30699911b81e 100644 --- a/arch/nds32/kernel/ex-exit.S +++ b/arch/nds32/kernel/ex-exit.S @@ -120,7 +120,7 @@ work_pending: andi $p1, $r1, #_TIF_NEED_RESCHED bnez $p1, work_resched - andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME + andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME|#_TIF_NOTIFY_SIGNAL beqz $p1, no_work_pending move $r0, $sp ! 
'regs' diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c index 9edd7ed7d7bf8..701c09a668de4 100644 --- a/arch/nds32/kernel/fpu.c +++ b/arch/nds32/kernel/fpu.c @@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs) } } else if (fpcsr & FPCSR_mskRIT) { if (!user_mode(regs)) - do_exit(SIGILL); + make_task_dead(SIGILL); si_signo = SIGILL; } diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index e01ad5d172245..c1327e552ec6c 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); /* kernel thread fn */ p->thread.cpu_context.r6 = stack_start; diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c index 2acb94812af98..7e3ca430a2233 100644 --- a/arch/nds32/kernel/signal.c +++ b/arch/nds32/kernel/signal.c @@ -376,7 +376,7 @@ static void do_signal(struct pt_regs *regs) asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int thread_flags) { - if (thread_flags & _TIF_SIGPENDING) + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index 6a9772ba73927..12cdd6549360c 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -185,7 +185,7 @@ void die(const char *str, struct pt_regs *regs, int err) bust_spinlocks(0); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } EXPORT_SYMBOL(die); @@ -289,7 +289,7 @@ void unhandled_interruption(struct pt_regs *regs) pr_emerg("unhandled_interruption\n"); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGKILL); + make_task_dead(SIGKILL); force_sig(SIGKILL); } @@ -300,7 +300,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr, addr, type); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGKILL); + make_task_dead(SIGKILL); force_sig(SIGKILL); } @@ -327,7 +327,7 @@ void do_revinsn(struct pt_regs *regs) pr_emerg("Reserved Instruction\n"); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGILL); + make_task_dead(SIGILL); force_sig(SIGILL); } diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile index 37dfc7e584bce..0b704c1f379f5 100644 --- a/arch/nios2/boot/Makefile +++ b/arch/nios2/boot/Makefile @@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) -$(obj)/vmImage: $(obj)/vmlinux.gz +$(obj)/vmImage: $(obj)/vmlinux.gz FORCE $(call if_changed,uimage) @$(kecho) 'Kernel: $@ is ready' diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h index 7349a4fa635be..272d2c72a7275 100644 --- a/arch/nios2/include/asm/thread_info.h +++ b/arch/nios2/include/asm/thread_info.h @@ -86,6 +86,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_MEMDIE 4 /* is terminating due to OOM killer */ #define TIF_SECCOMP 5 /* secure computing */ #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling @@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void) #define 
_TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 50b4eb19a6cc9..c5f916ca6845f 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -109,7 +109,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct switch_stack *childstack = ((struct switch_stack *)childregs) - 1; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 916180e4a9978..68d626c4f1ba7 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -309,7 +309,8 @@ asmlinkage int do_notify_resume(struct pt_regs *regs) if (!user_mode(regs)) return 0; - if (test_thread_flag(TIF_SIGPENDING)) { + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs); if (unlikely(restart)) { diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index b172da4eb1a95..86208178024f6 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err) show_regs(regs); spin_unlock_irq(&die_lock); /* - * do_exit() should take care of panic'ing from an interrupt + * make_task_dead() should take care of panic'ing from an interrupt * context so we don't handle it here */ - do_exit(err); + make_task_dead(err); } void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr) diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 9afe68bc423b3..4f9d2a2614558 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -98,6 +98,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define TIF_SINGLESTEP 4 /* restore singlestep on return to user * mode */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_RESTORE_SIGMASK 9 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling * TIF_NEED_RESCHED @@ -109,6 +110,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) ... +#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) ... diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c ... - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(kregs, 0, sizeof(struct pt_regs)); kregs->gpr[20] = usp; /* fn, kernel thread */ kregs->gpr[22] = arg; diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index af66f968dd459..1ebcff2710965 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -299,7 +299,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) if (unlikely(!user_mode(regs))) return 0; local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { /*
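Taken together, the hunks above wire the new TIF_NOTIFY_SIGNAL flag into each architecture the same way. The following C sketch restates that recipe in one place; it is illustrative only (the bit number and the do_notify_resume() shape are borrowed from the mips hunks) and is not itself part of the patch:

/* Sketch of the per-arch TIF_NOTIFY_SIGNAL wiring -- not part of the patch. */
#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)

asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long ti_flags)
{
	/* TIF_NOTIFY_SIGNAL (raised by task_work/io_uring) must reach the
	 * signal-delivery path even when no classic signal is queued, so
	 * it is OR-ed into the do_signal() gate alongside TIF_SIGPENDING. */
	if (ti_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
		do_signal(regs);

	if (ti_flags & _TIF_NOTIFY_RESUME)
		tracehook_notify_resume(regs);
}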
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 206e5325e61bc..fca5317f3ce17 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -212,7 +212,7 @@ void die(const char *str, struct pt_regs *regs, long err) __asm__ __volatile__("l.nop 1"); do {} while (1); #endif - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* This is normally the 'Oops' routine */ diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 2d89f79f460cb..07a4d4badd697 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -315,6 +315,16 @@ config IRQSTACKS for handling hard and soft interrupts. This can help avoid overflowing the process kernel stacks. +config TLB_PTLOCK + bool "Use page table locks in TLB fault handler" + depends on SMP + default n + help + Select this option to enable page table locking in the TLB + fault handler. This ensures that page table entries are + updated consistently on SMP machines at the expense of some + loss in performance. + config HOTPLUG_CPU bool default y if SMP diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h index 9d3d7737c58b1..a005ebc547793 100644 --- a/arch/parisc/include/asm/hardware.h +++ b/arch/parisc/include/asm/hardware.h @@ -10,12 +10,12 @@ #define SVERSION_ANY_ID PA_SVERSION_ANY_ID struct hp_hardware { - unsigned short hw_type:5; /* HPHW_xxx */ - unsigned short hversion; - unsigned long sversion:28; - unsigned short opt; - const char name[80]; /* The hardware description */ -}; + unsigned int hw_type:8; /* HPHW_xxx */ + unsigned int hversion:12; + unsigned int sversion:12; + unsigned char opt; + unsigned char name[59]; /* The hardware description */ +} __packed; struct parisc_device; diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index cb5f2f7304213..aba69ff79e8c1 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -5,6 +5,7 @@ #include <linux/mm.h> #include <linux/sched.h> #include <linux/atomic.h> +#include <linux/spinlock.h> #include <asm-generic/mm_hooks.h> static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) @@ -52,6 +53,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { if (prev != next) { +#ifdef CONFIG_TLB_PTLOCK + /* put physical address of page_table_lock in cr28 (tr4) + for TLB faults */ + spinlock_t *pgd_lock = &next->page_table_lock; + mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28); +#endif mtctl(__pa(next->pgd), 25); load_context(next->context); }
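The TLB_PTLOCK help text and the cr28 handoff in switch_mm_irqs_off() above compress a fair amount of mechanism. As a rough C sketch (illustrative only, not code from this patch), the TLB miss handlers take the published lock before touching the PTE; mfctl()/ldcw() stand in for the PA-RISC primitives, and the real implementation is the ptl_lock/ptl_unlock assembler macros added to entry.S further down, which also skip the lock for kernel-space faults (space id 0):

/* Illustrative sketch only -- the real handler is the assembly in entry.S. */
static void tlb_miss_with_ptlock(pte_t *ptep)
{
	volatile unsigned int *lock = NULL;

	if (IS_ENABLED(CONFIG_TLB_PTLOCK)) {
		/* physical address published by switch_mm_irqs_off() */
		lock = (volatile unsigned int *)mfctl(28);
		while (ldcw(lock) == 0)
			;	/* ldcw (load-and-clear word) is the PA-RISC
				   test-and-set; spin until it returns nonzero */
	}

	/* ... revalidate *ptep, set the accessed/dirty bit, insert the TLB entry ... */

	if (IS_ENABLED(CONFIG_TLB_PTLOCK))
		*lock = 1;	/* any nonzero ordered store (stw,ma in the asm)
				   releases the lock */
}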
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 8802ce651a3af..0561568f7b489 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -112,7 +112,7 @@ extern int npmem_ranges; #else #define BITS_PER_PTE_ENTRY 2 #define BITS_PER_PMD_ENTRY 2 -#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY +#define BITS_PER_PGD_ENTRY 2 #endif #define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY) #define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index a6482b2ce0eab..dda5570853116 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -15,47 +15,23 @@ #define __HAVE_ARCH_PGD_FREE #include <asm-generic/pgalloc.h> -/* Allocate the top level pgd (page directory) - * - * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we - * allocate the first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd sizes are - * arranged so that a single pmd covers 4GB (giving a full 64-bit - * process access to 8TB) so our lookups are effectively L2 for the - * first 4GB of the kernel (i.e. for all ILP32 processes and all the - * kernel for machines with under 4GB of memory) */ +/* Allocate the top level pgd (page directory) */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, - PGD_ALLOC_ORDER); - pgd_t *actual_pgd = pgd; + pgd_t *pgd; - if (likely(pgd != NULL)) { - memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); -#if CONFIG_PGTABLE_LEVELS == 3 - actual_pgd += PTRS_PER_PGD; ... - set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT))); - /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as - * a signal that this pmd may not be freed */ - set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED)); -#endif - } - spin_lock_init(pgd_spinlock(actual_pgd)); - return actual_pgd; + pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (unlikely(pgd == NULL)) + return NULL; + + memset(pgd, 0, PAGE_SIZE << PGD_ORDER); + + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { -#if CONFIG_PGTABLE_LEVELS == 3 - pgd -= PTRS_PER_PGD; -#endif - free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); + free_pages((unsigned long)pgd, PGD_ORDER); } #if CONFIG_PGTABLE_LEVELS == 3 @@ -70,41 +46,25 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); + pmd_t *pmd; + + pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); + if (likely(pmd)) + memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER); + return pmd; } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) { - /* - * This is the permanent pmd attached to the pgd; - * cannot free it. - * Increment the counter to compensate for the decrement - * done by generic mm code. - */ - mm_inc_nr_pmds(mm); - return; - } free_pages((unsigned long)pmd, PMD_ORDER); } - #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { -#if CONFIG_PGTABLE_LEVELS == 3 - /* preserve the gateway marker if this is the beginning of - * the permanent pmd */ - if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); - else -#endif - set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); + set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) + + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); } #define pmd_populate(mm, pmd, pte_page) \ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 75cf84070fc91..8964798b8274e 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -23,8 +23,6 @@ #include <asm/processor.h> #include <asm/cache.h> -static inline spinlock_t *pgd_spinlock(pgd_t *); /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= @@ -42,12 +40,8 @@ static inline spinlock_t *pgd_spinlock(pgd_t *); /* This is for the serialization of PxTLB broadcasts. At least on the N class * systems, only one PxTLB inter processor broadcast can be active at any one - * time on the Merced bus. - - * PTE updates are protected by locks in the PMD. - */ + * time on the Merced bus.
*/ extern spinlock_t pa_tlb_flush_lock; -extern spinlock_t pa_swapper_pg_lock; #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) extern int pa_serialize_tlb_flushes; #else @@ -82,22 +76,25 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) purge_tlb_end(flags); } +extern void __update_cache(pte_t pte); + /* Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. */ -#define set_pte(pteptr, pteval) \ - do{ \ - *(pteptr) = (pteval); \ - } while(0) - -#define set_pte_at(mm, addr, ptep, pteval) \ - do { \ - unsigned long flags; \ - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\ - set_pte(ptep, pteval); \ - purge_tlb_entries(mm, addr); \ - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\ +#define set_pte(pteptr, pteval) \ + do { \ + *(pteptr) = (pteval); \ + mb(); \ + } while(0) + +#define set_pte_at(mm, addr, pteptr, pteval) \ + do { \ + if (pte_present(pteval) && \ + pte_user(pteval)) \ + __update_cache(pteval); \ + *(pteptr) = (pteval); \ + purge_tlb_entries(mm, addr); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -120,12 +117,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) #if CONFIG_PGTABLE_LEVELS == 3 -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PMD_ORDER 1 /* Number of pages per pmd */ -#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */ +#define PMD_ORDER 1 +#define PGD_ORDER 0 #else -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PGD_ALLOC_ORDER (PGD_ORDER + 1) +#define PGD_ORDER 1 #endif /* Definitions for 3rd level (we use PLD here for Page Lower directory @@ -240,11 +235,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) * able to effectively address 40/42/44-bits of physical address space * depending on 4k/16k/64k PAGE_SIZE */ #define _PxD_PRESENT_BIT 31 -#define _PxD_ATTACHED_BIT 30 -#define _PxD_VALID_BIT 29 +#define _PxD_VALID_BIT 30 #define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT)) -#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT)) #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) #define PxD_FLAG_MASK (0xf) #define PxD_FLAG_SHIFT (4) @@ -317,6 +310,7 @@ extern unsigned long *empty_zero_page; #define pte_none(x) (pte_val(x) == 0) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) +#define pte_user(x) (pte_val(x) & _PAGE_USER) #define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0)) #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) @@ -326,23 +320,10 @@ extern unsigned long *empty_zero_page; #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK) #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) -#if CONFIG_PGTABLE_LEVELS == 3 -/* The first entry of the permanent pmd is not there if it contains - * the gateway marker */ -#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED) -#else #define pmd_none(x) (!pmd_val(x)) -#endif #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID)) #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT) static inline void pmd_clear(pmd_t *pmd) { -#if CONFIG_PGTABLE_LEVELS == 3 - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - /* This is the entry pointing to the permanent pmd - * attached to the pgd; cannot clear it */ - set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED)); - else -#endif set_pmd(pmd, __pmd(0)); } @@ -358,12 +339,6 @@ static inline void pmd_clear(pmd_t *pmd) { #define 
pud_bad(x) (!(pud_flag(x) & PxD_FLAG_VALID)) #define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT) static inline void pud_clear(pud_t *pud) { -#if CONFIG_PGTABLE_LEVELS == 3 - if(pud_flag(*pud) & PxD_FLAG_ATTACHED) - /* This is the permanent pmd attached to the pud; cannot - * free it */ - return; -#endif set_pud(pud, __pud(0)); } #endif @@ -443,7 +418,7 @@ extern void paging_init (void); #define PG_dcache_dirty PG_arch_1 -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); +#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep) /* Encode and de-code a swap entry */ @@ -456,32 +431,18 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -static inline spinlock_t *pgd_spinlock(pgd_t *pgd) -{ - if (unlikely(pgd == swapper_pg_dir)) - return &pa_swapper_pg_lock; - return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))); -} - - static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; - unsigned long flags; if (!pte_young(*ptep)) return 0; - spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags); pte = *ptep; if (!pte_young(pte)) { - spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); return 0; } - set_pte(ptep, pte_mkold(pte)); - purge_tlb_entries(vma->vm_mm, addr); - spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; } @@ -489,24 +450,16 @@ struct mm_struct; static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; - unsigned long flags; - spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); old_pte = *ptep; - set_pte(ptep, __pte(0)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); + set_pte_at(mm, addr, ptep, __pte(0)); return old_pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; - spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); - set_pte(ptep, pte_wrprotect(*ptep)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 285757544cca2..0bd38a972ceab 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -52,6 +52,7 @@ struct thread_info { #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ +#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ #define TIF_SINGLESTEP 9 /* single stepping? 
*/ @@ -61,6 +62,7 @@ struct thread_info { #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_32BIT (1 << TIF_32BIT) @@ -72,7 +74,7 @@ struct thread_info { #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ - _TIF_NEED_RESCHED) + _TIF_NEED_RESCHED | _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index ab78cba446ed3..ef9b53b80b9d1 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -49,28 +49,27 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ -#define MADV_COLD 20 /* deactivate these pages */ -#define MADV_PAGEOUT 21 /* reclaim these pages */ - -#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ -#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ -#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */ -#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */ +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ -#define MADV_DONTDUMP 69 /* Explicity exclude from the core dump, +#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, overrides the coredump filter bits */ -#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */ +#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ -#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */ -#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */ +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ #define MADV_HWPOISON 100 /* poison a page for testing */ #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ /* compatibility flags */ #define MAP_FILE 0 -#define MAP_VARIABLE 0 #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 305768a40773f..cd2cc1b1648c0 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -268,7 +268,6 @@ int main(void) DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD); DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD); DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE); - DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER)); DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT)); DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT); DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 86a1a63563fd5..c81ab0cb89255 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -83,9 +83,9 @@ EXPORT_SYMBOL(flush_cache_all_local); #define pfn_va(pfn) __va(PFN_PHYS(pfn)) void -update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +__update_cache(pte_t pte) { - unsigned long 
pfn = pte_pfn(*ptep); + unsigned long pfn = pte_pfn(pte); struct page *page; /* We don't have pte special. As a result, we can be called with diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index f5a25ed0930d2..d95157488832a 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -883,15 +883,13 @@ void __init walk_central_bus(void) &root); } -static void print_parisc_device(struct parisc_device *dev) +static __init void print_parisc_device(struct parisc_device *dev) { - char hw_path[64]; - static int count; + static int count __initdata; - print_pa_hwpath(dev, hw_path); - pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", - ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type, - dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); + pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }", + ++count, dev->name, &(dev->hpa.start), dev->id.hw_type, + dev->id.hversion, dev->id.sversion, dev->id.hversion_rev); if (dev->num_addrs) { int k; @@ -1080,7 +1078,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) -static int print_one_device(struct device * dev, void * data) +static __init int print_one_device(struct device * dev, void * data) { struct parisc_device * pdev = to_parisc_device(dev); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 3da39140babcf..05bed27eef859 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -35,10 +35,9 @@ .level 2.0 #endif - .import pa_tlb_lock,data - .macro load_pa_tlb_lock reg - mfctl %cr25,\reg - addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg + /* Get aligned page_table_lock address for this mm from cr28/tr4 */ + .macro get_ptl reg + mfctl %cr28,\reg .endm /* space_to_prot macro creates a prot id from a space id */ @@ -407,7 +406,9 @@ # endif #endif dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ +#if CONFIG_PGTABLE_LEVELS < 3 copy %r0,\pte +#endif ldw,s \index(\pmd),\pmd bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ @@ -417,38 +418,23 @@ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ .endm - /* Look up PTE in a 3-Level scheme. - * - * Here we implement a Hybrid L2/L3 scheme: we allocate the - * first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd - * sizes are arranged so that a single pmd covers 4GB (giving - * a full LP64 process access to 8TB) so our lookups are - * effectively L2 for the first 4GB of the kernel (i.e. for - * all ILP32 processes and all the kernel for machines with - * under 4GB of memory) */ + /* Look up PTE in a 3-Level scheme. */ .macro L3_ptep pgd,pte,index,va,fault -#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */ +#if CONFIG_PGTABLE_LEVELS == 3 + copy %r0,\pte extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ldw,s \index(\pgd),\pgd - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - shld \pgd,PxD_VALUE_SHIFT,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - copy \index,\pgd - extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd + shld \pgd,PxD_VALUE_SHIFT,\pgd #endif L2_ptep \pgd,\pte,\index,\va,\fault .endm - /* Acquire pa_tlb_lock lock and check page is present. 
*/ - .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault -#ifdef CONFIG_SMP + /* Acquire page_table_lock and check page is present. */ + .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault +#ifdef CONFIG_TLB_PTLOCK 98: cmpib,COND(=),n 0,\spc,2f - load_pa_tlb_lock \tmp + get_ptl \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop @@ -463,26 +449,26 @@ 3: .endm - /* Release pa_tlb_lock lock without reloading lock address. + /* Release page_table_lock without reloading lock address. Note that the values in the register spc are limited to NR_SPACE_IDS (262144). Thus, the stw instruction always stores a nonzero value even when register spc is 64 bits. We use an ordered store to ensure all prior accesses are performed prior to releasing the lock. */ - .macro tlb_unlock0 spc,tmp -#ifdef CONFIG_SMP + .macro ptl_unlock0 spc,tmp +#ifdef CONFIG_TLB_PTLOCK 98: or,COND(=) %r0,\spc,%r0 stw,ma \spc,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm - /* Release pa_tlb_lock lock. */ - .macro tlb_unlock1 spc,tmp -#ifdef CONFIG_SMP -98: load_pa_tlb_lock \tmp + /* Release page_table_lock. */ + .macro ptl_unlock1 spc,tmp +#ifdef CONFIG_TLB_PTLOCK +98: get_ptl \tmp + ptl_unlock0 \spc,\tmp 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) - tlb_unlock0 \spc,\tmp #endif .endm @@ -1165,14 +1151,14 @@ dtlb_miss_20w: L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1191,14 +1177,14 @@ nadtlb_miss_20w: L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1219,7 +1205,7 @@ dtlb_miss_11: L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1232,7 +1218,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1252,7 +1238,7 @@ nadtlb_miss_11: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1265,7 +1251,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1285,7 +1271,7 @@ dtlb_miss_20: L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1294,7 +1280,7 @@ dtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1313,7 +1299,7 @@ nadtlb_miss_20: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1322,7 +1308,7 @@ nadtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1422,14 +1408,14 @@ itlb_miss_20w: L3_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt 
pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1446,14 +1432,14 @@ naitlb_miss_20w: L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1474,7 +1460,7 @@ itlb_miss_11: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1487,7 +1473,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1498,7 +1484,7 @@ naitlb_miss_11: L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1511,7 +1497,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1532,7 +1518,7 @@ itlb_miss_20: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1541,7 +1527,7 @@ itlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1552,7 +1538,7 @@ naitlb_miss_20: L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1561,7 +1547,7 @@ naitlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1584,14 +1570,14 @@ dbit_trap_20w: L3_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop #else @@ -1604,7 +1590,7 @@ dbit_trap_11: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb_11 spc,pte,prot @@ -1617,7 +1603,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop @@ -1628,7 +1614,7 @@ dbit_trap_20: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 @@ -1637,7 +1623,7 @@ dbit_trap_20: idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop #endif diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 665b70086685c..7ed28ddcaba7d 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1230,7 +1230,7 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096]; */ int pdc_iodc_print(const unsigned char *str, unsigned count) { - unsigned int i; + unsigned int i, found = 0; unsigned long flags; for (i = 0; i < count;) { @@ -1239,6 +1239,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) iodc_dbuf[i+0] = '\r'; iodc_dbuf[i+1] = '\n'; i += 2; + found = 1; goto print; default: iodc_dbuf[i] = str[i]; @@ -1255,7 +1256,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0); spin_unlock_irqrestore(&pdc_lock, flags); - return i; + return i - found; } #if !defined(BOOTLOADER) diff --git a/arch/parisc/kernel/head.S 
b/arch/parisc/kernel/head.S index aa93d775c34db..598d0938449da 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -22,7 +22,7 @@ #include #include - .level PA_ASM_LEVEL + .level 1.1 __INITDATA ENTRY(boot_args) @@ -69,6 +69,47 @@ $bss_loop: stw,ma %arg2,4(%r1) stw,ma %arg3,4(%r1) +#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20) + /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU + * and halt kernel if we detect a PA1.x CPU. */ + ldi 32,%r10 + mtctl %r10,%cr11 + .level 2.0 + mfctl,w %cr11,%r10 + .level 1.1 + comib,<>,n 0,%r10,$cpu_ok + + load32 PA(msg1),%arg0 + ldi msg1_end-msg1,%arg1 +$iodc_panic: + copy %arg0, %r10 + copy %arg1, %r11 + load32 PA(init_stack),%sp +#define MEM_CONS 0x3A0 + ldw MEM_CONS+32(%r0),%arg0 // HPA + ldi ENTRY_IO_COUT,%arg1 + ldw MEM_CONS+36(%r0),%arg2 // SPA + ldw MEM_CONS+8(%r0),%arg3 // layers + load32 PA(__bss_start),%r1 + stw %r1,-52(%sp) // arg4 + stw %r0,-56(%sp) // arg5 + stw %r10,-60(%sp) // arg6 = ptr to text + stw %r11,-64(%sp) // arg7 = len + stw %r0,-68(%sp) // arg8 + load32 PA(.iodc_panic_ret), %rp + ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC + bv,n (%r1) +.iodc_panic_ret: + b . /* wait endless with ... */ + or %r10,%r10,%r10 /* qemu idle sleep */ +msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n" +msg1_end: + +$cpu_ok: +#endif + + .level PA_ASM_LEVEL + /* Initialize startup VM. Just map first 16/32 MB of memory */ load32 PA(swapper_pg_dir),%r4 mtctl %r4,%cr24 /* Initialize kernel root pointer */ diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index a92a23d6acd93..5e4381280c97b 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -200,7 +200,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, extern void * const ret_from_kernel_thread; extern void * const child_return; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(cregs, 0, sizeof(struct pt_regs)); if (!usp) /* idle thread */ diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2127974982df9..df9b9f0591bfb 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -127,6 +127,12 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long tmp; long ret = -EIO; + unsigned long user_regs_struct_size = sizeof(struct user_regs_struct); +#ifdef CONFIG_64BIT + if (is_compat_task()) + user_regs_struct_size /= 2; +#endif + switch (request) { /* Read the word at location addr in the USER area. For ptraced @@ -182,14 +188,14 @@ long arch_ptrace(struct task_struct *child, long request, return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_GETFPREGS: /* Get the child FPU state. 
*/ @@ -303,6 +309,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } } break; + case PTRACE_GETREGS: + case PTRACE_SETREGS: + case PTRACE_GETFPREGS: + case PTRACE_SETFPREGS: + return arch_ptrace(child, request, addr, data); default: ret = compat_ptrace_request(child, request, addr, data); diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 8d6c9b88eb3f2..db1a47cf424dd 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -609,7 +609,8 @@ do_signal(struct pt_regs *regs, long in_syscall) void do_notify_resume(struct pt_regs *regs, long in_syscall) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 9549496f55230..40f881f729e4b 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -444,3 +444,30 @@ asmlinkage long parisc_inotify_init1(int flags) flags = FIX_O_NONBLOCK(flags); return sys_inotify_init1(flags); } + +/* + * madvise() wrapper + * + * Up to kernel v6.1 parisc has different values than all other + * platforms for the MADV_xxx flags listed below. + * To keep binary compatibility with existing userspace programs + * translate the former values to the new values. + * + * XXX: Remove this wrapper in year 2025 (or later) + */ + +asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior) +{ + switch (behavior) { + case 65: behavior = MADV_MERGEABLE; break; + case 66: behavior = MADV_UNMERGEABLE; break; + case 67: behavior = MADV_HUGEPAGE; break; + case 68: behavior = MADV_NOHUGEPAGE; break; + case 69: behavior = MADV_DONTDUMP; break; + case 70: behavior = MADV_DODUMP; break; + case 71: behavior = MADV_WIPEONFORK; break; + case 72: behavior = MADV_KEEPONFORK; break; + } + + return sys_madvise(start, len_in, behavior); +} diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index d526ebfa58e50..dfe9254ee74b6 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -131,7 +131,7 @@ 116 common sysinfo sys_sysinfo compat_sys_sysinfo 117 common shutdown sys_shutdown 118 common fsync sys_fsync -119 common madvise sys_madvise +119 common madvise parisc_madvise 120 common clone sys_clone_wrapper 121 common setdomainname sys_setdomainname 122 common sendfile sys_sendfile compat_sys_sendfile diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index bce47e0fb692c..2fad7867af100 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -268,7 +268,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* gdb uses break 4,8 */ diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index d7ba014a7fbb5..43652de5f139f 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -142,24 +142,17 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned long flags; - - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); __set_huge_pte_at(mm, addr, ptep, entry); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, 
unsigned long addr, pte_t *ptep) { - unsigned long flags; pte_t entry; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); entry = *ptep; __set_huge_pte_at(mm, addr, ptep, __pte(0)); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); return entry; } @@ -168,29 +161,23 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; pte_t old_pte; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); old_pte = *ptep; __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); } int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - unsigned long flags; int changed; struct mm_struct *mm = vma->vm_mm; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); changed = !pte_same(*ptep, pte); if (changed) { __set_huge_pte_at(mm, addr, ptep, pte); } - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); return changed; } diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 319afa00cdf7b..6a083fc87a038 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -37,11 +37,6 @@ extern int data_start; extern void parisc_kernel_start(void); /* Kernel entry point in head.S */ #if CONFIG_PGTABLE_LEVELS == 3 -/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout - * with the first pmd adjacent to the pgd and below it. gcc doesn't actually - * guarantee that global objects will be laid out in memory in the same order - * as the order of declaration, so put these in different sections and use - * the linker script to order them. */ pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE))); #endif @@ -558,6 +553,11 @@ void __init mem_init(void) BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t)); BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD > BITS_PER_LONG); +#if CONFIG_PGTABLE_LEVELS == 3 + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD); +#else + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD); +#endif high_memory = __va((max_pfn << PAGE_SHIFT)); set_max_mapnr(max_low_pfn); diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 59175651f0b9e..6122541412961 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -153,7 +153,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8 CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8) else CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) -CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) +CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4 endif else ifdef CONFIG_PPC_BOOK3E_64 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index e4b364b5da9e7..8b78eba755f9a 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -30,6 +30,7 @@ endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ + $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ $(LINUXINCLUDE) diff --git a/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi new file mode 100644 index 0000000000000..7e2a90cde72e5 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi @@ -0,0 +1,51 @@ +/* + * e500v1 Power 
ISA Device Tree Source (include) + * + * Copyright 2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/ { + cpus { + power-isa-version = "2.03"; + power-isa-b; // Base + power-isa-e; // Embedded + power-isa-atb; // Alternate Time Base + power-isa-cs; // Cache Specification + power-isa-e.le; // Embedded.Little-Endian + power-isa-e.pm; // Embedded.Performance Monitor + power-isa-ecl; // Embedded Cache Locking + power-isa-mmc; // Memory Coherence + power-isa-sp; // Signal Processing Engine + power-isa-sp.fs; // SPE.Embedded Float Scalar Single + power-isa-sp.fv; // SPE.Embedded Float Vector + mmu-type = "power-embedded"; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts index 18a885130538a..e03ae130162ba 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8540ADS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts index ac381e7b1c60e..a2a6c5cf852e9 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8541CDS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts index 9f58db2a7e661..901b6ff06dfbb 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8555CDS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts index a24722ccaebf1..c2f9aea78b29f 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8560ADS"; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi new file mode 100644 index 0000000000000..437dab3fc0176 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later +/* + * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ] + * + * Copyright 2022 Sean Anderson + * Copyright 2012 - 2015 Freescale Semiconductor Inc. 
+ */ + +fman@400000 { + fman0_rx_0x08: port@88000 { + cell-index = <0x8>; + compatible = "fsl,fman-v3-port-rx"; + reg = <0x88000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x28: port@a8000 { + cell-index = <0x28>; + compatible = "fsl,fman-v3-port-tx"; + reg = <0xa8000 0x1000>; + fsl,fman-10g-port; + }; + + ethernet@e0000 { + cell-index = <0>; + compatible = "fsl,fman-memac"; + reg = <0xe0000 0x1000>; + fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; + ptp-timer = <&ptp_timer0>; + pcsphy-handle = <&pcsphy0>; + }; + + mdio@e1000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; + reg = <0xe1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ + + pcsphy0: ethernet-phy@0 { + reg = <0x0>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi new file mode 100644 index 0000000000000..ad116b17850a8 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later +/* + * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ] + * + * Copyright 2022 Sean Anderson + * Copyright 2012 - 2015 Freescale Semiconductor Inc. + */ + +fman@400000 { + fman0_rx_0x09: port@89000 { + cell-index = <0x9>; + compatible = "fsl,fman-v3-port-rx"; + reg = <0x89000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x29: port@a9000 { + cell-index = <0x29>; + compatible = "fsl,fman-v3-port-tx"; + reg = <0xa9000 0x1000>; + fsl,fman-10g-port; + }; + + ethernet@e2000 { + cell-index = <1>; + compatible = "fsl,fman-memac"; + reg = <0xe2000 0x1000>; + fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; + ptp-timer = <&ptp_timer0>; + pcsphy-handle = <&pcsphy1>; + }; + + mdio@e3000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; + reg = <0xe3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ + + pcsphy1: ethernet-phy@0 { + reg = <0x0>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi index ecbb447920bc6..27714dc2f04a5 100644 --- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi @@ -609,8 +609,8 @@ /include/ "qoriq-bman1.dtsi" /include/ "qoriq-fman3-0.dtsi" -/include/ "qoriq-fman3-0-1g-0.dtsi" -/include/ "qoriq-fman3-0-1g-1.dtsi" +/include/ "qoriq-fman3-0-10g-2.dtsi" +/include/ "qoriq-fman3-0-10g-3.dtsi" /include/ "qoriq-fman3-0-1g-2.dtsi" /include/ "qoriq-fman3-0-1g-3.dtsi" /include/ "qoriq-fman3-0-1g-4.dtsi" @@ -659,3 +659,19 @@ interrupts = <16 2 1 9>; }; }; + +&fman0_rx_0x08 { + /delete-property/ fsl,fman-10g-port; +}; + +&fman0_tx_0x28 { + /delete-property/ fsl,fman-10g-port; +}; + +&fman0_rx_0x09 { + /delete-property/ fsl,fman-10g-port; +}; + +&fman0_tx_0x29 { + /delete-property/ fsl,fman-10g-port; +}; diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h index 4f897993b7107..699a88584ae16 100644 --- a/arch/powerpc/include/asm/imc-pmu.h +++ b/arch/powerpc/include/asm/imc-pmu.h @@ -137,7 +137,7 @@ struct imc_pmu { * are inited. 
*/ struct imc_pmu_ref { - struct mutex lock; + spinlock_t lock; unsigned int id; int refc; }; diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 6de3517bea94e..ff31d2fa2140f 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -96,6 +96,7 @@ void arch_setup_new_exec(void); #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */ #define TIF_SYSCALL_EMU 4 /* syscall emulation active */ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_PATCH_PENDING 6 /* pending live patching update */ @@ -121,6 +122,7 @@ void arch_setup_new_exec(void); #define _TIF_SYSCALL_TRACE (1<list); parent = of_get_parent(dn); pdn->parent = parent ? PCI_DN(parent) : NULL; + of_node_put(parent); if (pdn->parent) list_add_tail(&pdn->list, &pdn->parent->child_list); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index c43cc26bde5db..cf375d67eacbd 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1684,7 +1684,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Copy registers */ sp -= sizeof(struct pt_regs); childregs = (struct pt_regs *) sp; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); childregs->gpr[1] = sp + sizeof(struct pt_regs); diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index bf962051af0a0..014229c40435a 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -715,6 +715,7 @@ void __noreturn rtas_halt(void) /* Must be in the RMO region, so we place it here */ static char rtas_os_term_buf[2048]; +static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE; void rtas_os_term(char *str) { @@ -726,16 +727,20 @@ void rtas_os_term(char *str) * this property may terminate the partition which we want to avoid * since it interferes with panic_timeout. */ - if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || - RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) + if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE) return; snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); + /* + * Keep calling as long as RTAS returns a "try again" status, + * but don't use rtas_busy_delay(), which potentially + * schedules. + */ do { - status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL, + status = rtas_call(ibm_os_term_token, 1, 1, NULL, __pa(rtas_os_term_buf)); - } while (rtas_busy_delay(status)); + } while (rtas_busy_delay_time(status)); if (status != 0) printk(KERN_EMERG "ibm,os-term call failed %d\n", status); @@ -1267,6 +1272,13 @@ void __init rtas_initialize(void) no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry); rtas.entry = no_entry ? rtas.base : entry; + /* + * Discover these now to avoid device tree lookups in the + * panic path. 
+ */ + if (of_property_read_bool(rtas.dev, "ibm,extended-os-term")) + ibm_os_term_token = rtas_token("ibm,os-term"); + /* If RTAS was found, allocate the RMO buffer for it and look for * the stop-self token if any */ diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index d2c356f370775..a8bb0aca1d021 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -318,7 +318,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) if (thread_info_flags & _TIF_PATCH_PENDING) klp_update_patch_state(current); - if (thread_info_flags & _TIF_SIGPENDING) { + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { BUG_ON(regs != current->thread.regs); do_signal(current); } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 069d451240fa4..5e5a2448ae79a 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -245,7 +245,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, if (panic_on_oops) panic("Fatal exception"); - do_exit(signr); + make_task_dead(signr); } NOKPROBE_SYMBOL(oops_end); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 38b7a3491aac0..1d25932389951 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3399,8 +3399,22 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) kvmppc_set_host_core(pcpu); + context_tracking_guest_exit(); + if (!vtime_accounting_enabled_this_cpu()) { + local_irq_enable(); + /* + * Service IRQs here before vtime_account_guest_exit() so any + * ticks that occurred while running the guest are accounted to + * the guest. If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. + */ + local_irq_disable(); + } + vtime_account_guest_exit(); + local_irq_enable(); - guest_exit(); /* Let secondaries go back to the offline loop */ for (i = 0; i < controlled_threads; ++i) { @@ -4235,8 +4249,22 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, kvmppc_set_host_core(pcpu); + context_tracking_guest_exit(); + if (!vtime_accounting_enabled_this_cpu()) { + local_irq_enable(); + /* + * Service IRQs here before vtime_account_guest_exit() so any + * ticks that occurred while running the guest are accounted to + * the guest. If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. + */ + local_irq_disable(); + } + vtime_account_guest_exit(); + local_irq_enable(); - guest_exit(); cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index b1abcb8164397..75381beb7514a 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1016,7 +1016,21 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr) } trace_kvm_exit(exit_nr, vcpu); - guest_exit_irqoff(); + + context_tracking_guest_exit(); + if (!vtime_accounting_enabled_this_cpu()) { + local_irq_enable(); + /* + * Service IRQs here before vtime_account_guest_exit() so any + * ticks that occurred while running the guest are accounted to + * the guest. 
If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. + */ + local_irq_disable(); + } + vtime_account_guest_exit(); local_irq_enable(); diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c index 0a05e51964c10..90111c9e7521f 100644 --- a/arch/powerpc/math-emu/math_efp.c +++ b/arch/powerpc/math-emu/math_efp.c @@ -17,6 +17,7 @@ #include #include +#include #include #include diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 295959487b76d..ae4ba6a6745d4 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -997,15 +997,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre pmd = *pmdp; pmd_clear(pmdp); - /* - * pmdp collapse_flush need to ensure that there are no parallel gup - * walk after this call. This is needed so that we can have stable - * page ref count when collapsing a page. We don't allow a collapse page - * if we have gup taken on the page. We can ensure that by sending IPI - * because gup walk happens with IRQ disabled. - */ - serialize_against_pte_lookup(vma->vm_mm); - radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); return pmd; diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 6c028ee513c0d..99f3c4fc21cb0 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -61,6 +61,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re next_sp = fp[0]; if (next_sp == sp + STACK_INT_FRAME_SIZE && + validate_sp(sp, current, STACK_INT_FRAME_SIZE) && fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { /* * This looks like an interrupt frame for an diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h index 8965b4463d433..5e86371a20c78 100644 --- a/arch/powerpc/perf/hv-gpci-requests.h +++ b/arch/powerpc/perf/hv-gpci-requests.h @@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 /* * Not available for counter_info_version >= 0x8, use * run_instruction_cycles_by_partition(0x100) instead. @@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id) __count(0x10, 8, cycles) ) #include I(REQUEST_END) +#endif #define REQUEST_NAME system_performance_capabilities #define REQUEST_NUM 0x40 @@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 #define REQUEST_NAME processor_bus_utilization_abc_links #define REQUEST_NUM 0x50 #define REQUEST_IDX_KIND "hw_chip_id=?" 
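The ENABLE_EVENTS_COUNTERINFO_V6 guards being added above pair with the generator in arch/powerpc/perf/req-gen/perf.h (patched further below): the generator includes this request file twice, once with the macro defined and once after #undef'ing it, so one shared list yields both a v6 event table and a trimmed v8+ table. As a rough, self-contained sketch of that include-twice pattern — simplified, hypothetical names, not the kernel's actual req-gen macros:

    /* requests.h - one shared list, guarded like hv-gpci-requests.h */
    #ifdef ENABLE_DEPRECATED
    REQ(old_counter)
    #endif
    REQ(new_counter)

    /* consumer.c - expand the same list twice into two tables */
    #define REQ(name) #name,

    #define ENABLE_DEPRECATED
    static const char *attrs_v6[] = {
    #include "requests.h"
            NULL
    };

    #undef ENABLE_DEPRECATED
    static const char *attrs_v8plus[] = {
    #include "requests.h"
            NULL
    };
    #undef REQ

At init time one table or the other is wired up, depending on the counter_info_version the hypervisor reports — that is what the hv-gpci.c hunk below does.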
@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx) __count(0x28, 8, instructions_completed) ) #include I(REQUEST_END) +#endif /* Processor_core_power_mode (0x95) skipped, no counters */ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped, diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index c756228a081fb..28b770bbc10b4 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -72,7 +72,7 @@ static struct attribute_group format_group = { static struct attribute_group event_group = { .name = "events", - .attrs = hv_gpci_event_attrs, + /* .attrs is set in init */ }; #define HV_CAPS_ATTR(_name, _format) \ @@ -330,6 +330,7 @@ static int hv_gpci_init(void) int r; unsigned long hret; struct hv_perf_caps caps; + struct hv_gpci_request_buffer *arg; hv_gpci_assert_offsets_correct(); @@ -353,6 +354,36 @@ static int hv_gpci_init(void) /* sampling not supported */ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + arg = (void *)get_cpu_var(hv_gpci_reqb); + memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); + + /* + * hcall H_GET_PERF_COUNTER_INFO populates the output + * counter_info_version value based on the system hypervisor. + * Pass counter request value 0x10, which corresponds to request + * type 'Dispatch_timebase_by_processor', to get the supported + * counter_info_version. + */ + arg->params.counter_request = cpu_to_be32(0x10); + + r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, + virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); + if (r) { + pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r); + arg->params.counter_info_version_out = 0x8; + } + + /* + * Use counter_info_version_out value to assign + * required hv-gpci event list. + */ + if (arg->params.counter_info_version_out >= 0x8) + event_group.attrs = hv_gpci_event_attrs; + else + event_group.attrs = hv_gpci_event_attrs_v6; + + put_cpu_var(hv_gpci_reqb); + r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); if (r) return r; diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h index 4d108262bed79..c72020912dea5 100644 --- a/arch/powerpc/perf/hv-gpci.h +++ b/arch/powerpc/perf/hv-gpci.h @@ -26,6 +26,7 @@ enum { #define REQUEST_FILE "../hv-gpci-requests.h" #define NAME_LOWER hv_gpci #define NAME_UPPER HV_GPCI +#define ENABLE_EVENTS_COUNTERINFO_V6 #include "req-gen/perf.h" #undef REQUEST_FILE #undef NAME_LOWER diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index e8074d7f2401b..b773c411aa5c2 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -13,6 +13,7 @@ #include #include #include +#include <linux/spinlock.h> /* Nest IMC data structures and variables */ @@ -49,7 +50,7 @@ static int trace_imc_mem_size; * core and trace-imc */ static struct imc_pmu_ref imc_global_refc = { - .lock = __MUTEX_INITIALIZER(imc_global_refc.lock), + .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock), .id = 0, .refc = 0, }; @@ -393,7 +394,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu) get_hard_smp_processor_id(cpu)); /* * If this is the last cpu in this chip then, skip the reference - * count mutex lock and make the reference count on this chip zero. + * count lock and make the reference count on this chip zero. */ ref = get_nest_pmu_ref(cpu); if (!ref) @@ -455,15 +456,15 @@ static void nest_imc_counters_release(struct perf_event *event) /* * See if we need to disable the nest PMU. * If no events are currently in use, then we have to take a
* If no events are currently in use, then we have to take a - * mutex to ensure that we don't race with another task doing + * lock to ensure that we don't race with another task doing * enable or disable the nest counters. */ ref = get_nest_pmu_ref(event->cpu); if (!ref) return; - /* Take the mutex lock for this node and then decrement the reference count */ - mutex_lock(&ref->lock); + /* Take the lock for this node and then decrement the reference count */ + spin_lock(&ref->lock); if (ref->refc == 0) { /* * The scenario where this is true is, when perf session is @@ -475,7 +476,7 @@ static void nest_imc_counters_release(struct perf_event *event) * an OPAL call to disable the engine in that node. * */ - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); return; } ref->refc--; @@ -483,7 +484,7 @@ static void nest_imc_counters_release(struct perf_event *event) rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, get_hard_smp_processor_id(event->cpu)); if (rc) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id); return; } @@ -491,7 +492,7 @@ static void nest_imc_counters_release(struct perf_event *event) WARN(1, "nest-imc: Invalid event reference count\n"); ref->refc = 0; } - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); } static int nest_imc_event_init(struct perf_event *event) @@ -550,26 +551,25 @@ static int nest_imc_event_init(struct perf_event *event) /* * Get the imc_pmu_ref struct for this node. - * Take the mutex lock and then increment the count of nest pmu events - * inited. + * Take the lock and then increment the count of nest pmu events inited. */ ref = get_nest_pmu_ref(event->cpu); if (!ref) return -EINVAL; - mutex_lock(&ref->lock); + spin_lock(&ref->lock); if (ref->refc == 0) { rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST, get_hard_smp_processor_id(event->cpu)); if (rc) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("nest-imc: Unable to start the counters for node %d\n", node_id); return rc; } } ++ref->refc; - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); event->destroy = nest_imc_counters_release; return 0; @@ -605,9 +605,8 @@ static int core_imc_mem_init(int cpu, int size) return -ENOMEM; mem_info->vbase = page_address(page); - /* Init the mutex */ core_imc_refc[core_id].id = core_id; - mutex_init(&core_imc_refc[core_id].lock); + spin_lock_init(&core_imc_refc[core_id].lock); rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE, __pa((void *)mem_info->vbase), @@ -696,9 +695,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu) perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu); } else { /* - * If this is the last cpu in this core then, skip taking refernce - * count mutex lock for this core and directly zero "refc" for - * this core. + * If this is the last cpu in this core then skip taking reference + * count lock for this core and directly zero "refc" for this core. */ opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, get_hard_smp_processor_id(cpu)); @@ -713,11 +711,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu) * last cpu in this core and core-imc event running * in this cpu. 
*/ - mutex_lock(&imc_global_refc.lock); + spin_lock(&imc_global_refc.lock); if (imc_global_refc.id == IMC_DOMAIN_CORE) imc_global_refc.refc--; - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); } return 0; } @@ -732,7 +730,7 @@ static int core_imc_pmu_cpumask_init(void) static void reset_global_refc(struct perf_event *event) { - mutex_lock(&imc_global_refc.lock); + spin_lock(&imc_global_refc.lock); imc_global_refc.refc--; /* @@ -744,7 +742,7 @@ static void reset_global_refc(struct perf_event *event) imc_global_refc.refc = 0; imc_global_refc.id = 0; } - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); } static void core_imc_counters_release(struct perf_event *event) @@ -757,17 +755,17 @@ static void core_imc_counters_release(struct perf_event *event) /* * See if we need to disable the IMC PMU. * If no events are currently in use, then we have to take a - * mutex to ensure that we don't race with another task doing + * lock to ensure that we don't race with another task doing * enable or disable the core counters. */ core_id = event->cpu / threads_per_core; - /* Take the mutex lock and decrement the refernce count for this core */ + /* Take the lock and decrement the reference count for this core */ ref = &core_imc_refc[core_id]; if (!ref) return; - mutex_lock(&ref->lock); + spin_lock(&ref->lock); if (ref->refc == 0) { /* * The scenario where this is true is, when perf session is @@ -779,7 +777,7 @@ static void core_imc_counters_release(struct perf_event *event) * an OPAL call to disable the engine in that core. * */ - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); return; } ref->refc--; @@ -787,7 +785,7 @@ static void core_imc_counters_release(struct perf_event *event) rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, get_hard_smp_processor_id(event->cpu)); if (rc) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("IMC: Unable to stop the counters for core %d\n", core_id); return; } @@ -795,7 +793,7 @@ static void core_imc_counters_release(struct perf_event *event) WARN(1, "core-imc: Invalid event reference count\n"); ref->refc = 0; } - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); reset_global_refc(event); } @@ -833,7 +831,6 @@ static int core_imc_event_init(struct perf_event *event) if ((!pcmi->vbase)) return -ENODEV; - /* Get the core_imc mutex for this core */ ref = &core_imc_refc[core_id]; if (!ref) return -EINVAL; @@ -841,22 +838,22 @@ /* * Core pmu units are enabled only when it is used. * See if this is triggered for the first time. - * If yes, take the mutex lock and enable the core counters. + * If yes, take the lock and enable the core counters. * If not, just increment the count in core_imc_refc struct. */ - mutex_lock(&ref->lock); + spin_lock(&ref->lock); if (ref->refc == 0) { rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, get_hard_smp_processor_id(event->cpu)); if (rc) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("core-imc: Unable to start the counters for core %d\n", core_id); return rc; } } ++ref->refc; - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); /* * Since the system can run either in accumulation or trace-mode @@ -867,7 +864,7 @@ static int core_imc_event_init(struct perf_event *event) * to know whether any other trace/thread imc * events are running.
*/ - mutex_lock(&imc_global_refc.lock); + spin_lock(&imc_global_refc.lock); if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) { /* * No other trace/thread imc events are running in @@ -876,10 +873,10 @@ static int core_imc_event_init(struct perf_event *event) imc_global_refc.id = IMC_DOMAIN_CORE; imc_global_refc.refc++; } else { - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); return -EBUSY; } - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); event->destroy = core_imc_counters_release; @@ -951,10 +948,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu) mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); /* Reduce the refc if thread-imc event running on this cpu */ - mutex_lock(&imc_global_refc.lock); + spin_lock(&imc_global_refc.lock); if (imc_global_refc.id == IMC_DOMAIN_THREAD) imc_global_refc.refc--; - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); return 0; } @@ -994,7 +991,7 @@ static int thread_imc_event_init(struct perf_event *event) if (!target) return -EINVAL; - mutex_lock(&imc_global_refc.lock); + spin_lock(&imc_global_refc.lock); /* * Check if any other trace/core imc events are running in the * system, if not set the global id to thread-imc. @@ -1003,10 +1000,10 @@ static int thread_imc_event_init(struct perf_event *event) imc_global_refc.id = IMC_DOMAIN_THREAD; imc_global_refc.refc++; } else { - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); return -EBUSY; } - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); event->pmu->task_ctx_nr = perf_sw_context; event->destroy = reset_global_refc; @@ -1128,25 +1125,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags) /* * imc pmus are enabled only when it is used. * See if this is triggered for the first time. - * If yes, take the mutex lock and enable the counters. + * If yes, take the lock and enable the counters. * If not, just increment the count in ref count struct. 
*/ ref = &core_imc_refc[core_id]; if (!ref) return -EINVAL; - mutex_lock(&ref->lock); + spin_lock(&ref->lock); if (ref->refc == 0) { if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, get_hard_smp_processor_id(smp_processor_id()))) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("thread-imc: Unable to start the counter\ for core %d\n", core_id); return -EINVAL; } } ++ref->refc; - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); return 0; } @@ -1163,12 +1160,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags) return; } - mutex_lock(&ref->lock); + spin_lock(&ref->lock); ref->refc--; if (ref->refc == 0) { if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, get_hard_smp_processor_id(smp_processor_id()))) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("thread-imc: Unable to stop the counters\ for core %d\n", core_id); return; @@ -1176,7 +1173,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags) } else if (ref->refc < 0) { ref->refc = 0; } - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); @@ -1217,9 +1214,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size) } } - /* Init the mutex, if not already */ trace_imc_refc[core_id].id = core_id; - mutex_init(&trace_imc_refc[core_id].lock); + spin_lock_init(&trace_imc_refc[core_id].lock); mtspr(SPRN_LDBAR, 0); return 0; @@ -1239,10 +1235,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu) * Reduce the refc if any trace-imc event running * on this cpu. */ - mutex_lock(&imc_global_refc.lock); + spin_lock(&imc_global_refc.lock); if (imc_global_refc.id == IMC_DOMAIN_TRACE) imc_global_refc.refc--; - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); return 0; } @@ -1364,17 +1360,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags) } mtspr(SPRN_LDBAR, ldbar_value); - mutex_lock(&ref->lock); + spin_lock(&ref->lock); if (ref->refc == 0) { if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE, get_hard_smp_processor_id(smp_processor_id()))) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("trace-imc: Unable to start the counters for core %d\n", core_id); return -EINVAL; } } ++ref->refc; - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); return 0; } @@ -1407,19 +1403,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags) return; } - mutex_lock(&ref->lock); + spin_lock(&ref->lock); ref->refc--; if (ref->refc == 0) { if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE, get_hard_smp_processor_id(smp_processor_id()))) { - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id); return; } } else if (ref->refc < 0) { ref->refc = 0; } - mutex_unlock(&ref->lock); + spin_unlock(&ref->lock); trace_imc_event_stop(event, flags); } @@ -1441,7 +1437,7 @@ static int trace_imc_event_init(struct perf_event *event) * no other thread is running any core/thread imc * events */ - mutex_lock(&imc_global_refc.lock); + spin_lock(&imc_global_refc.lock); if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) { /* * No core/thread imc events are running in the @@ -1450,10 +1446,10 @@ static int trace_imc_event_init(struct perf_event *event) imc_global_refc.id = IMC_DOMAIN_TRACE; imc_global_refc.refc++; } else { - mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); return -EBUSY; } - 
mutex_unlock(&imc_global_refc.lock); + spin_unlock(&imc_global_refc.lock); event->hw.idx = -1; @@ -1525,10 +1521,10 @@ static int init_nest_pmu_ref(void) i = 0; for_each_node(nid) { /* - * Mutex lock to avoid races while tracking the number of + * Take the lock to avoid races while tracking the number of * sessions using the chip's nest pmu units. */ - mutex_init(&nest_imc_refc[i].lock); + spin_lock_init(&nest_imc_refc[i].lock); /* * Loop to init the "id" with the node_id. Variable "i" initialized to diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h index fa9bc804e67af..6b2a59fefffa7 100644 --- a/arch/powerpc/perf/req-gen/perf.h +++ b/arch/powerpc/perf/req-gen/perf.h @@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \ #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \ r_fields +/* Generate event list for platforms with counter_info_version 0x6 or below */ +static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = { +#include REQUEST_FILE + NULL +}; + +/* + * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci + * events were deprecated for platform firmware that supports + * counter_info_version 0x8 or above. + * Those deprecated events are still part of platform firmware that + * support counter_info_version 0x6 and below. As per the getPerfCountInfo + * v1.018 documentation there is no counter_info_version 0x7. + * Undefining macro ENABLE_EVENTS_COUNTERINFO_V6, to disable the addition of + * deprecated events in "hv_gpci_event_attrs" attribute group, for platforms + * that supports counter_info_version 0x8 or above. + */ +#undef ENABLE_EVENTS_COUNTERINFO_V6 + +/* Generate event list for platforms with counter_info_version 0x8 or above*/ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = { #include REQUEST_FILE NULL diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 05e19470d523c..22e264bd3ed21 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -530,6 +530,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op) err_bcom_rx_irq: bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); err_bcom_rx: + free_irq(lpbfifo.irq, &lpbfifo); err_irq: iounmap(lpbfifo.regs); lpbfifo.regs = NULL; diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index 622c625d5ce4b..1114b6a11b3f8 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -106,7 +106,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, goto next; unreg: - platform_device_del(pdev); + platform_device_put(pdev); err: pr_err("%pOF: registration failed\n", np); next: diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index c61c3b62c8c62..1d05c168c8fb5 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -892,6 +892,7 @@ static void opal_export_attrs(void) kobj = kobject_create_and_add("exports", opal_kobj); if (!kobj) { pr_warn("kobject_create_and_add() of exports failed\n"); + of_node_put(np); return; } diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 7ed38ebd0c7b6..4601ad10ca7b4 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -846,18 +846,8 @@ static int __init eeh_pseries_init(void) return -EINVAL; } - /* Initialize 
error log lock and size */ - spin_lock_init(&slot_errbuf_lock); - eeh_error_buf_size = rtas_token("rtas-error-log-max"); - if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { - pr_info("%s: unknown EEH error log size\n", - __func__); - eeh_error_buf_size = 1024; - } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { - pr_info("%s: EEH error log size %d exceeds the maximal %d\n", - __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); - eeh_error_buf_size = RTAS_ERROR_LOG_MAX; - } + /* Initialize error log size */ + eeh_error_buf_size = rtas_get_error_log_max(); /* Set EEH probe mode */ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 2f73cb5bf12d5..f386a7bc38114 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -59,18 +59,31 @@ static int mobility_rtas_call(int token, char *buf, s32 scope) return rc; } -static int delete_dt_node(__be32 phandle) +static int delete_dt_node(struct device_node *dn) { - struct device_node *dn; + struct device_node *pdn; + bool is_platfac; - dn = of_find_node_by_phandle(be32_to_cpu(phandle)); - if (!dn) - return -ENOENT; + pdn = of_get_parent(dn); + is_platfac = of_node_is_type(dn, "ibm,platform-facilities") || + of_node_is_type(pdn, "ibm,platform-facilities"); + of_node_put(pdn); - pr_debug("removing node %pOFfp\n", dn); + /* + * The drivers that bind to nodes in the platform-facilities + * hierarchy don't support node removal, and the removal directive + * from firmware is always followed by an add of an equivalent + * node. The capability (e.g. RNG, encryption, compression) + * represented by the node is never interrupted by the migration. + * So ignore changes to this part of the tree. + */ + if (is_platfac) { + pr_notice("ignoring remove operation for %pOFfp\n", dn); + return 0; + } + pr_debug("removing node %pOFfp\n", dn); dlpar_detach_node(dn); - of_node_put(dn); return 0; } @@ -135,10 +148,9 @@ static int update_dt_property(struct device_node *dn, struct property **prop, return 0; } -static int update_dt_node(__be32 phandle, s32 scope) +static int update_dt_node(struct device_node *dn, s32 scope) { struct update_props_workarea *upwa; - struct device_node *dn; struct property *prop = NULL; int i, rc, rtas_rc; char *prop_data; @@ -155,14 +167,8 @@ static int update_dt_node(__be32 phandle, s32 scope) if (!rtas_buf) return -ENOMEM; - dn = of_find_node_by_phandle(be32_to_cpu(phandle)); - if (!dn) { - kfree(rtas_buf); - return -ENOENT; - } - upwa = (struct update_props_workarea *)&rtas_buf[0]; - upwa->phandle = phandle; + upwa->phandle = cpu_to_be32(dn->phandle); do { rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf, @@ -221,25 +227,30 @@ static int update_dt_node(__be32 phandle, s32 scope) cond_resched(); } while (rtas_rc == 1); - of_node_put(dn); kfree(rtas_buf); return 0; } -static int add_dt_node(__be32 parent_phandle, __be32 drc_index) +static int add_dt_node(struct device_node *parent_dn, __be32 drc_index) { struct device_node *dn; - struct device_node *parent_dn; int rc; - parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle)); - if (!parent_dn) - return -ENOENT; - dn = dlpar_configure_connector(drc_index, parent_dn); - if (!dn) { - of_node_put(parent_dn); + if (!dn) return -ENOENT; + + /* + * Since delete_dt_node() ignores this node type, this is the + * necessary counterpart. 
We also know that a platform-facilities + * node returned from dlpar_configure_connector() has children + * attached, and dlpar_attach_node() only adds the parent, leaking + * the children. So ignore these on the add side for now. + */ + if (of_node_is_type(dn, "ibm,platform-facilities")) { + pr_notice("ignoring add operation for %pOF\n", dn); + dlpar_free_cc_nodes(dn); + return 0; } rc = dlpar_attach_node(dn, parent_dn); @@ -248,7 +259,6 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) pr_debug("added node %pOFfp\n", dn); - of_node_put(parent_dn); return rc; } @@ -281,22 +291,31 @@ int pseries_devicetree_update(s32 scope) data++; for (i = 0; i < node_count; i++) { + struct device_node *np; __be32 phandle = *data++; __be32 drc_index; + np = of_find_node_by_phandle(be32_to_cpu(phandle)); + if (!np) { + pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n", + be32_to_cpu(phandle), action); + continue; + } + switch (action) { case DELETE_DT_NODE: - delete_dt_node(phandle); + delete_dt_node(np); break; case UPDATE_DT_NODE: - update_dt_node(phandle, scope); + update_dt_node(np, scope); break; case ADD_DT_NODE: drc_index = *data++; - add_dt_node(phandle, drc_index); + add_dt_node(np, drc_index); break; } + of_node_put(np); cond_resched(); } } diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 808e7118abfc5..d276c5e964458 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -211,8 +211,10 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) dev_err(&pdev->dev, "node %pOF has an invalid fsl,msi phandle %u\n", hose->dn, np->phandle); + of_node_put(np); return -EINVAL; } + of_node_put(np); } for_each_pci_msi_entry(entry, pdev) { diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 38e8b98961744..53cf14349d5e9 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -425,6 +425,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); if (!data->trig_mmio) { + iounmap(data->eoi_mmio); pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); return -ENOMEM; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 5559edf36756c..2872b66d9fec7 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1383,7 +1383,6 @@ static long check_bp_loc(unsigned long addr) return 1; } -#ifndef CONFIG_PPC_8xx static int find_free_data_bpt(void) { int i; @@ -1395,7 +1394,6 @@ static int find_free_data_bpt(void) printf("Couldn't find free breakpoint register\n"); return -1; } -#endif static void print_data_bpts(void) { @@ -1435,10 +1433,9 @@ bpt_cmds(void) cmd = inchar(); switch (cmd) { -#ifndef CONFIG_PPC_8xx - static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; - int mode; - case 'd': /* bd - hardware data breakpoint */ + case 'd': { /* bd - hardware data breakpoint */ + static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; + int mode; if (xmon_is_ro) { printf(xmon_ro_msg); break; @@ -1471,6 +1468,7 @@ bpt_cmds(void) force_enable_xmon(); break; + } case 'i': /* bi - hardware instr breakpoint */ if (xmon_is_ro) { @@ -1497,7 +1495,6 @@ bpt_cmds(void) force_enable_xmon(); } break; -#endif case 'c': if (!scanhex(&a)) { diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 1b894c3275781..557c4a8c4087d 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig 
@@ -35,7 +35,7 @@ config RISCV select CLINT_TIMER if !MMU select COMMON_CLK select EDAC_SUPPORT - select GENERIC_ARCH_TOPOLOGY if SMP + select GENERIC_ARCH_TOPOLOGY select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS select GENERIC_EARLY_IOREMAP diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 3d3016092b31c..9446282b52bab 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -37,6 +37,7 @@ else endif ifeq ($(CONFIG_LD_IS_LLD),y) +ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0) KBUILD_CFLAGS += -mno-relax KBUILD_AFLAGS += -mno-relax ifneq ($(LLVM_IAS),1) @@ -44,6 +45,7 @@ ifneq ($(LLVM_IAS),1) KBUILD_AFLAGS += -Wa,-mno-relax endif endif +endif # ISA string setting riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima @@ -72,6 +74,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y) KBUILD_CFLAGS += -fno-omit-frame-pointer endif +# Avoid generating .eh_frame sections. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables + KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index 60846e88ae4b1..dddabfbbc7a90 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -3,6 +3,8 @@ #include "fu540-c000.dtsi" #include +#include +#include /* Clock frequency (in Hz) of the PCB crystal for rtcclk */ #define RTCCLK_FREQ 1000000 @@ -46,6 +48,42 @@ compatible = "gpio-restart"; gpios = <&gpio 10 GPIO_ACTIVE_LOW>; }; + + led-controller { + compatible = "pwm-leds"; + + led-d1 { + pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d1"; + }; + + led-d2 { + pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d2"; + }; + + led-d3 { + pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d3"; + }; + + led-d4 { + pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d4"; + }; + }; }; &uart0 { diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index a5c2ca1d1cd8b..ec19d6afc8965 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -5,4 +5,10 @@ #include #include +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} +#define arch_clear_hugepage_flags arch_clear_hugepage_flags + #endif /* _ASM_RISCV_HUGETLB_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index c025a746a1486..391dd869db64b 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -114,9 +114,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr)) __io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr)) __io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr)) __io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr)) -#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count) -#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count) -#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count) +#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count) +#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count) +#define insl(addr, buffer, count) 
__insl(PCI_IOBASE + (addr), buffer, count) __io_writes_outs(writes, u8, b, __io_bw(), __io_aw()) __io_writes_outs(writes, u16, w, __io_bw(), __io_aw()) @@ -128,22 +128,22 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw()) __io_writes_outs(outs, u8, b, __io_pbw(), __io_paw()) __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw()) __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) -#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count) -#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count) -#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count) +#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count) +#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count) +#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count) #ifdef CONFIG_64BIT __io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr)) #define readsq(addr, buffer, count) __readsq(addr, buffer, count) __io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr)) -#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count) +#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count) __io_writes_outs(writes, u64, q, __io_bw(), __io_aw()) #define writesq(addr, buffer, count) __writesq(addr, buffer, count) __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) -#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count) +#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count) #endif #include diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 73e8b5e5bb654..b16304fdf4489 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -470,6 +470,7 @@ extern void *dtb_early_va; extern uintptr_t dtb_early_pa; void setup_bootmem(void); void paging_init(void); +void misc_mem_init(void); #define FIRST_USER_ADDRESS 0 diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index d79ae9d98999f..c55e0a1f07a0f 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -80,6 +80,7 @@ struct thread_info { #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ #define TIF_SECCOMP 8 /* syscall secure computing */ +#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -88,9 +89,11 @@ struct thread_info { #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_WORK_MASK \ - (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED) + (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_WORK \ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index f944062c9d990..66af6abfe8af4 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -216,7 +216,7 @@ do { \ might_fault(); \ access_ok(__p, sizeof(*__p)) ? 
\ __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + ((x) = (__force __typeof__(x))0, -EFAULT); \ }) #define __put_user_asm(insn, x, ptr, err) \ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index dd5f985b1f40e..7868050ff426d 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -111,8 +111,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, { struct pt_regs *childregs = task_pt_regs(p); + memset(&p->thread.s, 0, sizeof(p->thread.s)); + /* p->thread holds context to be restored by __switch_to() */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* Kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); childregs->gp = gp_in_global; diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 117f3212a8e4b..8e78a8ab6a345 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -54,10 +54,17 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); static void __init parse_dtb(void) { /* Early scan of device tree from init memory */ - if (early_init_dt_scan(dtb_early_va)) - return; + if (early_init_dt_scan(dtb_early_va)) { + const char *name = of_flat_dt_get_machine_name(); + + if (name) { + pr_info("Machine model: %s\n", name); + dump_stack_set_arch_desc("%s (DT)", name); + } + } else { + pr_err("No DTB passed to the kernel\n"); + } - pr_err("No DTB passed to the kernel\n"); #ifdef CONFIG_CMDLINE_FORCE strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); pr_info("Forcing kernel command line to: %s\n", boot_command_line); @@ -89,6 +96,8 @@ void __init setup_arch(char **cmdline_p) else pr_err("No DTB found in kernel mappings\n"); #endif + early_init_fdt_scan_reserved_mem(); + misc_mem_init(); #ifdef CONFIG_SWIOTLB swiotlb_init(1); diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index bc6841867b512..50a8225c58bca 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -121,6 +121,8 @@ SYSCALL_DEFINE0(rt_sigreturn) if (restore_altstack(&frame->uc.uc_stack)) goto badframe; + regs->cause = -1UL; + return regs->a0; badframe: @@ -310,7 +312,7 @@ asmlinkage __visible void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { /* Handle pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 0b04e0eae3ab5..0e0aed380e281 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -46,6 +46,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) int cpuid; int ret; + store_cpu_topology(smp_processor_id()); + /* This covers non-smp usecase mandated by "nosmp" option */ if (max_cpus == 0) return; @@ -152,8 +154,8 @@ asmlinkage __visible void smp_callin(void) mmgrab(mm); current->active_mm = mm; + store_cpu_topology(curr_cpuid); notify_cpu_starting(curr_cpuid); - update_siblings_masks(curr_cpuid); set_cpu_online(curr_cpuid, 1); /* diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 595342910c3f6..1e53fbe5eb783 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -57,9 +57,15 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, /* Unwind stack frame */ frame = (struct stackframe *)fp - 1; sp = fp; - fp = frame->fp; - pc = ftrace_graph_ret_addr(current, 
NULL, frame->ra, - (unsigned long *)(fp - 8)); + if (regs && (regs->epc == pc) && (frame->fp & 0x7)) { + fp = frame->ra; + pc = regs->ra; + } else { + fp = frame->fp; + pc = ftrace_graph_ret_addr(current, NULL, frame->ra, + &frame->ra); + } + } } diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 8a7880b9c433e..bb402685057a2 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -18,9 +18,6 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) return -EINVAL; - if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ))) - return -EINVAL; - return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - page_shift_offset)); } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index c1a13011fb8e5..23fe03ca7ec7b 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -57,7 +57,7 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 24d936c147cdf..f4ac7ff56bcea 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -17,6 +17,7 @@ vdso-syms += flush_icache obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o ccflags-y := -fno-stack-protector +ccflags-y += -DDISABLE_BRANCH_PROFILING ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y) @@ -28,9 +29,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) obj-y += vdso.o vdso-syms.o CPPFLAGS_vdso.lds += -P -C -U$(ARCH) +ifneq ($(filter vgettimeofday, $(vdso-syms)),) +CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY +endif # Disable -pg to prevent insert call site -CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) # Disable profiling and instrumentation for VDSO code GCOV_PROFILE := n diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index e6f558bca71bb..b3e58402c3426 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -64,9 +64,11 @@ VERSION LINUX_4.15 { global: __vdso_rt_sigreturn; +#ifdef HAS_VGETTIMEOFDAY __vdso_gettimeofday; __vdso_clock_gettime; __vdso_clock_getres; +#endif __vdso_getcpu; __vdso_flush_icache; local: *; diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 89f81067e09ed..2ae1201cff886 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -85,7 +85,9 @@ void flush_icache_pte(pte_t pte) { struct page *page = pte_page(pte); - if (!test_and_set_bit(PG_dcache_clean, &page->flags)) + if (!test_bit(PG_dcache_clean, &page->flags)) { flush_icache_all(); + set_bit(PG_dcache_clean, &page->flags); + } } #endif /* CONFIG_MMU */ diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 3c8b9e433c673..54b12943cc7b0 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -34,7 +34,7 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr) (addr < PAGE_SIZE) ? 
"NULL pointer dereference" : "paging request", addr); die(regs, "Oops"); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) @@ -167,7 +167,8 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma) } break; case EXC_LOAD_PAGE_FAULT: - if (!(vma->vm_flags & VM_READ)) { + /* Write implies read */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) { return true; } break; diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index e8921e78a2926..6c2f38aac5443 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -41,13 +42,14 @@ struct pt_alloc_ops { #endif }; +static phys_addr_t dma32_phys_limit __ro_after_init; + static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, - (unsigned long) PFN_PHYS(max_low_pfn))); + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; @@ -193,6 +195,7 @@ void __init setup_bootmem(void) max_pfn = PFN_DOWN(dram_end); max_low_pfn = max_pfn; + dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn); #ifdef CONFIG_BLK_DEV_INITRD @@ -205,7 +208,7 @@ void __init setup_bootmem(void) */ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); - early_init_fdt_scan_reserved_mem(); + dma_contiguous_reserve(dma32_phys_limit); memblock_allow_resize(); memblock_dump_all(); } @@ -665,8 +668,12 @@ static void __init resource_init(void) void __init paging_init(void) { setup_vm_final(); - sparse_init(); setup_zero_page(); +} + +void __init misc_mem_init(void) +{ + sparse_init(); zone_sizes_init(); resource_init(); } diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c index 3061b11c4d27f..8eaa1712a1c8d 100644 --- a/arch/s390/boot/compressed/decompressor.c +++ b/arch/s390/boot/compressed/decompressor.c @@ -79,6 +79,6 @@ void *decompress_kernel(void) void *output = (void *)decompress_offset; __decompress(_compressed_start, _compressed_end - _compressed_start, - NULL, NULL, output, 0, NULL, error); + NULL, NULL, output, vmlinux.image_size, NULL, error); return output; } diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S index 9427e2cd0c154..11bf3919610e4 100644 --- a/arch/s390/boot/compressed/vmlinux.lds.S +++ b/arch/s390/boot/compressed/vmlinux.lds.S @@ -91,8 +91,17 @@ SECTIONS _compressed_start = .; *(.vmlinux.bin.compressed) _compressed_end = .; - FILL(0xff); - . = ALIGN(4096); + } + +#define SB_TRAILER_SIZE 32 + /* Trailer needed for Secure Boot */ + . += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */ + . 
= ALIGN(4096) - SB_TRAILER_SIZE; + .sb.trailer : { + QUAD(0) + QUAD(0) + QUAD(0) + QUAD(0x000000207a49504c) } _end = .; diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 0d90cbeb89b43..a0914bc6c9bdd 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -128,19 +128,21 @@ struct hws_combined_entry { struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ } __packed; -struct hws_trailer_entry { - union { - struct { - unsigned int f:1; /* 0 - Block Full Indicator */ - unsigned int a:1; /* 1 - Alert request control */ - unsigned int t:1; /* 2 - Timestamp format */ - unsigned int :29; /* 3 - 31: Reserved */ - unsigned int bsdes:16; /* 32-47: size of basic SDE */ - unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ - }; - unsigned long long flags; /* 0 - 63: All indicators */ +union hws_trailer_header { + struct { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ + unsigned int :29; /* 3 - 31: Reserved */ + unsigned int bsdes:16; /* 32-47: size of basic SDE */ + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ + unsigned long long overflow; /* 64 - Overflow Count */ }; - unsigned long long overflow; /* 64 - sample Overflow count */ + __uint128_t val; +}; + +struct hws_trailer_entry { + union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count */ unsigned char timestamp[16]; /* 16 - 31 timestamp */ unsigned long long reserved1; /* 32 -Reserved */ unsigned long long reserved2; /* */ @@ -287,14 +289,11 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi, return USEC_PER_SEC * qsi->cpu_speed / rate; } -#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL -#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL - /* Return TOD timestamp contained in a trailer entry */ static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) { /* TOD in STCKE format */ - if (te->t) + if (te->header.t) return *((unsigned long long *) &te->timestamp[1]); /* TOD in STCK format */ diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index c1b82bcc017cf..29a1badbe2f5d 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -4,8 +4,8 @@ * * Copyright IBM Corp. 1999, 2020 */ -#ifndef DEBUG_H -#define DEBUG_H +#ifndef _ASM_S390_DEBUG_H +#define _ASM_S390_DEBUG_H #include #include @@ -425,4 +425,4 @@ int debug_unregister_view(debug_info_t *id, struct debug_view *view); #define PRINT_FATAL(x...)
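
/*
 * A compile-and-run sketch of the union introduced above: the flag bits and
 * the overflow counter are folded into one 16-byte header so they can be
 * updated atomically as a single 128-bit value (see the cdsg rework in the
 * perf_cpum_sf.c hunks further down). Bitfield order follows the s390
 * big-endian layout from the patch; on a little-endian host the bit
 * positions differ, so this only illustrates the size/overlay idea.
 * __uint128_t is a GCC/Clang extension on 64-bit targets.
 */
#include <assert.h>
#include <stdio.h>

union trailer_header {
    struct {
        unsigned int f:1;            /* block-full indicator */
        unsigned int a:1;            /* alert request control */
        unsigned int t:1;            /* timestamp format */
        unsigned int :29;            /* reserved */
        unsigned int bsdes:16;       /* size of basic SDE */
        unsigned int dsdes:16;       /* size of diagnostic SDE */
        unsigned long long overflow; /* sample overflow count */
    };
    __uint128_t val;                 /* whole header as one value */
};

int main(void)
{
    union trailer_header h = { .val = 0 };

    static_assert(sizeof(h) == 16, "header must stay 16 bytes");
    h.a = 1;        /* request an alert ... */
    h.overflow = 0; /* ... and clear the counter, in one 16-byte unit */
    printf("low 64 bits: %#llx\n", (unsigned long long)h.val);
    return 0;
}
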
printk(KERN_DEBUG PRINTK_HEADER x) #endif /* DASD_DEBUG */ -#endif /* DEBUG_H */ +#endif /* _ASM_S390_DEBUG_H */ diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 26f9144562c9e..e1d0b2aaaddd3 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -16,7 +16,8 @@ "3: jl 1b\n" \ " lhi %0,0\n" \ "4: sacf 768\n" \ - EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ + EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ + EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ "=m" (*uaddr) \ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 918f0ba4f4d20..5e26e2c4641b4 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -31,7 +31,7 @@ pcp_op_T__ *ptr__; \ preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ - prev__ = *ptr__; \ + prev__ = READ_ONCE(*ptr__); \ do { \ old__ = prev__; \ new__ = old__ op (val); \ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 13a04fcf77625..0045341ade486 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -65,6 +65,7 @@ void arch_setup_new_exec(void); #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ #define TIF_PATCH_PENDING 5 /* pending live patching update */ #define TIF_PGSTE 6 /* New mm's will use 4K page tables */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_ISOLATE_BP 8 /* Run process with isolated BP */ #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ @@ -82,6 +83,7 @@ void arch_setup_new_exec(void); #define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ #define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) #define _TIF_SIGPENDING BIT(TIF_SIGPENDING) #define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED) #define _TIF_UPROBE BIT(TIF_UPROBE) diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 76762dc67ca90..f292c3e106710 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -44,7 +44,7 @@ struct save_area { u64 fprs[16]; u32 fpc; u32 prefix; - u64 todpreg; + u32 todpreg; u64 timer; u64 todcmp; u64 vxrs_low[16]; diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 0dc4b258b98d5..763e726025b3d 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -214,5 +214,5 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception: panic_on_oops"); oops_exit(); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 963e8cb936e28..88ecbcf097a36 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -52,7 +52,8 @@ STACK_SIZE = 1 << STACK_SHIFT STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING) + _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \ + _TIF_NOTIFY_SIGNAL) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT) _CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU) @@ -481,8 +482,8 @@ ENTRY(system_call) #endif TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART jo .Lsysc_syscall_restart - TSTMSK __TI_flags(%r12),_TIF_SIGPENDING - jo .Lsysc_sigpending + TSTMSK 
__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) + jnz .Lsysc_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lsysc_notify_resume TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) @@ -863,8 +864,8 @@ ENTRY(io_int_handler) TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING jo .Lio_patch_pending #endif - TSTMSK __TI_flags(%r12),_TIF_SIGPENDING - jo .Lio_sigpending + TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) + jnz .Lio_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lio_notify_resume TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c index 53da174754d97..bf0596749ebd3 100644 --- a/arch/s390/kernel/machine_kexec_file.c +++ b/arch/s390/kernel/machine_kexec_file.c @@ -185,8 +185,6 @@ static int kexec_file_add_ipl_report(struct kimage *image, data->memsz = ALIGN(data->memsz, PAGE_SIZE); buf.mem = data->memsz; - if (image->type == KEXEC_TYPE_CRASH) - buf.mem += crashk_res.start; ptr = (void *)ipl_cert_list_addr; end = ptr + ipl_cert_list_size; @@ -223,6 +221,9 @@ static int kexec_file_add_ipl_report(struct kimage *image, data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr); *lc_ipl_parmblock_ptr = (__u32)buf.mem; + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + ret = kexec_add_buffer(&buf); out: return ret; diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 86c8d5370e7f4..0102376eca3db 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -178,7 +178,7 @@ void s390_handle_mcck(void) "malfunction (code 0x%016lx).\n", mcck.mcck_code); printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", current->comm, current->pid); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } } EXPORT_SYMBOL_GPL(s390_handle_mcck); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 19cd7b961c45c..bcd31e0b4edb3 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -163,14 +163,15 @@ static void free_sampling_buffer(struct sf_buffer *sfb) static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) { - unsigned long sdb, *trailer; + struct hws_trailer_entry *te; + unsigned long sdb; /* Allocate and initialize sample-data-block */ sdb = get_zeroed_page(gfp_flags); if (!sdb) return -ENOMEM; - trailer = trailer_entry_ptr(sdb); - *trailer = SDB_TE_ALERT_REQ_MASK; + te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb); + te->header.a = 1; /* Link SDB into the sample-data-block-table */ *sdbt = sdb; @@ -1206,7 +1207,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, "%s: Found unknown" " sampling data entry: te->f %i" " basic.def %#4x (%p)\n", __func__, - te->f, sample->def, sample); + te->header.f, sample->def, sample); /* Sample slot is not yet written or other record. * * This condition can occur if the buffer was reused @@ -1217,7 +1218,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, * that are not full. Stop processing if the first * invalid format was detected. 
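
/*
 * Why the entry.S hunks above pair the wider mask with "jnz" instead of
 * "jo": after TSTMSK, "jo" branches only when every tested bit is one,
 * while "jnz" branches when at least one is. With a two-bit mask either
 * flag must reach the signal path. The same test in C (bit 7 comes from
 * the s390 thread_info.h hunk above; bit 1 for SIGPENDING is illustrative):
 */
#include <stdio.h>

#define _TIF_SIGPENDING    (1UL << 1)
#define _TIF_NOTIFY_SIGNAL (1UL << 7)

int main(void)
{
    unsigned long flags = _TIF_NOTIFY_SIGNAL; /* only one bit set */
    unsigned long mask  = _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL;

    printf("jo  semantics (all bits set): %d\n", (flags & mask) == mask);
    printf("jnz semantics (any bit set):  %d\n", (flags & mask) != 0);
    return 0;
}
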
*/ - if (!te->f) + if (!te->header.f) break; } @@ -1227,6 +1228,16 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, } } +static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t new) +{ + asm volatile( + " cdsg %[old],%[new],%[ptr]\n" + : [old] "+d" (old), [ptr] "+QS" (*ptr) + : [new] "d" (new) + : "memory", "cc"); + return old; +} + /* hw_perf_event_update() - Process sampling buffer * @event: The perf event * @flush_all: Flag to also flush partially filled sample-data-blocks @@ -1243,10 +1254,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, */ static void hw_perf_event_update(struct perf_event *event, int flush_all) { + unsigned long long event_overflow, sampl_overflow, num_sdb; + union hws_trailer_header old, prev, new; struct hw_perf_event *hwc = &event->hw; struct hws_trailer_entry *te; unsigned long *sdbt; - unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; int done; /* @@ -1266,25 +1278,25 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); /* Leave loop if no more work to do (block full indicator) */ - if (!te->f) { + if (!te->header.f) { done = 1; if (!flush_all) break; } /* Check the sample overflow count */ - if (te->overflow) + if (te->header.overflow) /* Account sample overflows and, if a particular limit * is reached, extend the sampling buffer. * For details, see sfb_account_overflows(). */ - sampl_overflow += te->overflow; + sampl_overflow += te->header.overflow; /* Timestamps are valid for full sample-data-blocks only */ debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx " "overflow %llu timestamp %#llx\n", - __func__, (unsigned long)sdbt, te->overflow, - (te->f) ? trailer_timestamp(te) : 0ULL); + __func__, (unsigned long)sdbt, te->header.overflow, + (te->header.f) ? trailer_timestamp(te) : 0ULL); /* Collect all samples from a single sample-data-block and * flag if an (perf) event overflow happened. 
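
/*
 * A userspace sketch of what __cdsg() above provides: an atomic 16-byte
 * compare-and-swap that returns the previous value, so the retry loop can
 * feed the result straight back in without re-reading memory. Built with
 * GCC/Clang on x86-64 this wants -mcx16 (cmpxchg16b) or libatomic; the
 * s390 original uses the cdsg instruction directly.
 */
#include <stdio.h>

static __uint128_t cas128(__uint128_t *ptr, __uint128_t old, __uint128_t new)
{
    /* On failure the builtin writes the current value back into old. */
    __atomic_compare_exchange_n(ptr, &old, new, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return old; /* previous contents, like cdsg */
}

int main(void)
{
    __uint128_t word = 42, prev, old;

    /* Same shape as the trailer-reset loop in the hunk above. */
    prev = cas128(&word, 0, 0); /* 16-byte "READ_ONCE" via a no-op CAS */
    do {
        old = prev;
        prev = cas128(&word, old, old + 1);
    } while (prev != old);

    printf("word is now %llu\n", (unsigned long long)word);
    return 0;
}
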
If so, the PMU @@ -1294,12 +1306,16 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) num_sdb++; /* Reset trailer (using compare-double-and-swap) */ + /* READ_ONCE() 16 byte header */ + prev.val = __cdsg(&te->header.val, 0, 0); do { - te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; - te_flags |= SDB_TE_ALERT_REQ_MASK; - } while (!cmpxchg_double(&te->flags, &te->overflow, - te->flags, te->overflow, - te_flags, 0ULL)); + old.val = prev.val; + new.val = prev.val; + new.f = 0; + new.a = 1; + new.overflow = 0; + prev.val = __cdsg(&te->header.val, old.val, new.val); + } while (prev.val != old.val); /* Advance to next sample-data-block */ sdbt++; @@ -1384,7 +1400,7 @@ static void aux_output_end(struct perf_output_handle *handle) range_scan = AUX_SDB_NUM_ALERT(aux); for (i = 0, idx = aux->head; i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); - if (!(te->flags & SDB_TE_BUFFER_FULL_MASK)) + if (!te->header.f) break; } /* i is num of SDBs which are full */ @@ -1392,7 +1408,7 @@ static void aux_output_end(struct perf_output_handle *handle) /* Remove alert indicators in the buffer */ te = aux_sdb_trailer(aux, aux->alert_mark); - te->flags &= ~SDB_TE_ALERT_REQ_MASK; + te->header.a = 0; debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n", __func__, i, range_scan, aux->head); @@ -1437,9 +1453,9 @@ static int aux_output_begin(struct perf_output_handle *handle, idx = aux->empty_mark + 1; for (i = 0; i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); - te->flags &= ~(SDB_TE_BUFFER_FULL_MASK | - SDB_TE_ALERT_REQ_MASK); - te->overflow = 0; + te->header.f = 0; + te->header.a = 0; + te->header.overflow = 0; } /* Save the position of empty SDBs */ aux->empty_mark = aux->head + range - 1; @@ -1448,7 +1464,7 @@ static int aux_output_begin(struct perf_output_handle *handle, /* Set alert indicator */ aux->alert_mark = aux->head + range/2 - 1; te = aux_sdb_trailer(aux, aux->alert_mark); - te->flags = te->flags | SDB_TE_ALERT_REQ_MASK; + te->header.a = 1; /* Reset hardware buffer head */ head = AUX_SDB_INDEX(aux, aux->head); @@ -1475,14 +1491,17 @@ static int aux_output_begin(struct perf_output_handle *handle, static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, unsigned long long *overflow) { - unsigned long long orig_overflow, orig_flags, new_flags; + union hws_trailer_header old, prev, new; struct hws_trailer_entry *te; te = aux_sdb_trailer(aux, alert_index); + /* READ_ONCE() 16 byte header */ + prev.val = __cdsg(&te->header.val, 0, 0); do { - orig_flags = te->flags; - *overflow = orig_overflow = te->overflow; - if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { + old.val = prev.val; + new.val = prev.val; + *overflow = old.overflow; + if (old.f) { /* * SDB is already set by hardware. 
* Abort and try to set somewhere @@ -1490,10 +1509,10 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, */ return false; } - new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK; - } while (!cmpxchg_double(&te->flags, &te->overflow, - orig_flags, orig_overflow, - new_flags, 0ULL)); + new.a = 1; + new.overflow = 0; + prev.val = __cdsg(&te->header.val, old.val, new.val); + } while (prev.val != old.val); return true; } @@ -1522,8 +1541,9 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, unsigned long long *overflow) { - unsigned long long orig_overflow, orig_flags, new_flags; unsigned long i, range_scan, idx, idx_old; + union hws_trailer_header old, prev, new; + unsigned long long orig_overflow; struct hws_trailer_entry *te; debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld " @@ -1554,17 +1574,20 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, idx_old = idx = aux->empty_mark + 1; for (i = 0; i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); + /* READ_ONCE() 16 byte header */ + prev.val = __cdsg(&te->header.val, 0, 0); do { - orig_flags = te->flags; - orig_overflow = te->overflow; - new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK; + old.val = prev.val; + new.val = prev.val; + orig_overflow = old.overflow; + new.f = 0; + new.overflow = 0; if (idx == aux->alert_mark) - new_flags |= SDB_TE_ALERT_REQ_MASK; + new.a = 1; else - new_flags &= ~SDB_TE_ALERT_REQ_MASK; - } while (!cmpxchg_double(&te->flags, &te->overflow, - orig_flags, orig_overflow, - new_flags, 0ULL)); + new.a = 0; + prev.val = __cdsg(&te->header.val, old.val, new.val); + } while (prev.val != old.val); *overflow += orig_overflow; } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 137a170f47d4f..bd7da4049707c 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -127,7 +127,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, frame->sf.gprs[9] = (unsigned long) frame; /* Store access registers to kernel stack of new process. 
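
/*
 * The copy_thread() change just below (and its sh/sparc/um twins later in
 * this series) all make the same decision: an io_uring worker starts life
 * in the kernel, just like a kthread, so it needs the kernel-thread
 * register setup rather than a copy of the parent's user frame. Distilled
 * to a sketch; the flag values are illustrative, not the kernel's:
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_IO_WORKER 0x00000010 /* illustrative value */
#define PF_KTHREAD   0x00200000 /* illustrative value */

static bool needs_kernel_frame(unsigned int flags)
{
    /* The old test checked PF_KTHREAD alone and mis-set-up io workers. */
    return flags & (PF_KTHREAD | PF_IO_WORKER);
}

int main(void)
{
    printf("io worker -> kernel frame? %d\n",
           needs_kernel_frame(PF_IO_WORKER));
    return 0;
}
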
*/ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(&frame->childregs, 0, sizeof(struct pt_regs)); frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index b51ab19eb9721..64d1dfe6dca53 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -81,8 +81,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) struct esca_block *sca = vcpu->kvm->arch.sca; union esca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; + union esca_sigp_ctrl new_val = {0}, old_val; + old_val = READ_ONCE(*sigp_ctrl); new_val.scn = src_id; new_val.c = 1; old_val.c = 0; @@ -93,8 +94,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) struct bsca_block *sca = vcpu->kvm->arch.sca; union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; + union bsca_sigp_ctrl new_val = {0}, old_val; + old_val = READ_ONCE(*sigp_ctrl); new_val.scn = src_id; new_val.c = 1; old_val.c = 0; @@ -124,16 +126,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu) struct esca_block *sca = vcpu->kvm->arch.sca; union esca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union esca_sigp_ctrl old = *sigp_ctrl; + union esca_sigp_ctrl old; + old = READ_ONCE(*sigp_ctrl); expect = old.value; rc = cmpxchg(&sigp_ctrl->value, old.value, 0); } else { struct bsca_block *sca = vcpu->kvm->arch.sca; union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union bsca_sigp_ctrl old = *sigp_ctrl; + union bsca_sigp_ctrl old; + old = READ_ONCE(*sigp_ctrl); expect = old.value; rc = cmpxchg(&sigp_ctrl->value, old.value, 0); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d8e9239c24ffc..59db85fb63e1c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1092,6 +1092,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm, return 0; } +static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); + static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) { struct kvm_s390_vm_tod_clock gtod; @@ -1101,7 +1103,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) return -EINVAL; - kvm_s390_set_tod_clock(kvm, >od); + __kvm_s390_set_tod_clock(kvm, >od); VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", gtod.epoch_idx, gtod.tod); @@ -1132,7 +1134,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) sizeof(gtod.tod))) return -EFAULT; - kvm_s390_set_tod_clock(kvm, >od); + __kvm_s390_set_tod_clock(kvm, >od); VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); return 0; } @@ -1144,6 +1146,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) if (attr->flags) return -EINVAL; + mutex_lock(&kvm->lock); + /* + * For protected guests, the TOD is managed by the ultravisor, so trying + * to change it will never bring the expected results. 
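
/*
 * The interrupt.c hunks above add READ_ONCE() because the old value is
 * used twice: once to build the new SIGP control word and once as the
 * "expected" operand of cmpxchg(). Without READ_ONCE the compiler may
 * legally read *sigp_ctrl twice and hand cmpxchg a different snapshot than
 * the one that was edited. A generic userspace sketch of the safe pattern:
 */
#include <stdio.h>

static unsigned int shared_ctrl = 0x1234;

int main(void)
{
    /* One explicit snapshot ... */
    unsigned int old = __atomic_load_n(&shared_ctrl, __ATOMIC_RELAXED);
    unsigned int new = old | 0x1u; /* derive the update from the snapshot */

    /* ... and the very same snapshot as the expected value. */
    if (__atomic_compare_exchange_n(&shared_ctrl, &old, new, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        printf("updated to %#x\n", new);
    else
        printf("raced, current value is %#x\n", old);
    return 0;
}
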
+ */ + if (kvm_s390_pv_is_protected(kvm)) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + switch (attr->attr) { case KVM_S390_VM_TOD_EXT: ret = kvm_s390_set_tod_ext(kvm, attr); @@ -1158,6 +1170,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) ret = -ENXIO; break; } + +out_unlock: + mutex_unlock(&kvm->lock); return ret; } @@ -3862,14 +3877,12 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) return 0; } -void kvm_s390_set_tod_clock(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod) +static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) { struct kvm_vcpu *vcpu; struct kvm_s390_tod_clock_ext htod; int i; - mutex_lock(&kvm->lock); preempt_disable(); get_tod_clock_ext((char *)&htod); @@ -3890,7 +3903,15 @@ void kvm_s390_set_tod_clock(struct kvm *kvm, kvm_s390_vcpu_unblock_all(kvm); preempt_enable(); +} + +int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) +{ + if (!mutex_trylock(&kvm->lock)) + return 0; + __kvm_s390_set_tod_clock(kvm, gtod); mutex_unlock(&kvm->lock); + return 1; } /** diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a3e9b71d426f9..b6ff64796af9d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -326,8 +326,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ -void kvm_s390_set_tod_clock(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod); +int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 3b1a498e58d25..e34d518dd3d39 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -102,7 +102,20 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) return kvm_s390_inject_prog_cond(vcpu, rc); VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); - kvm_s390_set_tod_clock(vcpu->kvm, >od); + /* + * To set the TOD clock the kvm lock must be taken, but the vcpu lock + * is already held in handle_set_clock. The usual lock order is the + * opposite. As SCK is deprecated and should not be used in several + * cases, for example when the multiple epoch facility or TOD clock + * steering facility is installed (see Principles of Operation), a + * slow path can be used. If the lock can not be taken via try_lock, + * the instruction will be retried via -EAGAIN at a later point in + * time. 
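
/*
 * The SCK handler above cannot take kvm->lock in the usual order (the vcpu
 * lock is already held), so it uses a trylock and retries the whole
 * instruction on contention. The same shape in portable code, sketched
 * with pthreads (link with -lpthread on older glibc):
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;

static int try_update(void)
{
    if (pthread_mutex_trylock(&outer_lock) != 0)
        return -EAGAIN; /* caller re-executes the operation later */
    /* ... perform the update under the lock ... */
    pthread_mutex_unlock(&outer_lock);
    return 0;
}

int main(void)
{
    printf("try_update: %d\n", try_update());
    return 0;
}
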
+ */ + if (!kvm_s390_try_set_tod_clock(vcpu->kvm, >od)) { + kvm_s390_retry_instr(vcpu); + return -EAGAIN; + } kvm_s390_set_psw_cc(vcpu, 0); return 0; diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 3fbf7081c000c..ff58decfef5e8 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -535,8 +535,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI)) scb_s->eca |= scb_o->eca & ECA_CEI; /* Epoch Extension */ - if (test_kvm_facility(vcpu->kvm, 139)) + if (test_kvm_facility(vcpu->kvm, 139)) { scb_s->ecd |= scb_o->ecd & ECD_MEF; + scb_s->epdx = scb_o->epdx; + } /* etoken */ if (test_kvm_facility(vcpu->kvm, 156)) diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 37b1bbd1a27cc..1ec8076209cab 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -64,7 +64,7 @@ static inline int __pcistg_mio_inuser( asm volatile ( " sacf 256\n" "0: llgc %[tmp],0(%[src])\n" - " sllg %[val],%[val],8\n" + "4: sllg %[val],%[val],8\n" " aghi %[src],1\n" " ogr %[val],%[tmp]\n" " brctg %[cnt],0b\n" @@ -72,7 +72,7 @@ static inline int __pcistg_mio_inuser( "2: ipm %[cc]\n" " srl %[cc],28\n" "3: sacf 768\n" - EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) + EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) : [src] "+a" (src), [cnt] "+d" (cnt), [val] "+d" (val), [tmp] "=d" (tmp), @@ -222,10 +222,10 @@ static inline int __pcilg_mio_inuser( "2: ahi %[shift],-8\n" " srlg %[tmp],%[val],0(%[shift])\n" "3: stc %[tmp],0(%[dst])\n" - " aghi %[dst],1\n" + "5: aghi %[dst],1\n" " brctg %[cnt],2b\n" "4: sacf 768\n" - EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) + EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b) : [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len), [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp), diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 8edb824049b9e..0cb0ca149ac34 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -4,7 +4,7 @@ #include -extern long __machvec_start, __machvec_end; +extern char __machvec_start[], __machvec_end[]; extern char __uncached_start, __uncached_end; extern char __start_eh_frame[], __stop_eh_frame[]; diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 243ea5150aa00..598d0184ffeae 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -105,6 +105,7 @@ extern void init_thread_xstate(void); #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */ #define TIF_SINGLESTEP 4 /* singlestepping active */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ @@ -116,6 +117,7 @@ extern void init_thread_xstate(void); #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) @@ -132,7 +134,7 @@ extern void init_thread_xstate(void); #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ - 
_TIF_SYSCALL_TRACEPOINT) + _TIF_SYSCALL_TRACEPOINT | _TIF_NOTIFY_SIGNAL) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index d606679a211e1..57efaf5b82ae0 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -20,8 +20,8 @@ #define MV_NAME_SIZE 32 #define for_each_mv(mv) \ - for ((mv) = (struct sh_machine_vector *)&__machvec_start; \ - (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \ + for ((mv) = (struct sh_machine_vector *)__machvec_start; \ + (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \ (mv)++) static struct sh_machine_vector * __init get_mv_byname(const char *name) @@ -87,8 +87,8 @@ void __init sh_mv_setup(void) if (!machvec_selected) { unsigned long machvec_size; - machvec_size = ((unsigned long)&__machvec_end - - (unsigned long)&__machvec_start); + machvec_size = ((unsigned long)__machvec_end - + (unsigned long)__machvec_start); /* * Sanity check for machvec section alignment. Ensure @@ -102,7 +102,7 @@ void __init sh_mv_setup(void) * vector (usually the only one) from .machvec.init. */ if (machvec_size >= sizeof(struct sh_machine_vector)) - sh_mv = *(struct sh_machine_vector *)&__machvec_start; + sh_mv = *(struct sh_machine_vector *)__machvec_start; } pr_notice("Booting machvec: %s\n", get_system_type()); diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 80a5d1c66a517..1aa508eb0823a 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, childregs = task_pt_regs(p); p->thread.sp = (unsigned long) childregs; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); p->thread.pc = (unsigned long) ret_from_kernel_thread; childregs->regs[4] = arg; diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 1add47fd31f62..dd3092911efad 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -499,7 +499,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, unsigned long thread_info_flags) { /* deal with pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, save_r0); if (thread_info_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 9c3d32b80038a..4efffc18c8512 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void die_if_kernel(const char *str, struct pt_regs *regs, long err) diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 548b366165dd1..45b4955b253f2 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -104,6 +104,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_USEDFPU 8 /* FPU was used by this task * this quantum (SMP) */ #define 
TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling @@ -115,11 +116,12 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_NOTIFY_RESUME (1<ksp = (unsigned long) new_stack; p->thread.kregs = childregs; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { extern int nwindows; unsigned long psr; memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ); diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 6f8c7822fc065..7afd0a859a78c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -597,7 +597,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, sizeof(struct sparc_stackf)); t->fpsaved[0] = 0; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(child_trap_frame, 0, child_stack_sz); __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = (current_pt_regs()->tstate + 1) & TSTATE_CWP; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 1da36dd34990b..4d478510550c3 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -521,7 +521,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) tracehook_notify_resume(regs); diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index f7ef7edcd5c1a..a0eec62c825df 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -549,7 +549,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long user_exit(); if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) tracehook_notify_resume(regs); diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 247a0d9683b2b..5d47f4a342261 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } printk("Instruction DUMP:"); instruction_dump ((unsigned long *) regs->pc); - if(regs->psr & PSR_PS) - do_exit(SIGKILL); - do_exit(SIGSEGV); + make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV); } void do_hw_interrupt(struct pt_regs *regs, unsigned long type) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index a850dccd78ea1..814277d0e3e8f 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2564,9 +2564,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } if (panic_on_oops) panic("Fatal exception"); - if (regs->tstate & TSTATE_PRIV) - do_exit(SIGKILL); - do_exit(SIGSEGV); + make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV); } EXPORT_SYMBOL(die_if_kernel); diff --git a/arch/um/Makefile b/arch/um/Makefile index 1cea46ff9bb78..7756151413393 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -131,10 +131,18 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT) # The wrappers will select whether using "malloc" or the kernel allocator. 
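
/*
 * Every do_notify_resume() change in this series is the same backport:
 * TIF_NOTIFY_SIGNAL must wake the signal-delivery path even when no real
 * signal is queued, so task_work queued with TWA_SIGNAL runs before the
 * task returns to userspace. The common shape, reduced to a sketch with
 * illustrative bit values:
 */
#include <stdio.h>

#define _TIF_SIGPENDING    (1u << 1) /* illustrative bits */
#define _TIF_NOTIFY_SIGNAL (1u << 3)

static void do_signal(unsigned int flags)
{
    if (flags & _TIF_NOTIFY_SIGNAL)
        printf("run queued task_work\n");
    if (flags & _TIF_SIGPENDING)
        printf("deliver pending signal\n");
}

static void exit_to_user(unsigned int flags)
{
    /* Old test was flags & _TIF_SIGPENDING, so task_work could be missed. */
    if (flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
        do_signal(flags);
}

int main(void)
{
    exit_to_user(_TIF_NOTIFY_SIGNAL);
    return 0;
}
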
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc +# Avoid binutils 2.39+ warnings by marking the stack non-executable and +# ignoring warnings for the kallsyms sections. +LDFLAGS_EXECSTACK = -z noexecstack +ifeq ($(CONFIG_LD_IS_BFD),y) +LDFLAGS_EXECSTACK += $(call ld-option,--no-warn-rwx-segments) +endif + LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt)) # Used by link-vmlinux.sh which has special support for um link export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) +export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK) # When cleaning we don't include .config, so we don't include # TT or skas makefiles and don't clean skas_ptregs.h. diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 66ab6a07330b2..e610e932cfe1e 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -57,6 +57,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */ #define TIF_RESTART_BLOCK 4 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ #define TIF_SYSCALL_AUDIT 6 @@ -68,6 +69,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_MEMDIE (1 << TIF_MEMDIE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 8eb8b736abc18..e6c9b11b20334 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -99,7 +99,8 @@ void interrupt_end(void) if (need_resched()) schedule(); - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) tracehook_notify_resume(regs); @@ -156,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); - int kthread = current->flags & PF_KTHREAD; + int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER); int ret = 0; p->thread = (struct thread_struct) INIT_THREAD; diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 52e2e2a3e4aef..00c6dce14bd2b 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -77,7 +77,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) static void *c_start(struct seq_file *m, loff_t *pos) { - return *pos < NR_CPUS ? cpu_data + *pos : NULL; + return *pos < nr_cpu_ids ?
cpu_data + *pos : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 159646da3c6bc..d64e690139950 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1945,7 +1945,6 @@ config EFI config EFI_STUB bool "EFI stub support" depends on EFI && !X86_USE_3DNOW - depends on $(cc-option,-mabi=ms) || X86_32 select RELOCATABLE help This kernel feature allows a bzImage to be loaded directly diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S index 5521ea12f44e0..aa9b964575843 100644 --- a/arch/x86/boot/bioscall.S +++ b/arch/x86/boot/bioscall.S @@ -32,7 +32,7 @@ intcall: movw %dx, %si movw %sp, %di movw $11, %cx - rep; movsd + rep; movsl /* Pop full state from the stack */ popal @@ -67,7 +67,7 @@ intcall: jz 4f movw %sp, %si movw $11, %cx - rep; movsd + rep; movsl 4: addw $44, %sp /* Restore state and return */ diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 8fcd6a42b3a18..70bd81b6c612e 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -1333,14 +1333,14 @@ SYM_CODE_START(asm_exc_nmi) SYM_CODE_END(asm_exc_nmi) .pushsection .text, "ax" -SYM_CODE_START(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_and_make_dead) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp - call do_exit + call make_task_dead 1: jmp 1b -SYM_CODE_END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_and_make_dead) .popsection diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 559c82b834757..23212c53cef7f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1509,7 +1509,7 @@ SYM_CODE_END(ignore_sysret) #endif .pushsection .text, "ax" -SYM_CODE_START(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_and_make_dead) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp @@ -1518,6 +1518,6 @@ SYM_CODE_START(rewind_stack_do_exit) leaq -PTREGS_SIZE(%rax), %rsp UNWIND_HINT_REGS - call do_exit -SYM_CODE_END(rewind_stack_do_exit) + call make_task_dead +SYM_CODE_END(rewind_stack_and_make_dead) .popsection diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 39eb276d02775..52eba415928a3 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -976,7 +976,7 @@ static int __init amd_core_pmu_init(void) * numbered counter following it. 
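
/*
 * The amd/core.c fix just below is a classic integer-promotion bug:
 * "1 << i" is computed in 32-bit int, so at bit 31 the shift already
 * overflows signed int (formally undefined; in practice it goes negative
 * and sign-extends when widened) and at bit 32+ the shift itself is
 * undefined. BIT_ULL() performs the shift in unsigned 64-bit. A runnable
 * demonstration:
 */
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
    int i = 31;
    unsigned long long wrong = 1 << i;      /* 0xffffffff80000000 on gcc */
    unsigned long long right = BIT_ULL(i);  /* 0x0000000080000000 */

    printf("1 << 31     -> %#llx\n", wrong);
    printf("BIT_ULL(31) -> %#llx\n", right);
    return 0;
}
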
*/ for (i = 0; i < x86_pmu.num_counters - 1; i += 2) - even_ctr_mask |= 1 << i; + even_ctr_mask |= BIT_ULL(i); pair_constraint = (struct event_constraint) __EVENT_CONSTRAINT(0, even_ctr_mask, 0, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index f6eadf9320a1a..990d5543e3bf2 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4412,6 +4412,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = { INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 945d470f62d0f..48f30ffef1f4b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -855,8 +855,13 @@ struct event_constraint intel_icl_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 42173a7be3bb4..4b6c39c5facba 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1847,7 +1847,7 @@ void __init intel_pmu_arch_lbr_init(void) return; clear_arch_lbr: - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR); + setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR); } /** diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index cc3b79c066853..d87421acddc39 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -13,6 +13,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> +#include <linux/bits.h> +#include <linux/limits.h> #include <linux/slab.h> #include <linux/device.h> @@ -1245,6 +1247,15 @@ static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages) if (1 << order != nr_pages) goto out; + /* + * Some processors cannot always support single range for more than + * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might + * also be affected, so for now rather than trying to keep track of + * which ones, just disable it for all.
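
/*
 * The pt.c address-filter rework further below replaces the kernel-ip
 * validity check with canonical-address clamping. The helper relies on the
 * usual sign-extension trick: shift the address left so the top
 * implemented bit lands in bit 63, then arithmetic-shift it back. A
 * runnable sketch for 48-bit virtual addresses:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t canonical_address(uint64_t vaddr, uint8_t vaddr_bits)
{
    /* Shift in unsigned, then arithmetic right shift to sign-extend. */
    return (uint64_t)((int64_t)(vaddr << (64 - vaddr_bits)) >>
                      (64 - vaddr_bits));
}

int main(void)
{
    uint64_t ok  = 0xffff800000000000ULL; /* canonical for 48 bits */
    uint64_t bad = 0x0000900000000000ULL; /* in the non-canonical hole */

    printf("%#llx canonical? %d\n", (unsigned long long)ok,
           canonical_address(ok, 48) == ok);
    printf("%#llx canonical? %d\n", (unsigned long long)bad,
           canonical_address(bad, 48) == bad);
    return 0;
}
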
+ */ + if (nr_pages > 1) + goto out; + buf->single = true; buf->nr_pages = nr_pages; ret = 0; @@ -1348,10 +1359,36 @@ static void pt_addr_filters_fini(struct perf_event *event) event->hw.addr_filters = NULL; } -static inline bool valid_kernel_ip(unsigned long ip) +#ifdef CONFIG_X86_64 +static u64 canonical_address(u64 vaddr, u8 vaddr_bits) +{ + return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); +} + +static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits) +{ + return canonical_address(vaddr, vaddr_bits) == vaddr; +} + +/* Clamp to a canonical address greater-than-or-equal-to the address given */ +static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits) +{ + return is_canonical_address(vaddr, vaddr_bits) ? + vaddr : + -BIT_ULL(vaddr_bits - 1); +} + +/* Clamp to a canonical address less-than-or-equal-to the address given */ +static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits) { - return virt_addr_valid(ip) && kernel_ip(ip); + return is_canonical_address(vaddr, vaddr_bits) ? + vaddr : + BIT_ULL(vaddr_bits - 1) - 1; } +#else +#define clamp_to_ge_canonical_addr(x, y) (x) +#define clamp_to_le_canonical_addr(x, y) (x) +#endif static int pt_event_addr_filters_validate(struct list_head *filters) { @@ -1367,14 +1404,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters) filter->action == PERF_ADDR_FILTER_ACTION_START) return -EOPNOTSUPP; - if (!filter->path.dentry) { - if (!valid_kernel_ip(filter->offset)) - return -EINVAL; - - if (!valid_kernel_ip(filter->offset + filter->size)) - return -EINVAL; - } - if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges)) return -EOPNOTSUPP; } @@ -1398,9 +1427,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event) if (filter->path.dentry && !fr[range].start) { msr_a = msr_b = 0; } else { - /* apply the offset */ - msr_a = fr[range].start; - msr_b = msr_a + fr[range].size - 1; + unsigned long n = fr[range].size - 1; + unsigned long a = fr[range].start; + unsigned long b; + + if (a > ULONG_MAX - n) + b = ULONG_MAX; + else + b = a + n; + /* + * Apply the offset. 64-bit addresses written to the + * MSRs must be canonical, but the range can encompass + * non-canonical addresses. Since software cannot + * execute at non-canonical addresses, adjusting to + * canonical addresses does not affect the result of the + * address filter. + */ + msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits); + msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits); + if (msr_b < msr_a) + msr_a = msr_b = 0; } filters->filter[range].msr_a = msr_a; diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 9efea154349d3..4e2953a9eff09 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -84,6 +84,7 @@ struct intel_uncore_type { /* * Optional callbacks for managing mapping of Uncore units to PMONs */ + int (*get_topology)(struct intel_uncore_type *type); int (*set_mapping)(struct intel_uncore_type *type); void (*cleanup_mapping)(struct intel_uncore_type *type); }; diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index fa9289718147a..a4c20e37bec21 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -1274,6 +1274,7 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) /* MCHBAR is disabled */ if (!(mch_bar & BIT(0))) { pr_warn("perf uncore: MCHBAR is disabled. 
Failed to map IMC free-running counters.\n"); + pci_dev_put(pdev); return; } mch_bar &= ~BIT(0); @@ -1287,6 +1288,8 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) box->io_addr = ioremap(addr, type->mmio_map_size); if (!box->io_addr) pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); + + pci_dev_put(pdev); } static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = { diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 03c8047bebb38..ad084a5a1463b 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -2828,6 +2828,7 @@ static bool hswep_has_limit_sbox(unsigned int device) return false; pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); + pci_dev_put(dev); if (!hswep_get_chop(capid4)) return true; @@ -3642,12 +3643,19 @@ static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die) } static umode_t -skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) +pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, + int die, int zero_bus_pmu) { struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj)); - /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */ - return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode; + return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode; +} + +static umode_t +skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) +{ + /* Root bus 0x00 is valid only for pmu_idx = 0. */ + return pmu_iio_mapping_visible(kobj, attr, die, 0); } static ssize_t skx_iio_mapping_show(struct device *dev, @@ -3739,7 +3747,23 @@ static const struct attribute_group *skx_iio_attr_update[] = { NULL, }; -static int skx_iio_set_mapping(struct intel_uncore_type *type) +static void pmu_clear_mapping_attr(const struct attribute_group **groups, + struct attribute_group *ag) +{ + int i; + + for (i = 0; groups[i]; i++) { + if (groups[i] == ag) { + for (i++; groups[i]; i++) + groups[i - 1] = groups[i]; + groups[i - 1] = NULL; + break; + } + } +} + +static int +pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) { char buf[64]; int ret; @@ -3747,8 +3771,8 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type) struct attribute **attrs = NULL; struct dev_ext_attribute *eas = NULL; - ret = skx_iio_get_topology(type); - if (ret) + ret = type->get_topology(type); + if (ret < 0) goto clear_attr_update; ret = -ENOMEM; @@ -3774,7 +3798,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type) eas[die].var = (void *)die; attrs[die] = &eas[die].attr.attr; } - skx_iio_mapping_group.attrs = attrs; + ag->attrs = attrs; return 0; err: @@ -3786,10 +3810,15 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type) clear_topology: kfree(type->topology); clear_attr_update: - type->attr_update = NULL; + pmu_clear_mapping_attr(type->attr_update, ag); return ret; } +static int skx_iio_set_mapping(struct intel_uncore_type *type) +{ + return pmu_iio_set_mapping(type, &skx_iio_mapping_group); +} + static void skx_iio_cleanup_mapping(struct intel_uncore_type *type) { struct attribute **attr = skx_iio_mapping_group.attrs; @@ -3820,6 +3849,7 @@ static struct intel_uncore_type skx_uncore_iio = { .ops = &skx_uncore_iio_ops, .format_group = &skx_uncore_iio_format_group, .attr_update = skx_iio_attr_update, + .get_topology = skx_iio_get_topology, .set_mapping = skx_iio_set_mapping, .cleanup_mapping = 
skx_iio_cleanup_mapping, }; @@ -4680,6 +4710,8 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box, addr += box_ctl; + pci_dev_put(pdev); + box->io_addr = ioremap(addr, type->mmio_map_size); if (!box->io_addr) { pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 01860c0d324d7..70fd21ebb9d55 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -453,8 +453,6 @@ void hyperv_cleanup(void) { union hv_x64_msr_hypercall_contents hypercall_msr; - unregister_syscore_ops(&hv_syscore_ops); - /* Reset our OS id */ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index f507ad7c7fd7b..1fcda82635546 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -300,6 +300,7 @@ #define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */ #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */ +#define X86_FEATURE_MSR_TSX_CTRL (11*32+18) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index cfdf307ddc012..9ed8343c9b3cb 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno) asm("mov %%db6, %0" :"=r" (val)); break; case 7: - asm("mov %%db7, %0" :"=r" (val)); + /* + * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them + * with other code. + * + * This is needed because a DR7 access can cause a #VC exception + * when running under SEV-ES. Taking a #VC exception is not a + * safe thing to do just anywhere in the entry code and + * re-ordering might place the access into an unsafe location. + * + * This happened in the NMI handler, where the DR7 read was + * re-ordered to happen before the call to sev_es_ist_enter(), + * causing stack recursion. + */ + asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER); break; default: BUG(); @@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value) asm("mov %0, %%db6" ::"r" (value)); break; case 7: - asm("mov %0, %%db7" ::"r" (value)); + /* + * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them + * with other code. + * + * While it didn't happen with a DR7 write (see the DR7 read + * comment above which explains where it happened), add the + * __FORCE_ORDER here too to avoid similar problems in the + * future.
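
/*
 * The debugreg.h hunks here hinge on one compiler subtlety: an asm that is
 * neither volatile nor tied to a dependency may be reordered or hoisted.
 * Marking the DR7 access volatile and feeding it the kernel's __FORCE_ORDER
 * artificial input pins it in place. A distilled x86-64-only sketch of the
 * idiom; the dummy operand is never dereferenced, it only hands the
 * optimizer a dependency to respect:
 */
#include <stdio.h>

#define FORCE_ORDER "m"(*(unsigned int *)0x1000UL) /* fake dependency */

static unsigned long read_reg_ordered(void)
{
    unsigned long val;

    /* volatile + artificial input: stays ordered against similar asms */
    asm volatile("mov %%rsp, %0" : "=r"(val) : FORCE_ORDER);
    return val;
}

int main(void)
{
    printf("sampled register value: %#lx\n", read_reg_ordered());
    return 0;
}
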
+ */ + asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER); break; default: BUG(); diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 0ed20e8bba9e5..ae7192b751361 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -474,7 +474,7 @@ struct hv_enlightened_vmcs { u64 guest_rip; u32 hv_clean_fields; - u32 hv_padding_32; + u32 padding32_1; u32 hv_synthetic_controls; struct { u32 nested_flush_hypercall:1; @@ -482,7 +482,7 @@ struct hv_enlightened_vmcs { u32 reserved:30; } __packed hv_enlightenments_control; u32 hv_vp_id; - + u32 padding32_2; u64 hv_vm_id; u64 partition_assist_page; u64 padding64_4[4]; diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index bf1ed2ddc74bd..7a983119bc403 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -17,8 +17,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { u64 start = rmrr->base_address; u64 end = rmrr->end_address + 1; + int entry_type; - if (e820__mapped_all(start, end, E820_TYPE_RESERVED)) + entry_type = e820__get_entry_type(start, end); + if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS) return 0; pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n", diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 38c63a78aba6f..660012ab7bfa5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1275,6 +1275,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + void (*guest_memory_reclaimed)(struct kvm *kvm); int (*get_msr_feature)(struct kvm_msr_entry *entry); diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 91a06cef50c1b..f73327397b898 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -9,6 +9,7 @@ struct ucode_patch { struct list_head plist; void *data; /* Intel uses only this one */ + unsigned int size; u32 patch_id; u16 equiv_cpu; }; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 144dc164b7596..5a8ee3b83af2a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -489,6 +489,11 @@ #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 + +#define MSR_AMD64_DE_CFG 0xc0011029 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) + #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 @@ -565,9 +570,6 @@ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL #define FAM10H_MMIO_CONF_BASE_SHIFT 20 #define MSR_FAM10H_NODE_ID 0xc001100c -#define MSR_F10H_DECFG 0xc0011029 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 07f5030073bbc..f14cdf9512493 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -310,7 +310,7 @@ static inline void indirect_branch_prediction_barrier(void) /* The 
Intel SPEC CTRL MSR base value cache */ extern u64 x86_spec_ctrl_base; DECLARE_PER_CPU(u64, x86_spec_ctrl_current); -extern void write_spec_ctrl_current(u64 val, bool force); +extern void update_spec_ctrl_cond(u64 val); extern u64 spec_ctrl_current(void); /* diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e701f29b48817..012c8ee93b67f 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -93,6 +93,7 @@ struct thread_info { #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* IA32 compatibility process */ #define TIF_SLD 18 /* Restore split lock detection on context switch */ +#define TIF_NOTIFY_SIGNAL 19 /* signal notifications exist */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ @@ -121,6 +122,7 @@ struct thread_info { #define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SLD (1 << TIF_SLD) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 49ae4e1ac9cd8..d28d43d774a26 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -79,6 +79,21 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, */ flags->bm_control = 0; } + if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) { + /* + * For all AMD Zen or newer CPUs that support C3, caches + * should not be flushed by software while entering C3 + * type state. Set bm->check to 1 so that kernel doesn't + * need to execute cache flush operation. + */ + flags->bm_check = 1; + /* + * In current AMD C state implementation ARB_DIS is no longer + * used. So set bm_control to zero to indicate ARB_DIS is not + * required while entering C3 type state. 
+ */ + flags->bm_control = 0; + } } EXPORT_SYMBOL(acpi_processor_power_init_bm_check); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index a85fb17f11804..e6e63a9d27cbe 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -1330,22 +1330,23 @@ struct bp_patching_desc { atomic_t refs; }; -static struct bp_patching_desc *bp_desc; +static struct bp_patching_desc bp_desc; static __always_inline -struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp) +struct bp_patching_desc *try_get_desc(void) { - /* rcu_dereference */ - struct bp_patching_desc *desc = __READ_ONCE(*descp); + struct bp_patching_desc *desc = &bp_desc; - if (!desc || !arch_atomic_inc_not_zero(&desc->refs)) + if (!arch_atomic_inc_not_zero(&desc->refs)) return NULL; return desc; } -static __always_inline void put_desc(struct bp_patching_desc *desc) +static __always_inline void put_desc(void) { + struct bp_patching_desc *desc = &bp_desc; + smp_mb__before_atomic(); arch_atomic_dec(&desc->refs); } @@ -1378,15 +1379,15 @@ noinstr int poke_int3_handler(struct pt_regs *regs) /* * Having observed our INT3 instruction, we now must observe - * bp_desc: + * bp_desc with non-zero refcount: * - * bp_desc = desc INT3 + * bp_desc.refs = 1 INT3 * WMB RMB - * write INT3 if (desc) + * write INT3 if (bp_desc.refs != 0) */ smp_rmb(); - desc = try_get_desc(&bp_desc); + desc = try_get_desc(); if (!desc) return 0; @@ -1440,7 +1441,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs) ret = 1; out_put: - put_desc(desc); + put_desc(); return ret; } @@ -1471,18 +1472,20 @@ static int tp_vec_nr; */ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) { - struct bp_patching_desc desc = { - .vec = tp, - .nr_entries = nr_entries, - .refs = ATOMIC_INIT(1), - }; unsigned char int3 = INT3_INSN_OPCODE; unsigned int i; int do_sync; lockdep_assert_held(&text_mutex); - smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */ + bp_desc.vec = tp; + bp_desc.nr_entries = nr_entries; + + /* + * Corresponds to the implicit memory barrier in try_get_desc() to + * ensure reading a non-zero refcount provides up to date bp_desc data. + */ + atomic_set_release(&bp_desc.refs, 1); /* * Corresponding read barrier in int3 notifier for making sure the @@ -1570,12 +1573,10 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries text_poke_sync(); /* - * Remove and synchronize_rcu(), except we have a very primitive - * refcount based completion. + * Remove and wait for refs to be zero. */ - WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */ - if (!atomic_dec_and_test(&desc.refs)) - atomic_cond_read_acquire(&desc.refs, !VAL); + if (!atomic_dec_and_test(&bp_desc.refs)) + atomic_cond_read_acquire(&bp_desc.refs, !VAL); } static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 8b9e3277a6ceb..ec3fa4dc90318 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -822,8 +822,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c) set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } -#define MSR_AMD64_DE_CFG 0xC0011029 - static void init_amd_ln(struct cpuinfo_x86 *c) { /* @@ -1018,8 +1016,8 @@ static void init_amd(struct cpuinfo_x86 *c) * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. 
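
/*
 * The alternative.c rework above swaps an RCU-ish published pointer for a
 * plain refcount with careful ordering: the writer publishes with a
 * release store of refs = 1, readers take a reference only via
 * inc-not-zero with acquire, and teardown drops its ref and spins until
 * the count drains to zero. A compressed single-threaded userspace model:
 */
#include <stdio.h>

static int refs; /* stand-in for bp_desc.refs */

static int try_get(void)
{
    int old = __atomic_load_n(&refs, __ATOMIC_RELAXED);

    do {
        if (old == 0)
            return 0; /* nothing published right now */
    } while (!__atomic_compare_exchange_n(&refs, &old, old + 1, 0,
                                          __ATOMIC_ACQUIRE,
                                          __ATOMIC_RELAXED));
    return 1;
}

static void publish(void)
{
    /* ... fill in the descriptor fields first ... */
    __atomic_store_n(&refs, 1, __ATOMIC_RELEASE);
}

static void unpublish(void)
{
    if (__atomic_sub_fetch(&refs, 1, __ATOMIC_RELEASE) != 0)
        while (__atomic_load_n(&refs, __ATOMIC_ACQUIRE) != 0)
            ; /* wait for readers to drain */
}

int main(void)
{
    publish();
    if (try_get()) {
        printf("reader holds a reference\n");
        __atomic_sub_fetch(&refs, 1, __ATOMIC_RELEASE); /* put */
    }
    unpublish();
    printf("refs now %d\n", refs);
    return 0;
}
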
*/ - msr_set_bit(MSR_F10H_DECFG, - MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + msr_set_bit(MSR_AMD64_DE_CFG, + MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); /* A serializing LFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index a300a19255b66..a2a087a797ae5 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -59,11 +59,18 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); static DEFINE_MUTEX(spec_ctrl_mutex); +/* Update SPEC_CTRL MSR and its cached copy unconditionally */ +static void update_spec_ctrl(u64 val) +{ + this_cpu_write(x86_spec_ctrl_current, val); + wrmsrl(MSR_IA32_SPEC_CTRL, val); +} + /* * Keep track of the SPEC_CTRL MSR value for the current task, which may differ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). */ -void write_spec_ctrl_current(u64 val, bool force) +void update_spec_ctrl_cond(u64 val) { if (this_cpu_read(x86_spec_ctrl_current) == val) return; @@ -74,7 +81,7 @@ void write_spec_ctrl_current(u64 val, bool force) * When KERNEL_IBRS this MSR is written on return-to-user, unless * forced the update can be delayed until that time. */ - if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) + if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) wrmsrl(MSR_IA32_SPEC_CTRL, val); } @@ -1291,7 +1298,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void) if (ia32_cap & ARCH_CAP_RRSBA) { x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; - write_spec_ctrl_current(x86_spec_ctrl_base, true); + update_spec_ctrl(x86_spec_ctrl_base); } } @@ -1413,7 +1420,7 @@ static void __init spectre_v2_select_mitigation(void) if (spectre_v2_in_ibrs_mode(mode)) { x86_spec_ctrl_base |= SPEC_CTRL_IBRS; - write_spec_ctrl_current(x86_spec_ctrl_base, true); + update_spec_ctrl(x86_spec_ctrl_base); } switch (mode) { @@ -1527,7 +1534,7 @@ static void __init spectre_v2_select_mitigation(void) static void update_stibp_msr(void * __unused) { u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); - write_spec_ctrl_current(val, true); + update_spec_ctrl(val); } /* Update x86_spec_ctrl_base in case SMT state changed. 
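/*
 * [Editor's sketch -- not part of the patch] The bugs.c rename above splits
 * one helper into two: update_spec_ctrl() always writes the MSR, while
 * update_spec_ctrl_cond() skips the write when the per-CPU cache already
 * matches, or defers it when KERNEL_IBRS rewrites the MSR on return to
 * userspace anyway. Schematically (names and stubs are illustrative):
 */
static unsigned long long cached_spec_ctrl;	/* x86_spec_ctrl_current */
static int kernel_ibrs;				/* X86_FEATURE_KERNEL_IBRS */

static void wrmsr_spec_ctrl(unsigned long long val) { (void)val; /* stub */ }

static void update_spec_ctrl_sketch(unsigned long long val)
{
	cached_spec_ctrl = val;
	wrmsr_spec_ctrl(val);			/* unconditional */
}

static void update_spec_ctrl_cond_sketch(unsigned long long val)
{
	if (cached_spec_ctrl == val)
		return;				/* no change, no MSR write */
	cached_spec_ctrl = val;
	if (!kernel_ibrs)
		wrmsr_spec_ctrl(val);	/* else written on return-to-user */
}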
*/ @@ -1760,7 +1767,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) x86_amd_ssb_disable(); } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; - write_spec_ctrl_current(x86_spec_ctrl_base, true); + update_spec_ctrl(x86_spec_ctrl_base); } } @@ -1889,6 +1896,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); task_update_spec_tif(task); + if (task == current) + indirect_branch_prediction_barrier(); break; default: return -ERANGE; @@ -1978,7 +1987,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) void x86_spec_ctrl_setup_ap(void) { if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) - write_spec_ctrl_current(x86_spec_ctrl_base, true); + update_spec_ctrl(x86_spec_ctrl_base); if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) x86_amd_ssb_disable(); diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 29a3bedabd060..d7541851288e3 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include #include #include -#include "cpu.h" #undef pr_fmt #define pr_fmt(fmt) "x86/cpu: " fmt diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 774ca6bfda9f4..205fa420ee7ca 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -342,8 +342,8 @@ static void init_hygon(struct cpuinfo_x86 *c) * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. */ - msr_set_bit(MSR_F10H_DECFG, - MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + msr_set_bit(MSR_AMD64_DE_CFG, + MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); /* A serializing LFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 09f7c652346a9..4f9b7c1cfc36f 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -513,7 +513,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high, /* Fall back to method we used for older processors: */ switch (block) { case 0: - addr = msr_ops.misc(bank); + addr = mca_msr_reg(bank, MCA_MISC); break; case 1: offset = ((low & MASK_BLKPTR_LO) >> 21); @@ -952,6 +952,24 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) return status & MCI_STATUS_DEFERRED; } +static bool _log_error_deferred(unsigned int bank, u32 misc) +{ + if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), + mca_msr_reg(bank, MCA_ADDR), misc)) + return false; + + /* + * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers. + * Return true here to avoid accessing these registers. + */ + if (!mce_flags.smca) + return true; + + /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */ + wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); + return true; +} + /* * We have three scenarios for checking for Deferred errors: * @@ -963,19 +981,8 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) */ static void log_error_deferred(unsigned int bank) { - bool defrd; - - defrd = _log_error_bank(bank, msr_ops.status(bank), - msr_ops.addr(bank), 0); - - if (!mce_flags.smca) - return; - - /* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. 
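/*
 * [Editor's sketch -- not part of the patch] The ib_prctl_set() hunk above
 * issues an IBPB immediately when a task restricts its own indirect-branch
 * speculation, instead of flushing only at the next context switch. The
 * userspace interface is unchanged; a minimal caller:
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_INDIRECT_BRANCH	1
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Post-patch, stale predictions are barriered before this returns. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}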
*/ - if (defrd) { - wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); + if (_log_error_deferred(bank, 0)) return; - } /* * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check @@ -996,7 +1003,7 @@ static void amd_deferred_error_interrupt(void) static void log_error_thresholding(unsigned int bank, u64 misc) { - _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc); + _log_error_deferred(bank, misc); } static void log_and_reset_block(struct threshold_block *block) @@ -1384,7 +1391,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu, } } - err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); + err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC)); if (err) goto out_kobj; diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 5cf1a024408bf..1906387a0faf4 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -176,53 +176,27 @@ void mce_unregister_decode_chain(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); -static inline u32 ctl_reg(int bank) +u32 mca_msr_reg(int bank, enum mca_msr reg) { - return MSR_IA32_MCx_CTL(bank); -} - -static inline u32 status_reg(int bank) -{ - return MSR_IA32_MCx_STATUS(bank); -} - -static inline u32 addr_reg(int bank) -{ - return MSR_IA32_MCx_ADDR(bank); -} - -static inline u32 misc_reg(int bank) -{ - return MSR_IA32_MCx_MISC(bank); -} - -static inline u32 smca_ctl_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_CTL(bank); -} - -static inline u32 smca_status_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_STATUS(bank); -} + if (mce_flags.smca) { + switch (reg) { + case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank); + case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank); + case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank); + case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank); + } + } -static inline u32 smca_addr_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_ADDR(bank); -} + switch (reg) { + case MCA_CTL: return MSR_IA32_MCx_CTL(bank); + case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank); + case MCA_MISC: return MSR_IA32_MCx_MISC(bank); + case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank); + } -static inline u32 smca_misc_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_MISC(bank); + return 0; } -struct mca_msr_regs msr_ops = { - .ctl = ctl_reg, - .status = status_reg, - .addr = addr_reg, - .misc = misc_reg -}; - static void __print_mce(struct mce *m) { pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n", @@ -371,11 +345,11 @@ static int msr_to_offset(u32 msr) if (msr == mca_cfg.rip_msr) return offsetof(struct mce, ip); - if (msr == msr_ops.status(bank)) + if (msr == mca_msr_reg(bank, MCA_STATUS)) return offsetof(struct mce, status); - if (msr == msr_ops.addr(bank)) + if (msr == mca_msr_reg(bank, MCA_ADDR)) return offsetof(struct mce, addr); - if (msr == msr_ops.misc(bank)) + if (msr == mca_msr_reg(bank, MCA_MISC)) return offsetof(struct mce, misc); if (msr == MSR_IA32_MCG_STATUS) return offsetof(struct mce, mcgstatus); @@ -694,10 +668,10 @@ static struct notifier_block mce_default_nb = { static noinstr void mce_read_aux(struct mce *m, int i) { if (m->status & MCI_STATUS_MISCV) - m->misc = mce_rdmsrl(msr_ops.misc(i)); + m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC)); if (m->status & MCI_STATUS_ADDRV) { - m->addr = mce_rdmsrl(msr_ops.addr(i)); + m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR)); /* * Mask the reported address by the reported granularity. 
@@ -767,7 +741,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) m.bank = i; barrier(); - m.status = mce_rdmsrl(msr_ops.status(i)); + m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); /* If this entry is not valid, ignore it */ if (!(m.status & MCI_STATUS_VAL)) @@ -835,7 +809,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) /* * Clear state for this bank. */ - mce_wrmsrl(msr_ops.status(i), 0); + mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } /* @@ -860,7 +834,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { - m->status = mce_rdmsrl(msr_ops.status(i)); + m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; @@ -1149,7 +1123,7 @@ static void mce_clear_state(unsigned long *toclear) for (i = 0; i < this_cpu_read(mce_num_banks); i++) { if (test_bit(i, toclear)) - mce_wrmsrl(msr_ops.status(i), 0); + mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } } @@ -1208,7 +1182,7 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin m->addr = 0; m->bank = i; - m->status = mce_rdmsrl(msr_ops.status(i)); + m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; @@ -1704,8 +1678,8 @@ static void __mcheck_cpu_init_clear_banks(void) if (!b->init) continue; - wrmsrl(msr_ops.ctl(i), b->ctl); - wrmsrl(msr_ops.status(i), 0); + wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); + wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } } @@ -1731,7 +1705,7 @@ static void __mcheck_cpu_check_banks(void) if (!b->init) continue; - rdmsrl(msr_ops.ctl(i), msrval); + rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); b->init = !!msrval; } } @@ -1890,13 +1864,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); mce_flags.amd_threshold = 1; - - if (mce_flags.smca) { - msr_ops.ctl = smca_ctl_reg; - msr_ops.status = smca_status_reg; - msr_ops.addr = smca_addr_reg; - msr_ops.misc = smca_misc_reg; - } } } @@ -2272,7 +2239,7 @@ static void mce_disable_error_reporting(void) struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(msr_ops.ctl(i), 0); + wrmsrl(mca_msr_reg(i, MCA_CTL), 0); } return; } @@ -2624,7 +2591,7 @@ static void mce_reenable_cpu(void) struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(msr_ops.ctl(i), b->ctl); + wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); } } diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 88dcc79cfb07d..3a485c0d5791b 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -168,14 +168,14 @@ struct mce_vendor_flags { extern struct mce_vendor_flags mce_flags; -struct mca_msr_regs { - u32 (*ctl) (int bank); - u32 (*status) (int bank); - u32 (*addr) (int bank); - u32 (*misc) (int bank); +enum mca_msr { + MCA_CTL, + MCA_STATUS, + MCA_ADDR, + MCA_MISC, }; -extern struct mca_msr_regs msr_ops; +u32 mca_msr_reg(int bank, enum mca_msr reg); /* Decide whether to add MCE record to MCE event pool or filter it out. 
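/*
 * [Editor's sketch -- not part of the patch] The mce refactor above drops
 * the msr_ops function-pointer table for a pure function keyed by an enum,
 * which avoids indirect calls (costly under retpolines) and reads better at
 * call sites, e.g. mce_rdmsrl(mca_msr_reg(bank, MCA_STATUS)). Standalone
 * illustration using the legacy IA32 bank layout (4 MSRs per bank, base
 * 0x400 = MSR_IA32_MC0_CTL):
 */
#include <stdio.h>

enum mca_msr { MCA_CTL, MCA_STATUS, MCA_ADDR, MCA_MISC };

static unsigned int mca_msr_reg_sketch(int bank, enum mca_msr reg)
{
	switch (reg) {
	case MCA_CTL:	 return 0x400 + bank * 4; /* MSR_IA32_MCx_CTL */
	case MCA_STATUS: return 0x401 + bank * 4; /* MSR_IA32_MCx_STATUS */
	case MCA_ADDR:	 return 0x402 + bank * 4; /* MSR_IA32_MCx_ADDR */
	case MCA_MISC:	 return 0x403 + bank * 4; /* MSR_IA32_MCx_MISC */
	}
	return 0;
}

int main(void)
{
	printf("bank 2 STATUS: 0x%x\n", mca_msr_reg_sketch(2, MCA_STATUS));
	return 0;	/* prints 0x409 */
}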
*/ extern bool filter_mce(struct mce *m); diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 3f6b137ef4e6e..234a96f25248d 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -441,7 +441,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p return ret; native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - if (rev >= mc->hdr.patch_id) + + /* + * Allow application of the same revision to pick up SMT-specific + * changes even if the revision of the other SMT thread is already + * up-to-date. + */ + if (rev > mc->hdr.patch_id) return ret; if (!__apply_microcode_amd(mc)) { @@ -523,8 +529,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - /* Check whether we have saved a new patch already: */ - if (*new_rev && rev < mc->hdr.patch_id) { + /* + * Check whether a new patch has been saved already. Also, allow application of + * the same revision in order to pick up SMT-thread-specific configuration even + * if the sibling SMT thread already has an up-to-date revision. + */ + if (*new_rev && rev <= mc->hdr.patch_id) { if (!__apply_microcode_amd(mc)) { *new_rev = mc->hdr.patch_id; return; @@ -783,6 +793,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, kfree(patch); return -EINVAL; } + patch->size = *patch_size; mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); proc_id = mc_hdr->processor_rev_id; @@ -864,7 +875,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) return ret; memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); - memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE)); + memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); return ret; } diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 7e8e07bddd5fe..1ba590e6ef7bb 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -659,7 +659,6 @@ void load_ucode_intel_ap(void) else iup = &intel_ucode_patch; -reget: if (!*iup) { patch = __load_ucode_intel(&uci); if (!patch) @@ -670,12 +669,7 @@ void load_ucode_intel_ap(void) uci.mc = *iup; - if (apply_microcode_early(&uci, true)) { - /* Mixed-silicon system? 
Try to refetch the proper patch: */ - *iup = NULL; - - goto reget; - } + apply_microcode_early(&uci, true); } static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 6a80f36b5d598..5f436cb4f7c49 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -794,8 +794,6 @@ void mtrr_ap_init(void) if (!use_intel() || mtrr_aps_delayed_init) return; - rcu_cpu_starting(smp_processor_id()); - /* * Ideally we should hold mtrr_mutex here to avoid mtrr entries * changed, but this routine will be called in cpu boot time, diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 0daf2f1cf7a86..465dce141bfc8 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -416,6 +416,7 @@ static int pseudo_lock_fn(void *_rdtgrp) struct pseudo_lock_region *plr = rdtgrp->plr; u32 rmid_p, closid_p; unsigned long i; + u64 saved_msr; #ifdef CONFIG_KASAN /* * The registers used for local register variables are also used @@ -459,6 +460,7 @@ static int pseudo_lock_fn(void *_rdtgrp) * the buffer and evict pseudo-locked memory read earlier from the * cache. */ + saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL); __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); closid_p = this_cpu_read(pqr_state.cur_closid); rmid_p = this_cpu_read(pqr_state.cur_rmid); @@ -510,7 +512,7 @@ static int pseudo_lock_fn(void *_rdtgrp) __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p); /* Re-enable the hardware prefetcher(s) */ - wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); + wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr); local_irq_enable(); plr->thread_done = 1; @@ -867,6 +869,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) static int measure_cycles_lat_fn(void *_plr) { struct pseudo_lock_region *plr = _plr; + u32 saved_low, saved_high; unsigned long i; u64 start, end; void *mem_r; @@ -875,6 +878,7 @@ static int measure_cycles_lat_fn(void *_plr) /* * Disable hardware prefetchers. */ + rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); mem_r = READ_ONCE(plr->kmem); /* @@ -891,7 +895,7 @@ static int measure_cycles_lat_fn(void *_plr) end = rdtsc_ordered(); trace_pseudo_lock_mem_latency((u32)(end - start)); } - wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); + wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); local_irq_enable(); plr->thread_done = 1; wake_up_interruptible(&plr->lock_thread_wq); @@ -936,6 +940,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0; struct perf_event *miss_event, *hit_event; int hit_pmcnum, miss_pmcnum; + u32 saved_low, saved_high; unsigned int line_size; unsigned int size; unsigned long i; @@ -969,6 +974,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, /* * Disable hardware prefetchers. 
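/*
 * [Editor's sketch -- not part of the patch] The pseudo_lock.c hunks above
 * all make the same fix: save MSR_MISC_FEATURE_CONTROL before clobbering it
 * and restore the saved value, rather than assuming it was 0. The same
 * save/restore discipline from userspace via the msr driver (requires root
 * and msr.ko; the 0xf disable mask is model-specific, per the patch
 * context):
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

#define MSR_MISC_FEATURE_CONTROL 0x1a4

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDWR);
	uint64_t saved, disable = 0xf;	/* hypothetical prefetcher bits */

	if (fd < 0)
		return 1;
	if (pread(fd, &saved, 8, MSR_MISC_FEATURE_CONTROL) != 8)
		goto out;
	pwrite(fd, &disable, 8, MSR_MISC_FEATURE_CONTROL);
	/* ... timing-sensitive measurement ... */
	pwrite(fd, &saved, 8, MSR_MISC_FEATURE_CONTROL);	/* not 0! */
out:
	close(fd);
	return 0;
}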
*/ + rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); /* Initialize rest of local variables */ @@ -1027,7 +1033,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, */ rmb(); /* Re-enable hardware prefetchers */ - wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); + wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); local_irq_enable(); out_hit: perf_event_release_kernel(hit_event); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 5a59e3315b340..ff26de11b3f15 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -577,8 +577,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk, /* * Ensure the task's closid and rmid are written before determining if * the task is current that will decide if it will be interrupted. + * This pairs with the full barrier between the rq->curr update and + * resctrl_sched_in() during context switch. */ - barrier(); + smp_mb(); /* * By now, the task's closid and rmid are set. If the task is current @@ -2313,19 +2315,23 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, t->closid = to->closid; t->rmid = to->mon.rmid; -#ifdef CONFIG_SMP /* - * This is safe on x86 w/o barriers as the ordering - * of writing to task_cpu() and t->on_cpu is - * reverse to the reading here. The detection is - * inaccurate as tasks might move or schedule - * before the smp function call takes place. In - * such a case the function call is pointless, but + * Order the closid/rmid stores above before the loads + * in task_curr(). This pairs with the full barrier + * between the rq->curr update and resctrl_sched_in() + * during context switch. + */ + smp_mb(); + + /* + * If the task is on a CPU, set the CPU in the mask. + * The detection is inaccurate as tasks might move or + * schedule before the smp function call takes place. + * In such a case the function call is pointless, but * there is no other side effect. 
*/ - if (mask && t->on_cpu) + if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) cpumask_set_cpu(task_cpu(t), mask); -#endif } } read_unlock(&tasklist_lock); diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 91288da295995..37d48ab3d077c 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -96,6 +96,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c) unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width; unsigned int core_select_mask, core_level_siblings; unsigned int die_select_mask, die_level_siblings; + unsigned int pkg_mask_width; + bool die_level_present = false; int leaf; leaf = detect_extended_topology_leaf(c); @@ -110,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c) core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); - die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); sub_index = 1; - do { + while (true) { cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx); /* @@ -126,23 +128,33 @@ int detect_extended_topology(struct cpuinfo_x86 *c) die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) { + die_level_present = true; die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } + if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE) + pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + else + break; + sub_index++; - } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); + } - core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; + core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width; die_select_mask = (~(-1 << die_plus_mask_width)) >> core_plus_mask_width; c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; - c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, - core_plus_mask_width) & die_select_mask; + + if (die_level_present) { + c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, + core_plus_mask_width) & die_select_mask; + } + c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, - die_plus_mask_width); + pkg_mask_width); /* * Reinit the apicid, now that we have extended initial_apicid. */ diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c index e2ad30e474f82..da06bbb5d68e1 100644 --- a/arch/x86/kernel/cpu/tsx.c +++ b/arch/x86/kernel/cpu/tsx.c @@ -58,24 +58,6 @@ void tsx_enable(void) wrmsrl(MSR_IA32_TSX_CTRL, tsx); } -static bool __init tsx_ctrl_is_supported(void) -{ - u64 ia32_cap = x86_read_arch_cap_msr(); - - /* - * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this - * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. - * - * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a - * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES - * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get - * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, - * tsx= cmdline requests will do nothing on CPUs without - * MSR_IA32_TSX_CTRL support. - */ - return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR); -} - static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) { if (boot_cpu_has_bug(X86_BUG_TAA)) @@ -89,9 +71,22 @@ void __init tsx_init(void) char arg[5] = {}; int ret; - if (!tsx_ctrl_is_supported()) + /* + * TSX is controlled via MSR_IA32_TSX_CTRL. 
However, support for this + * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. + * + * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a + * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES + * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get + * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, + * tsx= cmdline requests will do nothing on CPUs without + * MSR_IA32_TSX_CTRL support. + */ + if (!(x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR)) return; + setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL); + ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg)); if (ret >= 0) { if (!strcmp(arg, "on")) { diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 97aa900386cbb..b4964300153a1 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -351,7 +351,7 @@ unsigned long oops_begin(void) } NOKPROBE_SYMBOL(oops_begin); -void __noreturn rewind_stack_do_exit(int signr); +void __noreturn rewind_stack_and_make_dead(int signr); void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { @@ -386,7 +386,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) * reuse the task stack and that existing poisons are invalid. */ kasan_unpoison_task_stack(current); - rewind_stack_do_exit(signr); + rewind_stack_and_make_dead(signr); } NOKPROBE_SYMBOL(oops_end); diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 701f196d7c686..3c0a621b97921 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -138,9 +138,6 @@ static void __init fpu__init_system_generic(void) unsigned int fpu_kernel_xstate_size; EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size); -/* Get alignment of the TYPE. */ -#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test) - /* * Enforce that 'MEMBER' is the last field of 'TYPE'. * @@ -148,8 +145,8 @@ EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size); * because that's how C aligns structs. 
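/*
 * [Editor's sketch -- not part of the patch] The CHECK_MEMBER_AT_END_OF()
 * rewrite in the hunk that follows swaps the GNU-C offsetof trick for the
 * C11 _Alignof operator; both compute the same value, as this standalone
 * check shows (GNU C tolerates the type-in-expression trick):
 */
#include <stddef.h>
#include <stdio.h>

#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)

int main(void)
{
	printf("double:    trick=%zu _Alignof=%zu\n",
	       TYPE_ALIGN(double), (size_t)_Alignof(double));
	printf("long long: trick=%zu _Alignof=%zu\n",
	       TYPE_ALIGN(long long), (size_t)_Alignof(long long));
	return 0;
}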
*/ #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \ - BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \ - TYPE_ALIGN(TYPE))) + BUILD_BUG_ON(sizeof(TYPE) != \ + ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE))) /* * We append the 'struct fpu' to the task_struct: diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 9a8633a6506ca..6d546f4426ac3 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -219,7 +219,9 @@ void ftrace_replace_code(int enable) ret = ftrace_verify_code(rec->ip, old); if (ret) { + ftrace_expected = old; ftrace_bug(ret, rec); + ftrace_expected = NULL; return; } } @@ -322,12 +324,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) unsigned long offset; unsigned long npages; unsigned long size; - unsigned long retq; unsigned long *ptr; void *trampoline; void *ip; /* 48 8b 15 is movq (%rip), %rdx */ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; + unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE }; union ftrace_op_code_union op_ptr; int ret; @@ -367,13 +369,10 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) ip = trampoline + size; /* The trampoline ends with ret(q) */ - retq = (unsigned long)ftrace_stub; if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE); else - ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE); - if (WARN_ON(ret < 0)) - goto fail; + memcpy(ip, retq, sizeof(retq)); /* No need to test direct calls on created trampolines */ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index e3a375185a1b4..5b2dabedcf664 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -170,7 +170,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) /* * This is weak to keep gas from relaxing the jumps. - * It is also used to copy the RET for trampolines. */ SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) UNWIND_HINT_FUNC @@ -325,7 +324,7 @@ SYM_FUNC_END(ftrace_graph_caller) SYM_CODE_START(return_to_handler) UNWIND_HINT_EMPTY - subq $24, %rsp + subq $16, %rsp /* Save the return values */ movq %rax, (%rsp) @@ -337,7 +336,19 @@ SYM_CODE_START(return_to_handler) movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax - addq $24, %rsp - JMP_NOSPEC rdi + + addq $16, %rsp + /* + * Jump back to the old return address. This cannot be JMP_NOSPEC rdi + * since IBT would demand that contain ENDBR, which simply isn't so for + * return addresses. Use a retpoline here to keep the RSB balanced. 
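/*
 * [Editor's note -- not part of the patch] The .Ldo_rop sequence that
 * follows emulates "jmp *%rdi" without an indirect branch: the call pushes
 * a return address (adding an RSB entry), mov overwrites that stack slot
 * with the real target, and RET consumes it, keeping the RSB balanced.
 * The int3 traps any straight-line speculation past the call:
 *
 *	call 1f			# push next address; RSB gains one entry
 *	int3			# speculation trap
 * 1:	mov %rdi, (%rsp)	# replace return address with the target
 *	ret			# "return" to *%rdi; RSB entry consumed
 */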
+ */ + ANNOTATE_INTRA_FUNCTION_CALL + call .Ldo_rop + int3 +.Ldo_rop: + mov %rdi, (%rsp) + UNWIND_HINT_FUNC + RET SYM_CODE_END(return_to_handler) #endif diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 4ab7a9757e521..574df24a8e5a4 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -1325,8 +1325,12 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) hpet_rtc_timer_reinit(); memset(&curr_time, 0, sizeof(struct rtc_time)); - if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) - mc146818_get_time(&curr_time); + if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) { + if (unlikely(mc146818_get_time(&curr_time) < 0)) { + pr_err_ratelimited("unable to read current time from RTC\n"); + return IRQ_HANDLED; + } + } if (hpet_rtc_flags & RTC_UIE && curr_time.tm_sec != hpet_prev_update_sec) { diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 282b4ee1339f8..f325389d03516 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq) disable_irq_nosync(irq); io_apic_irqs &= ~(1<init(0); - for (i = 0; i < nr_legacy_irqs(); i++) + for (i = 0; i < nr_legacy_irqs(); i++) { irq_set_chip_and_handler(i, chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } } void __init init_IRQ(void) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index ee85f1b258d0a..5de757099186c 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -292,6 +293,8 @@ static int can_probe(unsigned long paddr) /* Decode instructions */ addr = paddr - offset; while (addr < paddr) { + int ret; + /* * Check if the instruction has been modified by another * kprobe, in which case we replace the breakpoint by the @@ -303,15 +306,20 @@ static int can_probe(unsigned long paddr) __addr = recover_probed_instruction(buf, addr); if (!__addr) return 0; - kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); - insn_get_length(&insn); + ret = insn_decode(&insn, (void *)__addr, MAX_INSN_SIZE, INSN_MODE_KERN); + if (ret < 0) + return 0; + +#ifdef CONFIG_KGDB /* - * Another debugging subsystem might insert this breakpoint. - * In that case, we can't recover it. + * If there is a dynamically installed kgdb sw breakpoint, + * this function should not be probed. 
*/ - if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) + if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && + kgdb_has_hit_break(addr)) return 0; +#endif addr += insn.length; } @@ -347,8 +355,8 @@ static int is_IF_modifier(kprobe_opcode_t *insn) int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) { kprobe_opcode_t buf[MAX_INSN_SIZE]; - unsigned long recovered_insn = - recover_probed_instruction(buf, (unsigned long)src); + unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src); + int ret; if (!recovered_insn || !insn) return 0; @@ -358,8 +366,9 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) MAX_INSN_SIZE)) return 0; - kernel_insn_init(insn, dest, MAX_INSN_SIZE); - insn_get_length(insn); + ret = insn_decode(insn, dest, MAX_INSN_SIZE, INSN_MODE_KERN); + if (ret < 0) + return 0; /* We can not probe force emulate prefixed instruction */ if (insn_has_emulate_prefix(insn)) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 08eb23074f92f..3d62014920064 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -272,19 +273,6 @@ static int insn_is_indirect_jump(struct insn *insn) return ret; } -static bool is_padding_int3(unsigned long addr, unsigned long eaddr) -{ - unsigned char ops; - - for (; addr < eaddr; addr++) { - if (get_kernel_nofault(ops, (void *)addr) < 0 || - ops != INT3_INSN_OPCODE) - return false; - } - - return true; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -312,6 +300,8 @@ static int can_optimize(unsigned long paddr) addr = paddr - offset; while (addr < paddr - offset + size) { /* Decode until function end */ unsigned long recovered_insn; + int ret; + if (search_exception_tables(addr)) /* * Since some fixup code will jumps into this function, @@ -321,16 +311,19 @@ static int can_optimize(unsigned long paddr) recovered_insn = recover_probed_instruction(buf, addr); if (!recovered_insn) return 0; - kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); - insn_get_length(&insn); + + ret = insn_decode(&insn, (void *)recovered_insn, MAX_INSN_SIZE, INSN_MODE_KERN); + if (ret < 0) + return 0; +#ifdef CONFIG_KGDB /* - * In the case of detecting unknown breakpoint, this could be - * a padding INT3 between functions. Let's check that all the - * rest of the bytes are also INT3. + * If there is a dynamically installed kgdb sw breakpoint, + * this function should not be probed. */ - if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) - return is_padding_int3(addr, paddr - offset + size) ? 1 : 0; - + if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && + kgdb_has_hit_break(addr)) + return 0; +#endif /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4505d845daba6..5e17c3939dd1c 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -178,6 +178,23 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, task_user_gs(p) = get_user_gs(current_pt_regs()); #endif + if (unlikely(p->flags & PF_IO_WORKER)) { + /* + * An IO thread is a user space thread, but it doesn't + * return to ret_after_fork(). + * + * In order to indicate that to tools like gdb, + * we reset the stack and instruction pointers. 
+ * + * It does the same kernel frame setup to return to a kernel + * function that a kernel thread does. + */ + childregs->sp = 0; + childregs->ip = 0; + kthread_frame_init(frame, sp, arg); + return 0; + } + /* Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) ret = set_new_tls(p, tls); @@ -556,7 +573,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, } if (updmsr) - write_spec_ctrl_current(msr, false); + update_spec_ctrl_cond(msr); } static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b001ba811cabb..9eff481715320 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -798,11 +798,11 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -void arch_do_signal(struct pt_regs *regs) +void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { struct ksignal ksig; - if (get_signal(&ksig)) { + if (has_signal && get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ handle_signal(&ksig, regs); return; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8baff500914ea..e8e5515fb7e9c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -229,6 +229,7 @@ static void notrace start_secondary(void *unused) #endif cpu_init_exception_handling(); cpu_init(); + rcu_cpu_starting(raw_smp_processor_id()); x86_cpuinit.early_percpu_clock_init(); smp_callin(); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index cc071c4c65240..d557a545f4bc5 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -697,7 +697,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, /* Otherwise, skip ahead to the user-specified starting frame: */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame, sizeof(long)) || - state->sp < (unsigned long)first_frame)) + state->sp <= (unsigned long)first_frame)) unwind_next_frame(state); return; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 138bdb1fd1360..9f948b2d26f6b 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) switch (opc1) { case 0xeb: /* jmp 8 */ case 0xe9: /* jmp 32 */ - case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ break; + case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ + goto setup; case 0xe8: /* call relative */ branch_clear_offset(auprobe, insn); @@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) return -ENOTSUPP; } +setup: auprobe->branch.opc1 = opc1; auprobe->branch.ilen = insn->length; auprobe->branch.offs = insn->immediate.value; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 6e1ea5e85e598..de4b171cb76bc 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -511,16 +511,22 @@ struct kvm_cpuid_array { int nent; }; +static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array) +{ + if (array->nent >= array->maxnent) + return NULL; + + return &array->entries[array->nent++]; +} + static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, u32 function, u32 index) { - struct kvm_cpuid_entry2 *entry; + struct kvm_cpuid_entry2 *entry = get_next_cpuid(array); - if (array->nent >= 
array->maxnent) + if (!entry) return NULL; - entry = &array->entries[array->nent++]; - entry->function = function; entry->index = index; entry->flags = 0; @@ -661,8 +667,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->edx = 0; } break; - case 9: - break; case 0xa: { /* Architectural Performance Monitoring */ struct x86_pmu_capability cap; union cpuid10_eax eax; @@ -700,22 +704,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->edx = edx.full; break; } - /* - * Per Intel's SDM, the 0x1f is a superset of 0xb, - * thus they can be handled by common code. - */ case 0x1f: case 0xb: /* - * Populate entries until the level type (ECX[15:8]) of the - * previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is - * the starting entry, filled by the primary do_host_cpuid(). + * No topology; a valid topology is indicated by the presence + * of subleaf 1. */ - for (i = 1; entry->ecx & 0xff00; ++i) { - entry = do_host_cpuid(array, function, i); - if (!entry) - goto out; - } + entry->eax = entry->ebx = entry->ecx = 0; break; case 0xd: entry->eax &= supported_xcr0; @@ -815,11 +810,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = min(entry->eax, 0x8000001f); break; case 0x80000001: + entry->ebx &= ~GENMASK(27, 16); cpuid_entry_override(entry, CPUID_8000_0001_EDX); cpuid_entry_override(entry, CPUID_8000_0001_ECX); break; case 0x80000006: - /* L2 cache and TLB: pass through host info. */ + /* Drop reserved bits, pass host L2 cache and TLB info. */ + entry->edx &= ~GENMASK(17, 16); break; case 0x80000007: /* Advanced power management */ /* invariant TSC is CPUID.80000007H:EDX[8] */ @@ -842,6 +839,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) g_phys_as = phys_as; entry->eax = g_phys_as | (virt_as << 8); + entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); break; @@ -861,7 +859,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->ecx = entry->edx = 0; break; case 0x8000001a: + entry->eax &= GENMASK(2, 0); + entry->ebx = entry->ecx = entry->edx = 0; + break; case 0x8000001e: + /* Do not return host topology information. 
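/*
 * [Editor's sketch -- not part of the patch] After the cpuid.c change
 * above, KVM_GET_SUPPORTED_CPUID reports leaves 0xb/0x1f (and 0x8000001e)
 * with zeroed topology fields instead of leaking host topology. A
 * hypothetical userspace probe (assumes /dev/kvm and <linux/kvm.h>):
 */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int main(void)
{
	struct { struct kvm_cpuid2 hdr; struct kvm_cpuid_entry2 ent[256]; } c;
	int kvm = open("/dev/kvm", O_RDWR), i;

	memset(&c, 0, sizeof(c));
	c.hdr.nent = 256;
	if (kvm < 0 || ioctl(kvm, KVM_GET_SUPPORTED_CPUID, &c) < 0)
		return 1;
	for (i = 0; i < (int)c.hdr.nent; i++)
		if (c.ent[i].function == 0xb || c.ent[i].function == 0x1f)
			printf("leaf 0x%x.%u: eax=%x ebx=%x ecx=%x\n",
			       c.ent[i].function, c.ent[i].index,
			       c.ent[i].eax, c.ent[i].ebx, c.ent[i].ecx);
	return 0;
}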
*/ + entry->eax = entry->ebx = entry->ecx = 0; + entry->edx = 0; /* reserved */ break; /* Support memory encryption cpuid if host supports it */ case 0x8000001F: diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 2aa41d682bb2c..63efccc8f4292 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -796,8 +796,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt, ctxt->mode, linear); } -static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, - enum x86emul_mode mode) +static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) { ulong linear; int rc; @@ -807,41 +806,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); - rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); + rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; } +static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt) +{ + u64 efer; + struct desc_struct cs; + u16 selector; + u32 base3; + + ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); + + if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) { + /* Real mode. cpu must not have long mode active */ + if (efer & EFER_LMA) + return X86EMUL_UNHANDLEABLE; + ctxt->mode = X86EMUL_MODE_REAL; + return X86EMUL_CONTINUE; + } + + if (ctxt->eflags & X86_EFLAGS_VM) { + /* Protected/VM86 mode. cpu must not have long mode active */ + if (efer & EFER_LMA) + return X86EMUL_UNHANDLEABLE; + ctxt->mode = X86EMUL_MODE_VM86; + return X86EMUL_CONTINUE; + } + + if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS)) + return X86EMUL_UNHANDLEABLE; + + if (efer & EFER_LMA) { + if (cs.l) { + /* Proper long mode */ + ctxt->mode = X86EMUL_MODE_PROT64; + } else if (cs.d) { + /* 32 bit compatibility mode*/ + ctxt->mode = X86EMUL_MODE_PROT32; + } else { + ctxt->mode = X86EMUL_MODE_PROT16; + } + } else { + /* Legacy 32 bit / 16 bit mode */ + ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; + } + + return X86EMUL_CONTINUE; +} + static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { - return assign_eip(ctxt, dst, ctxt->mode); + return assign_eip(ctxt, dst); } -static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, - const struct desc_struct *cs_desc) +static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst) { - enum x86emul_mode mode = ctxt->mode; - int rc; + int rc = emulator_recalc_and_set_mode(ctxt); -#ifdef CONFIG_X86_64 - if (ctxt->mode >= X86EMUL_MODE_PROT16) { - if (cs_desc->l) { - u64 efer = 0; + if (rc != X86EMUL_CONTINUE) + return rc; - ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); - if (efer & EFER_LMA) - mode = X86EMUL_MODE_PROT64; - } else - mode = X86EMUL_MODE_PROT32; /* temporary value */ - } -#endif - if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32) - mode = cs_desc->d ? 
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; - rc = assign_eip(ctxt, dst, mode); - if (rc == X86EMUL_CONTINUE) - ctxt->mode = mode; - return rc; + return assign_eip(ctxt, dst); } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) @@ -2039,7 +2068,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; - if (ctxt->modrm_reg == VCPU_SREG_SS) + if (seg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; if (ctxt->op_bytes > 2) rsp_increment(ctxt, ctxt->op_bytes - 2); @@ -2256,7 +2285,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; - rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); + rc = assign_eip_far(ctxt, ctxt->src.val); /* Error handling is not implemented. */ if (rc != X86EMUL_CONTINUE) return X86EMUL_UNHANDLEABLE; @@ -2337,7 +2366,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) &new_desc); if (rc != X86EMUL_CONTINUE) return rc; - rc = assign_eip_far(ctxt, eip, &new_desc); + rc = assign_eip_far(ctxt, eip); /* Error handling is not implemented. */ if (rc != X86EMUL_CONTINUE) return X86EMUL_UNHANDLEABLE; @@ -2957,6 +2986,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; + ctxt->mode = usermode; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; @@ -3553,7 +3583,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; - rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); + rc = assign_eip_far(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) goto fail; @@ -3695,11 +3725,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt) static int em_cr_write(struct x86_emulate_ctxt *ctxt) { - if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) + int cr_num = ctxt->modrm_reg; + int r; + + if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val)) return emulate_gp(ctxt, 0); /* Disable writeback. */ ctxt->dst.type = OP_NONE; + + if (cr_num == 0) { + /* + * CR0 write might have updated CR0.PE and/or CR0.PG + * which can affect the cpu's execution mode. 
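/*
 * [Editor's sketch -- not part of the patch] emulator_recalc_and_set_mode()
 * above derives the execution mode from CR0.PE, EFLAGS.VM, EFER.LMA and the
 * CS descriptor's L/D bits. The same decision tree as a pure function
 * (illustrative names):
 */
enum emul_mode { MODE_REAL, MODE_VM86, MODE_PROT16, MODE_PROT32,
		 MODE_PROT64, MODE_INVALID };

static enum emul_mode recalc_mode_sketch(int cr0_pe, int eflags_vm,
					 int efer_lma, int cs_l, int cs_d)
{
	if (!cr0_pe)				/* real mode */
		return efer_lma ? MODE_INVALID : MODE_REAL;
	if (eflags_vm)				/* virtual-8086 */
		return efer_lma ? MODE_INVALID : MODE_VM86;
	if (efer_lma)				/* long mode active */
		return cs_l ? MODE_PROT64 :
		       (cs_d ? MODE_PROT32 : MODE_PROT16);
	return cs_d ? MODE_PROT32 : MODE_PROT16; /* legacy protected */
}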
+ */ + r = emulator_recalc_and_set_mode(ctxt); + if (r != X86EMUL_CONTINUE) + return r; + } + return X86EMUL_CONTINUE; } diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7397cc449e2fc..c2b34998c27df 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void) sev_flush_asids(); } +void sev_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!sev_guest(kvm)) + return; + + wbinvd_on_all_cpus(); +} + void pre_sev_run(struct vcpu_svm *svm, int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 442705517caf4..5775983fec56e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -305,12 +305,6 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) return 0; } -static int is_external_interrupt(u32 info) -{ - info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; - return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); -} - static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1357,6 +1351,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) */ svm_clear_current_vmcb(svm->vmcb); + svm_leave_nested(vcpu); svm_free_nested(svm); __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); @@ -2475,9 +2470,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) msr->data = 0; switch (msr->index) { - case MSR_F10H_DECFG: - if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) - msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; + case MSR_AMD64_DE_CFG: + if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) + msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; break; case MSR_IA32_PERF_CAPABILITIES: return 0; @@ -2584,7 +2579,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0x1E; } break; - case MSR_F10H_DECFG: + case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break; default: @@ -2764,7 +2759,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; - case MSR_F10H_DECFG: { + case MSR_AMD64_DE_CFG: { struct kvm_msr_entry msr_entry; msr_entry.index = msr->index; @@ -3115,15 +3110,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) return 0; } - if (is_external_interrupt(svm->vmcb->control.exit_int_info) && - exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && - exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && - exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI) - printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " - "exit_code 0x%x\n", - __func__, svm->vmcb->control.exit_int_info, - exit_code); - if (exit_fastpath != EXIT_FASTPATH_NONE) return 1; @@ -3494,8 +3480,14 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { - if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && - to_svm(vcpu)->vmcb->control.exit_info_1) + struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; + + /* + * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM + * can't read guest memory (dereference memslots) to decode the WRMSR. 
+ */ + if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 && + nrips && control->next_rip) return handle_fastpath_set_msr_irqoff(vcpu); return EXIT_FASTPATH_NONE; @@ -4325,6 +4317,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + .guest_memory_reclaimed = sev_guest_memory_reclaimed, .can_emulate_instruction = svm_can_emulate_instruction, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 10aba1dd264ed..f62d13fc6e01f 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +void sev_guest_memory_reclaimed(struct kvm *kvm); + void pre_sev_run(struct vcpu_svm *svm, int cpu); int __init sev_hardware_setup(void); void sev_hardware_teardown(void); diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index a2835d784f4be..3d4988ea8b572 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -304,25 +304,29 @@ TRACE_EVENT(kvm_inj_virq, * Tracepoint for kvm interrupt injection: */ TRACE_EVENT(kvm_inj_exception, - TP_PROTO(unsigned exception, bool has_error, unsigned error_code), - TP_ARGS(exception, has_error, error_code), + TP_PROTO(unsigned exception, bool has_error, unsigned error_code, + bool reinjected), + TP_ARGS(exception, has_error, error_code, reinjected), TP_STRUCT__entry( __field( u8, exception ) __field( u8, has_error ) __field( u32, error_code ) + __field( bool, reinjected ) ), TP_fast_assign( __entry->exception = exception; __entry->has_error = has_error; __entry->error_code = error_code; + __entry->reinjected = reinjected; ), - TP_printk("%s (0x%x)", + TP_printk("%s (0x%x)%s", __print_symbolic(__entry->exception, kvm_trace_sym_exc), /* FIXME: don't print error_code if not present */ - __entry->has_error ? __entry->error_code : 0) + __entry->has_error ? __entry->error_code : 0, + __entry->reinjected ? 
" [reinjected]" : "") ); /* diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 6c4277e99d586..91371b01eae0c 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2232,7 +2232,8 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, } } -static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, + struct vmcs12 *vmcs12) { u32 exec_control, vmcs12_exec_ctrl; u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); @@ -2243,7 +2244,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) /* * PIN CONTROLS */ - exec_control = vmx_pin_based_exec_ctrl(vmx); + exec_control = __pin_controls_get(vmcs01); exec_control |= (vmcs12->pin_based_vm_exec_control & ~PIN_BASED_VMX_PREEMPTION_TIMER); @@ -2258,7 +2259,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) /* * EXEC CONTROLS */ - exec_control = vmx_exec_control(vmx); /* L0's desires */ + exec_control = __exec_controls_get(vmcs01); /* L0's desires */ exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; exec_control &= ~CPU_BASED_TPR_SHADOW; @@ -2295,17 +2296,20 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) * SECONDARY EXEC CONTROLS */ if (cpu_has_secondary_exec_ctrls()) { - exec_control = vmx->secondary_exec_control; + exec_control = __secondary_exec_controls_get(vmcs01); /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_ENABLE_RDTSCP | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_ENABLE_VMFUNC); + SECONDARY_EXEC_ENABLE_VMFUNC | + SECONDARY_EXEC_DESC); + if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & @@ -2341,9 +2345,15 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate * on the related bits (if supported by the CPU) in the hope that * we can avoid VMWrites during vmx_set_efer(). + * + * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is + * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to + * do the same for L2. */ - exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & - ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; + exec_control = __vm_entry_controls_get(vmcs01); + exec_control |= (vmcs12->vm_entry_controls & + ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); + exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER); if (cpu_has_load_ia32_efer()) { if (guest_efer & EFER_LMA) exec_control |= VM_ENTRY_IA32E_MODE; @@ -2359,9 +2369,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 
*/ - exec_control = vmx_vmexit_ctrl(); + exec_control = __vm_exit_controls_get(vmcs01); if (cpu_has_load_ia32_efer() && guest_efer != host_efer) exec_control |= VM_EXIT_LOAD_IA32_EFER; + else + exec_control &= ~VM_EXIT_LOAD_IA32_EFER; vm_exit_controls_set(vmx, exec_control); /* @@ -3370,7 +3382,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); - prepare_vmcs02_early(vmx, vmcs12); + prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); if (from_vmentry) { if (unlikely(!nested_get_vmcs12_pages(vcpu))) { @@ -3776,7 +3788,16 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, u32 intr_info = nr | INTR_INFO_VALID_MASK; if (vcpu->arch.exception.has_error_code) { - vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; + /* + * Intel CPUs do not generate error codes with bits 31:16 set, + * and more importantly VMX disallows setting bits 31:16 in the + * injected error code for VM-Entry. Drop the bits to mimic + * hardware and avoid inducing failure on nested VM-Entry if L1 + * chooses to inject the exception back to L2. AMD CPUs _do_ + * generate "full" 32-bit error codes, so KVM allows userspace + * to inject exception error codes with bits 31:16 set. + */ + vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code; intr_info |= INTR_INFO_DELIVER_CODE_MASK; } @@ -4183,14 +4204,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); } - - /* - * Drop what we picked up for L2 via vmx_complete_interrupts. It is - * preserved above and would only end up incorrectly in L1. - */ - vcpu->arch.nmi_injected = false; - kvm_clear_exception_queue(vcpu); - kvm_clear_interrupt_queue(vcpu); } /* @@ -4530,8 +4543,30 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, WARN_ON_ONCE(nested_early_check); } + /* + * Drop events/exceptions that were queued for re-injection to L2 + * (picked up via vmx_complete_interrupts()), as well as exceptions + * that were pending for L2. Note, this must NOT be hoisted above + * prepare_vmcs12(), events/exceptions queued for re-injection need to + * be captured in vmcs12 (see vmcs12_save_pending_event()). + */ + vcpu->arch.nmi_injected = false; + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + /* + * If IBRS is advertised to the vCPU, KVM must flush the indirect + * branch predictors when transitioning from L2 to L1, as L1 expects + * hardware (KVM in this case) to provide separate predictor modes. + * Bare metal isolates VMX root (host) from VMX non-root (guest), but + * doesn't isolate different VMCSs, i.e. in this case, doesn't provide + * separate modes for L2 vs L1. + */ + if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + indirect_branch_prediction_barrier(); + /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); @@ -4877,24 +4912,35 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; /* - * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks - * that have higher priority than VM-Exit (see Intel SDM's pseudocode - * for VMXON), as KVM must load valid CR0/CR4 values into hardware while - * running the guest, i.e. KVM needs to check the _guest_ values. 
+ * Manually check CR4.VMXE: KVM must force CR4.VMXE=1 to enter + * the guest and so cannot rely on hardware to perform the check, + * which has higher priority than VM-Exit (see Intel SDM's pseudocode + * for VMXON). * - * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and - * !COMPATIBILITY modes. KVM may run the guest in VM86 to emulate Real - * Mode, but KVM will never take the guest out of those modes. + * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 + * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't + * force any of the relevant guest state. For a restricted guest, KVM + * does force CR0.PE=1, but only to also force VM86 in order to emulate + * Real Mode, and so there's no need to check CR0.PE manually. */ - if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || - !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { + if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* - * CPL=0 and all other checks that are lower priority than VM-Exit must - * be checked manually. + * The CPL is checked for "not in VMX operation" and for "in VMX root", + * and has higher priority than the VM-Fail due to being post-VMXON, + * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, + * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits + * from L2 to L1, i.e. there's no need to check for the vCPU being in + * VMX non-root. + * + * Forwarding the VM-Exit unconditionally, i.e. without performing the + * #UD checks (see above), is functionally ok because KVM doesn't allow + * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's + * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are + * missed by hardware due to shadowing CR0 and/or CR4. */ if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); @@ -4904,6 +4950,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu) if (vmx->nested.vmxon) return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); + /* + * Invalid CR0/CR4 generates #GP. These checks are performed if and + * only if the vCPU isn't already in VMX operation, i.e. effectively + * have lower priority than the VM-Fail above. + */ + if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || - !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b33d0f283d4f8..c37cbd3fdd852 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1431,8 +1431,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, /* * No indirect branch prediction barrier needed when switching - * the active VMCS within a guest, e.g. on nested VM-Enter. - * The L1 VMM can protect itself with retpolines, IBPB or IBRS. + * the active VMCS within a vCPU, unless IBRS is advertised to + * the vCPU. To minimize the number of IBPBs executed, KVM + * performs IBPB on nested VM-Exit (a single nested transition + * may switch the active VMCS multiple times). 
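/*
 * [Editor's note -- not part of the patch] Net effect of the handle_vmon()
 * reordering above, summarized: CR4.VMXE=0 now yields #UD first; then
 * CPL!=0 yields #GP; then an already-post-VMXON vCPU gets the VM-Fail; and
 * only afterwards are the CR0/CR4 fixed-bit checks performed, now raising
 * #GP instead of VM-Fail to match the SDM's priority ordering.
 */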
*/ if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) indirect_branch_prediction_barrier(); @@ -1737,7 +1739,17 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) kvm_deliver_exception_payload(vcpu); if (has_error_code) { - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); + /* + * Despite the error code being architecturally defined as 32 + * bits, and the VMCS field being 32 bits, Intel CPUs and thus + * VMX don't actually support setting bits 31:16. Hardware + * will (should) never provide a bogus error code, but AMD CPUs + * do generate error codes with bits 31:16 set, and so KVM's + * ABI lets userspace shove in arbitrary 32-bit values. Drop + * the upper bits to avoid VM-Fail; losing information that + * doesn't really exist is preferable to killing the VM. + */ + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; } @@ -3322,18 +3334,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; - if (var->unusable || !var->present) - ar = 1 << 16; - else { - ar = var->type & 15; - ar |= (var->s & 1) << 4; - ar |= (var->dpl & 3) << 5; - ar |= (var->present & 1) << 7; - ar |= (var->avl & 1) << 12; - ar |= (var->l & 1) << 13; - ar |= (var->db & 1) << 14; - ar |= (var->g & 1) << 15; - } + ar = var->type & 15; + ar |= (var->s & 1) << 4; + ar |= (var->dpl & 3) << 5; + ar |= (var->present & 1) << 7; + ar |= (var->avl & 1) << 12; + ar |= (var->l & 1) << 13; + ar |= (var->db & 1) << 14; + ar |= (var->g & 1) << 15; + ar |= (var->unusable || !var->present) << 16; return ar; } diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 24903f05c204b..ed4b6da83aa87 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -386,9 +386,13 @@ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \ vmx->loaded_vmcs->controls_shadow.lname = val; \ } \ } \ +static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs) \ +{ \ + return vmcs->controls_shadow.lname; \ +} \ static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \ { \ - return vmx->loaded_vmcs->controls_shadow.lname; \ + return __##lname##_controls_get(vmx->loaded_vmcs); \ } \ static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \ { \ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c5a08ec348e6f..0ccc8d1b972c9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -459,6 +459,7 @@ static int exception_class(int vector) #define EXCPT_TRAP 1 #define EXCPT_ABORT 2 #define EXCPT_INTERRUPT 3 +#define EXCPT_DB 4 static int exception_type(int vector) { @@ -469,8 +470,14 @@ static int exception_type(int vector) mask = 1 << vector; - /* #DB is trap, as instruction watchpoints are handled elsewhere */ - if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) + /* + * #DBs can be trap-like or fault-like; the caller must check other CPU + * state, e.g. DR6, to determine whether a #DB is a trap or fault.
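
The new EXCPT_DB class defers the trap-vs-fault decision to the caller. A sketch of how a caller might classify a #DB from DR6 (illustration only; bit positions per the SDM, and distinguishing code vs. data breakpoints for B0-B3 would additionally require DR7):

    #include <stdbool.h>
    #include <stdint.h>

    #define DR6_BD (1u << 13)  /* general detect (debug-register access) */
    #define DR6_BS (1u << 14)  /* single step */
    #define DR6_BT (1u << 15)  /* task switch */

    /* Single-step, task-switch and data-breakpoint #DBs are trap-like
     * (RIP points past the insn); code-breakpoint and general detect #DBs
     * are fault-like (RIP points at the insn). */
    static bool db_is_trap_like(uint32_t dr6, bool hit_is_data_bp)
    {
        if (dr6 & DR6_BD)
            return false;           /* general detect: fault-like */
        if (dr6 & (DR6_BS | DR6_BT))
            return true;            /* single step / task switch */
        return hit_is_data_bp;      /* code bp: fault, data bp: trap */
    }
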
+ */ + if (mask & (1 << DB_VECTOR)) + return EXCPT_DB; + + if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR))) return EXCPT_TRAP; if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) @@ -1355,7 +1362,7 @@ static const u32 msr_based_features_all[] = { MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_VMFUNC, - MSR_F10H_DECFG, + MSR_AMD64_DE_CFG, MSR_IA32_UCODE_REV, MSR_IA32_ARCH_CAPABILITIES, MSR_IA32_PERF_CAPABILITIES, @@ -4448,12 +4455,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, { unsigned long val; + memset(dbgregs, 0, sizeof(*dbgregs)); memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); kvm_get_dr(vcpu, 6, &val); dbgregs->dr6 = val; dbgregs->dr7 = vcpu->arch.dr7; - dbgregs->flags = 0; - memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, @@ -5353,6 +5359,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, r = 0; break; case KVM_CAP_X86_USER_SPACE_MSR: + r = -EINVAL; + if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | + KVM_MSR_EXIT_REASON_UNKNOWN | + KVM_MSR_EXIT_REASON_FILTER)) + break; kvm->arch.user_space_msr_mask = cap->args[0]; r = 0; break; @@ -5434,23 +5445,22 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, return r; } -static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) +static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, + struct kvm_msr_filter *filter) { - struct kvm_msr_filter __user *user_msr_filter = argp; struct kvm_x86_msr_filter *new_filter, *old_filter; - struct kvm_msr_filter filter; bool default_allow; bool empty = true; int r = 0; u32 i; - if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) - return -EFAULT; + if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) + return -EINVAL; - for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) - empty &= !filter.ranges[i].nmsrs; + for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) + empty &= !filter->ranges[i].nmsrs; - default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY); + default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); if (empty && !default_allow) return -EINVAL; @@ -5458,8 +5468,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) if (!new_filter) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { - r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); + for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { + r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); if (r) { kvm_free_msr_filter(new_filter); return r; @@ -5482,6 +5492,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) return 0; } +#ifdef CONFIG_KVM_COMPAT +/* for KVM_X86_SET_MSR_FILTER */ +struct kvm_msr_filter_range_compat { + __u32 flags; + __u32 nmsrs; + __u32 base; + __u32 bitmap; +}; + +struct kvm_msr_filter_compat { + __u32 flags; + struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; +}; + +#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) + +long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct kvm *kvm = filp->private_data; + long r = -ENOTTY; + + switch (ioctl) { + case KVM_X86_SET_MSR_FILTER_COMPAT: { + struct kvm_msr_filter __user *user_msr_filter = argp; + struct kvm_msr_filter_compat filter_compat; + struct kvm_msr_filter filter; + int i; + + if (copy_from_user(&filter_compat, user_msr_filter, + sizeof(filter_compat))) + return -EFAULT; + + filter.flags = 
filter_compat.flags; + for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { + struct kvm_msr_filter_range_compat *cr; + + cr = &filter_compat.ranges[i]; + filter.ranges[i] = (struct kvm_msr_filter_range) { + .flags = cr->flags, + .nmsrs = cr->nmsrs, + .base = cr->base, + .bitmap = (__u8 *)(ulong)cr->bitmap, + }; + } + + r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); + break; + } + } + + return r; +} +#endif + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -5788,9 +5854,16 @@ long kvm_arch_vm_ioctl(struct file *filp, case KVM_SET_PMU_EVENT_FILTER: r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); break; - case KVM_X86_SET_MSR_FILTER: - r = kvm_vm_ioctl_set_msr_filter(kvm, argp); + case KVM_X86_SET_MSR_FILTER: { + struct kvm_msr_filter __user *user_msr_filter = argp; + struct kvm_msr_filter filter; + + if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) + return -EFAULT; + + r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; + } default: r = -ENOTTY; } @@ -7461,7 +7534,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, write_fault_to_spt, emulation_type)) return 1; - if (ctxt->have_exception) { + + if (ctxt->have_exception && + !(emulation_type & EMULTYPE_SKIP)) { /* * #UD should result in just EMULATION_FAILED, and trap-like * exception should not be encountered during decode. */ @@ -7560,6 +7635,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; + + /* + * Note, EXCPT_DB is assumed to be fault-like as the emulator + * only supports code breakpoints and general detect #DB, both + * of which are fault-like. + */ if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) { kvm_rip_write(vcpu, ctxt->eip); @@ -8347,6 +8428,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) static void kvm_inject_exception(struct kvm_vcpu *vcpu) { + trace_kvm_inj_exception(vcpu->arch.exception.nr, + vcpu->arch.exception.has_error_code, + vcpu->arch.exception.error_code, + vcpu->arch.exception.injected); + if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) vcpu->arch.exception.error_code = false; kvm_x86_ops.queue_exception(vcpu); @@ -8404,13 +8490,16 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit /* try to inject new event if pending */ if (vcpu->arch.exception.pending) { - trace_kvm_inj_exception(vcpu->arch.exception.nr, - vcpu->arch.exception.has_error_code, - vcpu->arch.exception.error_code); - - vcpu->arch.exception.pending = false; - vcpu->arch.exception.injected = true; - + /* + * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS + * value pushed on the stack. Trap-like exceptions and all #DBs + * leave RF as-is (KVM follows Intel's behavior in this regard; + * AMD states that code breakpoint #DBs explicitly clear RF=0). + * + * Note, most versions of Intel's SDM and AMD's APM incorrectly + * describe the behavior of General Detect #DBs, which are + * fault-like. They do _not_ set RF, a la code breakpoints.
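
The RF rule described in the comment above, expressed as a standalone helper; a sketch, not KVM's code (EXCPT_DB exists as its own class precisely so #DBs are excluded from the RF=1 path):

    #include <stdint.h>

    #define X86_EFLAGS_RF (1u << 16)

    enum excpt_class { EXCPT_FAULT, EXCPT_TRAP, EXCPT_DB };

    /* Fault-class exceptions other than #DB set RF in the saved RFLAGS;
     * traps and all #DBs leave it untouched. */
    static uint64_t rflags_for_injection(uint64_t rflags, enum excpt_class c)
    {
        if (c == EXCPT_FAULT)
            rflags |= X86_EFLAGS_RF;
        return rflags;
    }
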
+ */ if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); @@ -8424,6 +8513,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit } kvm_inject_exception(vcpu); + + vcpu->arch.exception.pending = false; + vcpu->arch.exception.injected = true; + can_inject = false; } @@ -8875,6 +8968,12 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); } +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ + if (kvm_x86_ops.guest_memory_reclaimed) + kvm_x86_ops.guest_memory_reclaimed(kvm); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S index a1f9416bf67a5..6ff2f56cb0f71 100644 --- a/arch/x86/lib/iomap_copy_64.S +++ b/arch/x86/lib/iomap_copy_64.S @@ -10,6 +10,6 @@ */ SYM_FUNC_START(__iowrite32_copy) movl %edx,%ecx - rep movsd + rep movsl RET SYM_FUNC_END(__iowrite32_copy) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 91e61dbba3e0c..88cb537ccdea1 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -216,9 +216,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size, * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; - phys_addr &= PHYSICAL_PAGE_MASK; + phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; + /* + * Mask out any bits not part of the actual physical + * address, like memory encryption bits. + */ + phys_addr &= PHYSICAL_PAGE_MASK; + retval = memtype_reserve(phys_addr, (u64)phys_addr + size, pcm, &new_pcm); if (retval) { diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index d023c85e3c536..4e4e76ecd3ecd 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -516,15 +516,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c) static void pm_save_spec_msr(void) { - u32 spec_msr_id[] = { - MSR_IA32_SPEC_CTRL, - MSR_IA32_TSX_CTRL, - MSR_TSX_FORCE_ABORT, - MSR_IA32_MCU_OPT_CTRL, - MSR_AMD64_LS_CFG, + struct msr_enumeration { + u32 msr_no; + u32 feature; + } msr_enum[] = { + { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL }, + { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL }, + { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT }, + { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL }, + { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD }, + { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC }, }; + int i; - msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id)); + for (i = 0; i < ARRAY_SIZE(msr_enum); i++) { + if (boot_cpu_has(msr_enum[i].feature)) + msr_build_context(&msr_enum[i].msr_no, 1); + } } static int pm_check_save_msr(void) diff --git a/arch/x86/um/shared/sysdep/syscalls_32.h b/arch/x86/um/shared/sysdep/syscalls_32.h index 68fd2cf526fd7..f6e9f84397e79 100644 --- a/arch/x86/um/shared/sysdep/syscalls_32.h +++ b/arch/x86/um/shared/sysdep/syscalls_32.h @@ -6,10 +6,9 @@ #include #include -typedef long syscall_handler_t(struct pt_regs); +typedef long syscall_handler_t(struct syscall_args); extern syscall_handler_t *sys_call_table[]; #define EXECUTE_SYSCALL(syscall, regs) \ - ((long (*)(struct syscall_args)) \ - (*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) + ((*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c index ac8eee093f9cd..66162eafd8e8f 100644 --- a/arch/x86/um/tls_32.c +++ b/arch/x86/um/tls_32.c @@ -65,9 +65,6 @@ static int get_free_idx(struct 
task_struct* task) struct thread_struct *t = &task->thread; int idx; - if (!t->arch.tls_array) - return GDT_ENTRY_TLS_MIN; - for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) if (!t->arch.tls_array[idx].present) return idx + GDT_ENTRY_TLS_MIN; @@ -240,9 +237,6 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info, { struct thread_struct *t = &task->thread; - if (!t->arch.tls_array) - goto clear; - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile index 5943387e3f357..5ca366e15c767 100644 --- a/arch/x86/um/vdso/Makefile +++ b/arch/x86/um/vdso/Makefile @@ -62,7 +62,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' -VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv +VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack GCOV_PROFILE := n # diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 804c65d2b95f3..815030b7f6fa8 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -768,6 +768,7 @@ static void xen_load_idt(const struct desc_ptr *desc) { static DEFINE_SPINLOCK(lock); static struct trap_info traps[257]; + static const struct trap_info zero = { }; unsigned out; trace_xen_cpu_load_idt(desc); @@ -777,7 +778,7 @@ static void xen_load_idt(const struct desc_ptr *desc) memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); out = xen_convert_trap_info(desc, traps, false); - memset(&traps[out], 0, sizeof(traps[0])); + traps[out] = zero; xen_mc_flush(); if (HYPERVISOR_set_trap_table(traps)) diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index c1b2f764b29a2..cdec892b28e2e 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) void xen_smp_intr_free(unsigned int cpu) { + kfree(per_cpu(xen_resched_irq, cpu).name); + per_cpu(xen_resched_irq, cpu).name = NULL; if (per_cpu(xen_resched_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); per_cpu(xen_resched_irq, cpu).irq = -1; - kfree(per_cpu(xen_resched_irq, cpu).name); - per_cpu(xen_resched_irq, cpu).name = NULL; } + kfree(per_cpu(xen_callfunc_irq, cpu).name); + per_cpu(xen_callfunc_irq, cpu).name = NULL; if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); per_cpu(xen_callfunc_irq, cpu).irq = -1; - kfree(per_cpu(xen_callfunc_irq, cpu).name); - per_cpu(xen_callfunc_irq, cpu).name = NULL; } + kfree(per_cpu(xen_debug_irq, cpu).name); + per_cpu(xen_debug_irq, cpu).name = NULL; if (per_cpu(xen_debug_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); per_cpu(xen_debug_irq, cpu).irq = -1; - kfree(per_cpu(xen_debug_irq, cpu).name); - per_cpu(xen_debug_irq, cpu).name = NULL; } + kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); + per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, NULL); per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; - kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); - per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; } } @@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu) char *resched_name, *callfunc_name, *debug_name; resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); + per_cpu(xen_resched_irq, cpu).name = resched_name; rc = 
bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, xen_reschedule_interrupt, @@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_resched_irq, cpu).irq = rc; - per_cpu(xen_resched_irq, cpu).name = resched_name; callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); + per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu, xen_call_function_interrupt, @@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_callfunc_irq, cpu).irq = rc; - per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; if (!xen_fifo_events) { debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); + per_cpu(xen_debug_irq, cpu).name = debug_name; rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, IRQF_PERCPU | IRQF_NOBALANCING, @@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_debug_irq, cpu).irq = rc; - per_cpu(xen_debug_irq, cpu).name = debug_name; } callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu, xen_call_function_single_interrupt, @@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; - per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; return 0; diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 35b6d15d874d0..64873937cd1d7 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -98,18 +98,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void) void xen_smp_intr_free_pv(unsigned int cpu) { + kfree(per_cpu(xen_irq_work, cpu).name); + per_cpu(xen_irq_work, cpu).name = NULL; if (per_cpu(xen_irq_work, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); per_cpu(xen_irq_work, cpu).irq = -1; - kfree(per_cpu(xen_irq_work, cpu).name); - per_cpu(xen_irq_work, cpu).name = NULL; } + kfree(per_cpu(xen_pmu_irq, cpu).name); + per_cpu(xen_pmu_irq, cpu).name = NULL; if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); per_cpu(xen_pmu_irq, cpu).irq = -1; - kfree(per_cpu(xen_pmu_irq, cpu).name); - per_cpu(xen_pmu_irq, cpu).name = NULL; } } @@ -119,6 +119,7 @@ int xen_smp_intr_init_pv(unsigned int cpu) char *callfunc_name, *pmu_name; callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); + per_cpu(xen_irq_work, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu, xen_irq_work_interrupt, @@ -128,10 +129,10 @@ int xen_smp_intr_init_pv(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_irq_work, cpu).irq = rc; - per_cpu(xen_irq_work, cpu).name = callfunc_name; if (is_xen_pmu) { pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); + per_cpu(xen_pmu_irq, cpu).name = pmu_name; rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, xen_pmu_irq_handler, IRQF_PERCPU|IRQF_NOBALANCING, @@ -139,7 +140,6 @@ int xen_smp_intr_init_pv(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_pmu_irq, cpu).irq = rc; - per_cpu(xen_pmu_irq, cpu).name = pmu_name; } return 0; diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 043c73dfd2c98..5c6fc16e4b925 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu) cpu, per_cpu(lock_kicker_irq, cpu)); name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); + per_cpu(irq_name, cpu) = name; irq = 
bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, dummy_handler, @@ -85,7 +86,6 @@ if (irq >= 0) { disable_irq(irq); /* make sure it's never delivered */ per_cpu(lock_kicker_irq, cpu) = irq; - per_cpu(irq_name, cpu) = name; } printk("cpu %d spinlock event irq %d\n", cpu, irq); @@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu) if (!xen_pvspin) return; + kfree(per_cpu(irq_name, cpu)); + per_cpu(irq_name, cpu) = NULL; /* * When booting the kernel with 'mitigations=auto,nosmt', the secondary * CPUs are not activated, and lock_kicker_irq is not initialized. */ @@ -108,8 +110,6 @@ unbind_from_irqhandler(irq, NULL); per_cpu(lock_kicker_irq, cpu) = -1; - kfree(per_cpu(irq_name, cpu)); - per_cpu(irq_name, cpu) = NULL; } PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen); diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 6acbbe0d87d3d..a312333a9addc 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -111,18 +111,21 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ #define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */ -#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ #define TIF_DB_DISABLED 8 /* debug trap disabled for syscall */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ +#define TIF_MEMDIE 11 /* is terminating due to OOM killer */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) [remaining _TIF_* bit masks and the arch/xtensa/kernel/process.c copy_thread() diff header lost to angle-bracket stripping] p->thread.sp = (unsigned long)childregs; - if (!(p->flags & PF_KTHREAD)) { + if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { struct pt_regs *regs = current_pt_regs(); unsigned long usp = usp_thread_fn ?
usp_thread_fn : regs->areg[1]; diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 1cb230fafdf2b..f2b00f43cf236 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -498,7 +498,8 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index efc3a29cde803..129f23c0ab553 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -545,5 +545,5 @@ void die(const char * str, struct pt_regs * regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(err); + make_task_dead(err); } diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index be6733558b831..badb90352bf33 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -611,6 +611,10 @@ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) struct bfq_group *bfqg; while (blkg) { + if (!blkg->online) { + blkg = blkg->parent; + continue; + } bfqg = blkg_to_bfqg(blkg); if (bfqg->online) { bio_associate_blkg_from_css(bio, &blkg->blkcg->css); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 592d32a46c4c3..7c4b8d0635ebd 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -421,6 +421,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, */ void bfq_schedule_dispatch(struct bfq_data *bfqd) { + lockdep_assert_held(&bfqd->lock); + if (bfqd->queued != 0) { bfq_log(bfqd, "schedule dispatch"); blk_mq_run_hw_queues(bfqd->queue, true); @@ -6269,8 +6271,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_bfqq_expire(bfqd, bfqq, true, reason); schedule_dispatch: - spin_unlock_irqrestore(&bfqd->lock, flags); bfq_schedule_dispatch(bfqd); + spin_unlock_irqrestore(&bfqd->lock, flags); } /* diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 484c6b2dd264e..c623632c1cda0 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1370,6 +1370,10 @@ int blkcg_activate_policy(struct request_queue *q, list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) pol->pd_init_fn(blkg->pd[pol->plid]); + if (pol->pd_online_fn) + list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) + pol->pd_online_fn(blkg->pd[pol->plid]); + __set_bit(pol->plid, q->blkcg_pols); ret = 0; diff --git a/block/blk-core.c b/block/blk-core.c index 26664f2a139eb..9afb79b322fb0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -700,9 +700,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return false; - - WARN_ONCE(1, - "Trying to write to read-only block-device %s (partno %d)\n", + pr_warn("Trying to write to read-only block-device %s (partno %d)\n", bio_devname(bio, b), part->partno); /* Older lvm-tools actually trigger this */ return false; diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 7b52e7657b2d1..f0bc3398f3ed2 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -242,7 +242,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; - int i, ret; + int i, j, ret; if (!hctx->nr_ctx) return 0; @@ -254,9 +254,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) hctx_for_each_ctx(hctx, ctx, i) { ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", 
ctx->cpu); if (ret) - break; + goto out; } + return 0; +out: + hctx_for_each_ctx(hctx, ctx, j) { + if (j < i) + kobject_del(&ctx->kobj); + } + kobject_del(&hctx->kobj); return ret; } diff --git a/block/blk-mq.c b/block/blk-mq.c index cfc039fabf8ce..e37ba792902af 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -105,7 +105,8 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, { struct mq_inflight *mi = priv; - if (rq->part == mi->part && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) + if ((!mi->part->partno || rq->part == mi->part) && + blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) mi->inflight[rq_data_dir(rq)]++; return true; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index c53a254171a29..c526fdd0a7b90 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -944,7 +944,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, u64 bps_limit, unsigned long *wait) { bool rw = bio_data_dir(bio); - u64 bytes_allowed, extra_bytes, tmp; + u64 bytes_allowed, extra_bytes; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; unsigned int bio_size = throtl_bio_data_size(bio); @@ -961,10 +961,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, jiffy_elapsed_rnd = tg->td->throtl_slice; jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); - - tmp = bps_limit * jiffy_elapsed_rnd; - do_div(tmp, HZ); - bytes_allowed = tmp; + bytes_allowed = mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed_rnd, + (u64)HZ); if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { if (wait) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 35d81b5deae1c..6f63920f073c6 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -838,9 +838,11 @@ int wbt_init(struct request_queue *q) rwb->last_comp = rwb->last_issue = jiffies; rwb->win_nsec = RWB_WINDOW_NSEC; rwb->enable_state = WBT_STATE_ON_DEFAULT; - rwb->wc = 1; + rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags); rwb->rq_depth.default_depth = RWB_DEF_DEPTH; - wbt_update_limits(rwb); + rwb->min_lat_nsec = wbt_default_latency_nsec(q); + + wbt_queue_depth_changed(&rwb->rqos); /* * Assign rwb and add the stats callback. @@ -848,10 +850,5 @@ int wbt_init(struct request_queue *q) rq_qos_add(q, &rwb->rqos); blk_stat_add_callback(q, rwb->cb); - rwb->min_lat_nsec = wbt_default_latency_nsec(q); - - wbt_queue_depth_changed(&rwb->rqos); - wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); - return 0; } diff --git a/block/partitions/core.c b/block/partitions/core.c index a02e224115943..e3d61ec4a5a64 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -329,6 +329,7 @@ void delete_partition(struct hd_struct *part) struct gendisk *disk = part_to_disk(part); struct disk_part_tbl *ptbl = rcu_dereference_protected(disk->part_tbl, 1); + struct block_device *bdev; /* * ->part_tbl is referenced in this part's release handler, so @@ -346,6 +347,12 @@ void delete_partition(struct hd_struct *part) * "in-use" until we really free the gendisk. 
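
The blk-throttle hunk above replaces a plain 64-bit multiply followed by do_div() with mul_u64_u64_div_u64(), because bps_limit * jiffy_elapsed_rnd can wrap 64 bits for large limits. A userspace demonstration, assuming a compiler with unsigned __int128 (the stand-in below mimics the kernel helper's behavior, not its implementation):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mul_u64_u64_div_u64(uint64_t a, uint64_t b, uint64_t c)
    {
        return (uint64_t)(((unsigned __int128)a * b) / c);
    }

    int main(void)
    {
        uint64_t bps_limit = UINT64_MAX / 100; /* absurdly large limit */
        uint64_t jiffies = 1000, hz = 250;

        /* old style: the 64-bit multiply wraps before the division */
        uint64_t wrapped = bps_limit * jiffies / hz;
        uint64_t exact = mul_u64_u64_div_u64(bps_limit, jiffies, hz);

        printf("wrapped=%llu exact=%llu\n",
               (unsigned long long)wrapped, (unsigned long long)exact);
        return 0;
    }
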
*/ blk_invalidate_devt(part_devt(part)); + + bdev = bdget_part(part); + if (bdev) { + remove_inode_hash(bdev->bd_inode); + bdput(bdev); + } percpu_ref_kill(&part->ref); } diff --git a/block/sed-opal.c b/block/sed-opal.c index daafadbb88cae..0ac5a4f3f2261 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -88,8 +88,8 @@ struct opal_dev { u64 lowest_lba; size_t pos; - u8 cmd[IO_BUFFER_LENGTH]; - u8 resp[IO_BUFFER_LENGTH]; + u8 *cmd; + u8 *resp; struct parsed_resp parsed; size_t prev_d_len; @@ -2134,6 +2134,8 @@ void free_opal_dev(struct opal_dev *dev) return; clean_opal_dev(dev); + kfree(dev->resp); + kfree(dev->cmd); kfree(dev); } EXPORT_SYMBOL(free_opal_dev); @@ -2146,17 +2148,39 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv) if (!dev) return NULL; + /* + * Presumably DMA-able buffers must be cache-aligned. Kmalloc makes + * sure the allocated buffer is DMA-safe in that regard. + */ + dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL); + if (!dev->cmd) + goto err_free_dev; + + dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL); + if (!dev->resp) + goto err_free_cmd; + INIT_LIST_HEAD(&dev->unlk_lst); mutex_init(&dev->dev_lock); dev->data = data; dev->send_recv = send_recv; if (check_opal_support(dev) != 0) { pr_debug("Opal is not supported on this device\n"); - kfree(dev); - return NULL; + goto err_free_resp; } return dev; + +err_free_resp: + kfree(dev->resp); + +err_free_cmd: + kfree(dev->cmd); + +err_free_dev: + kfree(dev); + + return NULL; } EXPORT_SYMBOL(init_opal_dev); diff --git a/crypto/akcipher.c b/crypto/akcipher.c index f866085c8a4a3..ab975a420e1e9 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req) return -ENOSYS; } +static int akcipher_default_set_key(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + return -ENOSYS; +} + int crypto_register_akcipher(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; @@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg) alg->encrypt = akcipher_default_op; if (!alg->decrypt) alg->decrypt = akcipher_default_op; + if (!alg->set_priv_key) + alg->set_priv_key = akcipher_default_set_key; akcipher_prepare_alg(alg); return crypto_register_alg(base); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 668095eca0faf..ca3a40fc7da91 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -68,11 +68,12 @@ struct aead_instance_ctx { struct cryptd_skcipher_ctx { refcount_t refcnt; - struct crypto_sync_skcipher *child; + struct crypto_skcipher *child; }; struct cryptd_skcipher_request_ctx { crypto_completion_t complete; + struct skcipher_request req; }; struct cryptd_hash_ctx { @@ -227,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_sync_skcipher *child = ctx->child; + struct crypto_skcipher *child = ctx->child; - crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(child, - crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(child, key, keylen); + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, + crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + return crypto_skcipher_setkey(child, key, keylen); } static void cryptd_skcipher_complete(struct skcipher_request *req, int err) @@ -258,13 +259,13 @@ static void 
cryptd_skcipher_encrypt(struct crypto_async_request *base, struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + struct skcipher_request *subreq = &rctx->req; + struct crypto_skcipher *child = ctx->child; if (unlikely(err == -EINPROGRESS)) goto out; - skcipher_request_set_sync_tfm(subreq, child); + skcipher_request_set_tfm(subreq, child); skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, @@ -286,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base, struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + struct skcipher_request *subreq = &rctx->req; + struct crypto_skcipher *child = ctx->child; if (unlikely(err == -EINPROGRESS)) goto out; - skcipher_request_set_sync_tfm(subreq, child); + skcipher_request_set_tfm(subreq, child); skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, @@ -343,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) if (IS_ERR(cipher)) return PTR_ERR(cipher); - ctx->child = (struct crypto_sync_skcipher *)cipher; + ctx->child = cipher; crypto_skcipher_set_reqsize( - tfm, sizeof(struct cryptd_skcipher_request_ctx)); + tfm, sizeof(struct cryptd_skcipher_request_ctx) + + crypto_skcipher_reqsize(cipher)); return 0; } @@ -353,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->child); + crypto_free_skcipher(ctx->child); } static void cryptd_skcipher_free(struct skcipher_instance *inst) @@ -931,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - return &ctx->child->base; + return ctx->child; } EXPORT_SYMBOL_GPL(cryptd_skcipher_child); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 8609174e036e8..7972d2784b3b5 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1282,15 +1282,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, goto out_free_tfm; } - - for (i = 0; i < num_mb; ++i) - if (testmgr_alloc_buf(data[i].xbuf)) { - while (i--) - testmgr_free_buf(data[i].xbuf); - goto out_free_tfm; - } - - for (i = 0; i < num_mb; ++i) { data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c index 48019660a0967..63c5444f0f1ae 100644 --- a/drivers/accessibility/speakup/main.c +++ b/drivers/accessibility/speakup/main.c @@ -1780,7 +1780,7 @@ static void speakup_con_update(struct vc_data *vc) { unsigned long flags; - if (!speakup_console[vc->vc_num] || spk_parked) + if (!speakup_console[vc->vc_num] || spk_parked || !synth) return; if (!spin_trylock_irqsave(&speakup_info.spinlock, flags)) /* Speakup output, discard */ diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index 72f1fb77abcd0..e648158368a7d 
100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, int cpu = mce->extcpu; struct acpi_hest_generic_status *estatus, *tmp; struct acpi_hest_generic_data *gdata; - const guid_t *fru_id = &guid_null; - char *fru_text = ""; + const guid_t *fru_id; + char *fru_text; guid_t *sec_type; static u32 err_seq; @@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, /* log event via trace */ err_seq++; - gdata = (struct acpi_hest_generic_data *)(tmp + 1); - if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) - fru_id = (guid_t *)gdata->fru_id; - if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) - fru_text = gdata->fru_text; - sec_type = (guid_t *)gdata->section_type; - if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { - struct cper_sec_mem_err *mem = (void *)(gdata + 1); - if (gdata->error_data_length >= sizeof(*mem)) - trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, - (u8)gdata->error_severity); + apei_estatus_for_each_section(tmp, gdata) { + if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) + fru_id = (guid_t *)gdata->fru_id; + else + fru_id = &guid_null; + if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) + fru_text = gdata->fru_text; + else + fru_text = ""; + sec_type = (guid_t *)gdata->section_type; + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem = (void *)(gdata + 1); + + if (gdata->error_data_length >= sizeof(*mem)) + trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, + (u8)gdata->error_severity); + } } out: diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index eb04b2f828eef..cf6c9ffe04a2d 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -498,6 +498,22 @@ static const struct dmi_system_id video_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"), }, }, + { + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Satellite Z830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"), + }, + }, + { + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Portege Z830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"), + }, + }, /* * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set * but the IDs actually follow the Device ID Scheme. 
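
The acpi_extlog change above stops hard-coding the first generic data section and walks every section of the status block, in the spirit of apei_estatus_for_each_section(). A simplified, self-contained walker over the same kind of variable-length layout (struct names and fields here are illustrative stand-ins, not the real CPER definitions):

    #include <stddef.h>
    #include <stdint.h>

    struct gen_status { uint32_t data_length; /* sections follow */ };
    struct gen_data   { uint32_t error_data_length; /* payload follows */ };

    #define section_size(g) (sizeof(struct gen_data) + (g)->error_data_length)

    /* Visit every section instead of assuming there is exactly one. */
    static void for_each_section(struct gen_status *s,
                                 void (*fn)(struct gen_data *))
    {
        struct gen_data *g = (struct gen_data *)(s + 1);
        uint8_t *end = (uint8_t *)(s + 1) + s->data_length;

        while ((uint8_t *)g + sizeof(*g) <= end &&
               (uint8_t *)g + section_size(g) <= end) {
            fn(g);
            g = (struct gen_data *)((uint8_t *)g + section_size(g));
        }
    }
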
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index cf67caff878ab..97971c79c5f56 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; - goto cleanup; + goto pop_walk_state; } info->parameters = &this_walk_state->operands[0]; @@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, ACPI_FREE(info); if (ACPI_FAILURE(status)) { - goto cleanup; + goto pop_walk_state; } next_walk_state->method_nesting_depth = @@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(status); +pop_walk_state: + + /* On error, pop the walk state to be deleted from thread */ + + acpi_ds_pop_walk_state(thread); + cleanup: /* On error, we must terminate the method properly */ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index 41bdd0278dd8e..9a7cc679e5447 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, status = acpi_ut_walk_package_tree(source_obj, dest_obj, acpi_ut_copy_ielement_to_ielement, walk_state); - if (ACPI_FAILURE(status)) { - - /* On failure, delete the destination package object */ - - acpi_ut_remove_reference(dest_obj); - } - return_ACPI_STATUS(status); } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0c8330ed1ffd5..9bdb5bd5fda63 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx) clear_fixmap(fixmap_idx); } -int ghes_estatus_pool_init(int num_ghes) +int ghes_estatus_pool_init(unsigned int num_ghes) { unsigned long addr, len; int rc; @@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) ghes_estatus_cache_add(generic, estatus); } - if (task_work_pending && current->mm != &init_mm) { + if (task_work_pending && current->mm) { estatus_node->task_work.func = ghes_kick_task_work; estatus_node->task_work_cpu = smp_processor_id(); ret = task_work_add(current, &estatus_node->task_work, diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c index 137a5dd880c26..26453a945da44 100644 --- a/drivers/acpi/numa/hmat.c +++ b/drivers/acpi/numa/hmat.c @@ -563,17 +563,26 @@ static int initiator_cmp(void *priv, const struct list_head *a, { struct memory_initiator *ia; struct memory_initiator *ib; - unsigned long *p_nodes = priv; ia = list_entry(a, struct memory_initiator, node); ib = list_entry(b, struct memory_initiator, node); - set_bit(ia->processor_pxm, p_nodes); - set_bit(ib->processor_pxm, p_nodes); - return ia->processor_pxm - ib->processor_pxm; } +static int initiators_to_nodemask(unsigned long *p_nodes) +{ + struct memory_initiator *initiator; + + if (list_empty(&initiators)) + return -ENXIO; + + list_for_each_entry(initiator, &initiators, node) + set_bit(initiator->processor_pxm, p_nodes); + + return 0; +} + static void hmat_register_target_initiators(struct memory_target *target) { static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); @@ -610,7 +619,10 @@ static void hmat_register_target_initiators(struct memory_target *target) * initiators. 
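
The hmat fix above splits two jobs that were previously fused inside the list_sort() comparator: a comparator can run an unpredictable number of times, so mutating the nodemask there is fragile. The same separation in a userspace sketch, with qsort() standing in for list_sort():

    #include <stdlib.h>

    struct initiator { int pxm; };

    /* Comparator is now pure: ordering only, no side effects. */
    static int initiator_cmp(const void *a, const void *b)
    {
        const struct initiator *ia = a, *ib = b;
        return ia->pxm - ib->pxm;
    }

    /* Nodemask construction happens in a separate, explicit pass. */
    static void initiators_to_nodemask(const struct initiator *v, size_t n,
                                       unsigned long *mask)
    {
        for (size_t i = 0; i < n; i++)
            mask[v[i].pxm / (8 * sizeof(long))] |=
                1UL << (v[i].pxm % (8 * sizeof(long)));
    }

    /* usage: qsort(v, n, sizeof(*v), initiator_cmp); then build the mask */
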
*/ bitmap_zero(p_nodes, MAX_NUMNODES); - list_sort(p_nodes, &initiators, initiator_cmp); + list_sort(NULL, &initiators, initiator_cmp); + if (initiators_to_nodemask(p_nodes) < 0) + return; + if (!access0done) { for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { loc = localities_types[i]; @@ -644,8 +656,9 @@ static void hmat_register_target_initiators(struct memory_target *target) /* Access 1 ignores Generic Initiators */ bitmap_zero(p_nodes, MAX_NUMNODES); - list_sort(p_nodes, &initiators, initiator_cmp); - best = 0; + if (initiators_to_nodemask(p_nodes) < 0) + return; + for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { loc = localities_types[i]; if (!loc) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e5dd87ddc6b34..59781e765e0e2 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -536,10 +536,27 @@ static void wait_for_freeze(void) /* No delay is needed if we are in guest */ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return; + /* + * Modern (>=Nehalem) Intel systems use ACPI via intel_idle, + * not this code. Assume that any Intel systems using this + * are ancient and may need the dummy wait. This also assumes + * that the motivating chipset issue was Intel-only. + */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; #endif - /* Dummy wait op - must do something useless after P_LVL2 read - because chipsets cannot guarantee that STPCLK# signal - gets asserted in time to freeze execution properly. */ + /* + * Dummy wait op - must do something useless after P_LVL2 read + * because chipsets cannot guarantee that STPCLK# signal gets + * asserted in time to freeze execution properly + * + * This workaround has been in place since the original ACPI + * implementation was merged, circa 2002. + * + * If a profile is pointing to this instruction, please first + * consider moving your system to a more modern idle + * mechanism. + */ inl(acpi_gbl_FADT.xpm_timer_block.address); } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index e39d59ad64964..b13713199ad94 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -500,6 +500,70 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"), }, }, + /* + * More Tongfang devices with the same issue as the Clevo NL5xRU and + * NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above. 
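
A condensed model of the wait_for_freeze() gating added above; illustration only (inl() and sys/io.h are Linux/x86 glibc-specific and would need ioperm() in practice):

    #include <stdbool.h>
    #include <sys/io.h>      /* inl(); Linux/x86 glibc only */

    static void wait_for_freeze_sketch(bool in_guest, bool is_intel,
                                       unsigned short pm_timer_port)
    {
        if (in_guest)
            return;      /* no STPCLK# race inside a VM */
        if (!is_intel)
            return;      /* the chipset erratum is assumed Intel-only */
        (void)inl(pm_timer_port);    /* dummy read, result discarded */
    }
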
+ */ + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GMxNGxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GMxZGxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GMxRGxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics * for this do not catch. diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cfb1393a0891a..b403c7f063b00 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2008,15 +2008,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t, /** * binder_get_object() - gets object and checks for valid metadata * @proc: binder_proc owning the buffer + * @u: sender's user pointer to base of buffer * @buffer: binder_buffer that we're parsing. * @offset: offset in the @buffer at which to validate an object. * @object: struct binder_object to read into * - * Return: If there's a valid metadata object at @offset in @buffer, the + * Copy the binder object at the given offset into @object. If @u is + * provided then the copy is from the sender's buffer. If not, then + * it is copied from the target's @buffer. + * + * Return: If there's a valid metadata object at @offset, the * size of that object. Otherwise, it returns zero. The object * is read into the struct binder_object pointed to by @object. */ static size_t binder_get_object(struct binder_proc *proc, + const void __user *u, struct binder_buffer *buffer, unsigned long offset, struct binder_object *object) @@ -2026,10 +2032,16 @@ static size_t binder_get_object(struct binder_proc *proc, size_t object_size = 0; read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); - if (offset > buffer->data_size || read_size < sizeof(*hdr) || - binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, - offset, read_size)) + if (offset > buffer->data_size || read_size < sizeof(*hdr)) return 0; + if (u) { + if (copy_from_user(object, u + offset, read_size)) + return 0; + } else { + if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, + offset, read_size)) + return 0; + } /* Ok, now see if we read a complete object. 
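
binder_get_object() now reads from one of two sources depending on whether a sender user pointer was supplied. A userspace model of that dispatch, with memcpy() standing in for both copy_from_user() and binder_alloc_copy_from_buffer() (names and bounds checks simplified):

    #include <stddef.h>
    #include <string.h>

    struct object { unsigned char bytes[64]; };

    /* If a sender base is provided, read the object from the sender's
     * buffer, else from the (already copied) target buffer. Returns
     * bytes read, 0 on a bounds failure. */
    static size_t get_object(const void *sender_base,   /* may be NULL */
                             const void *target_base, size_t buf_size,
                             size_t offset, struct object *out)
    {
        size_t read_size = sizeof(*out);

        if (offset > buf_size || buf_size - offset < read_size)
            return 0;
        memcpy(out, (const char *)(sender_base ? sender_base : target_base)
                    + offset, read_size);
        return read_size;
    }
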
*/ hdr = &object->hdr; @@ -2102,7 +2114,7 @@ static struct binder_buffer_object *binder_validate_ptr( b, buffer_offset, sizeof(object_offset))) return NULL; - object_size = binder_get_object(proc, b, object_offset, object); + object_size = binder_get_object(proc, NULL, b, object_offset, object); if (!object_size || object->hdr.type != BINDER_TYPE_PTR) return NULL; if (object_offsetp) @@ -2167,7 +2179,8 @@ static bool binder_validate_fixup(struct binder_proc *proc, unsigned long buffer_offset; struct binder_object last_object; struct binder_buffer_object *last_bbo; - size_t object_size = binder_get_object(proc, b, last_obj_offset, + size_t object_size = binder_get_object(proc, NULL, b, + last_obj_offset, &last_object); if (object_size != sizeof(*last_bbo)) return false; @@ -2242,7 +2255,7 @@ static void binder_deferred_fd_close(int fd) if (!twcb) return; init_task_work(&twcb->twork, binder_do_fd_close); - __close_fd_get_file(fd, &twcb->file); + close_fd_get_file(fd, &twcb->file); if (twcb->file) { filp_close(twcb->file, current->files); task_work_add(current, &twcb->twork, TWA_RESUME); @@ -2282,7 +2295,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, buffer, buffer_offset, sizeof(object_offset))) - object_size = binder_get_object(proc, buffer, + object_size = binder_get_object(proc, NULL, buffer, object_offset, &object); if (object_size == 0) { pr_err("transaction release %d bad object at offset %lld, size %zd\n", @@ -2620,16 +2633,266 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset, return ret; } -static int binder_translate_fd_array(struct binder_fd_array_object *fda, +/** + * struct binder_ptr_fixup - data to be fixed-up in target buffer + * @offset offset in target buffer to fixup + * @skip_size bytes to skip in copy (fixup will be written later) + * @fixup_data data to write at fixup offset + * @node list node + * + * This is used for the pointer fixup list (pf) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_ptr_fixup { + binder_size_t offset; + size_t skip_size; + binder_uintptr_t fixup_data; + struct list_head node; +}; + +/** + * struct binder_sg_copy - scatter-gather data to be copied + * @offset offset in target buffer + * @sender_uaddr user address in source buffer + * @length bytes to copy + * @node list node + * + * This is used for the sg copy list (sgc) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_sg_copy { + binder_size_t offset; + const void __user *sender_uaddr; + size_t length; + struct list_head node; +}; + +/** + * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data + * @alloc: binder_alloc associated with @buffer + * @buffer: binder buffer in target process + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Processes all elements of @sgc_head, applying fixups from @pf_head + * and copying the scatter-gather data from the source process' user + * buffer to the target's buffer. It is expected that the list creation + * and processing all occurs during binder_transaction() so these lists + * are only accessed in local context. 
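
A compact model of the copy-up-to-the-next-fixup loop that binder_do_deferred_txn_copies() implements, using arrays instead of lists; a sketch under the assumption that fixups are sorted by offset and in bounds, not the kernel code:

    #include <stdint.h>
    #include <string.h>

    struct fixup { size_t offset; uint64_t data; size_t skip; };

    /* Copy src into dst, but at each fixup offset either write the
     * translated value (skip == 0) or leave a hole to patch later
     * (skip > 0); the raw sender bytes never land in dst. */
    static void deferred_copy(uint8_t *dst, const uint8_t *src, size_t len,
                              const struct fixup *pf, size_t nf)
    {
        size_t pos = 0;

        while (pos < len) {
            size_t stop = nf ? pf->offset : len;

            memcpy(dst + pos, src + pos, stop - pos);
            pos = stop;
            if (!nf)
                break;
            if (pf->skip) {
                pos += pf->skip;       /* e.g. fd arrays, patched later */
            } else {
                memcpy(dst + pos, &pf->data, sizeof(pf->data));
                pos += sizeof(pf->data);
            }
            pf++; nf--;
        }
    }
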
+ * + * Return: 0=success, else -errno + */ +static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, + struct binder_buffer *buffer, + struct list_head *sgc_head, + struct list_head *pf_head) +{ + int ret = 0; + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *tmppf; + struct binder_ptr_fixup *pf = + list_first_entry_or_null(pf_head, struct binder_ptr_fixup, + node); + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + size_t bytes_copied = 0; + + while (bytes_copied < sgc->length) { + size_t copy_size; + size_t bytes_left = sgc->length - bytes_copied; + size_t offset = sgc->offset + bytes_copied; + + /* + * We copy up to the fixup (pointed to by pf) + */ + copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) + : bytes_left; + if (!ret && copy_size) + ret = binder_alloc_copy_user_to_buffer( + alloc, buffer, + offset, + sgc->sender_uaddr + bytes_copied, + copy_size); + bytes_copied += copy_size; + if (copy_size != bytes_left) { + BUG_ON(!pf); + /* we stopped at a fixup offset */ + if (pf->skip_size) { + /* + * we are just skipping. This is for + * BINDER_TYPE_FDA where the translated + * fds will be fixed up when we get + * to target context. + */ + bytes_copied += pf->skip_size; + } else { + /* apply the fixup indicated by pf */ + if (!ret) + ret = binder_alloc_copy_to_buffer( + alloc, buffer, + pf->offset, + &pf->fixup_data, + sizeof(pf->fixup_data)); + bytes_copied += sizeof(pf->fixup_data); + } + list_del(&pf->node); + kfree(pf); + pf = list_first_entry_or_null(pf_head, + struct binder_ptr_fixup, node); + } + } + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + BUG_ON(pf->skip_size == 0); + list_del(&pf->node); + kfree(pf); + } + BUG_ON(!list_empty(sgc_head)); + + return ret > 0 ? -EINVAL : ret; +} + +/** + * binder_cleanup_deferred_txn_lists() - free specified lists + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Called to clean up @sgc_head and @pf_head if there is an + * error. + */ +static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, + struct list_head *pf_head) +{ + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *pf, *tmppf; + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + list_del(&pf->node); + kfree(pf); + } +} + +/** + * binder_defer_copy() - queue a scatter-gather buffer for copy + * @sgc_head: list_head of scatter-gather copy list + * @offset: binder buffer offset in target process + * @sender_uaddr: user address in source process + * @length: bytes to copy + * + * Specify a scatter-gather block to be copied. The actual copy must + * be deferred until all the needed fixups are identified and queued. + * Then the copy and fixups are done together so un-translated values + * from the source are never visible in the target buffer. + * + * We are guaranteed that repeated calls to this function will have + * monotonically increasing @offset values so the list will naturally + * be ordered. 
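
Because offsets arrive monotonically increasing, binder_defer_copy() can keep its list sorted with a plain tail append. A sentinel-list sketch of that invariant (hypothetical types; contrast with the backward-searching insert used for fixups just below):

    #include <assert.h>
    #include <stddef.h>

    struct sg_copy { size_t offset; struct sg_copy *prev, *next; };

    /* Append at the tail of a circular list with a sentinel head; the
     * assert documents the monotonic-offset guarantee that makes a
     * search unnecessary. */
    static void defer_copy_append(struct sg_copy *head, struct sg_copy *bc)
    {
        assert(head->prev == head || head->prev->offset <= bc->offset);
        bc->prev = head->prev;
        bc->next = head;
        head->prev->next = bc;
        head->prev = bc;
    }
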
+ * + * Return: 0=success, else -errno + */ +static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, + const void __user *sender_uaddr, size_t length) +{ + struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); + + if (!bc) + return -ENOMEM; + + bc->offset = offset; + bc->sender_uaddr = sender_uaddr; + bc->length = length; + INIT_LIST_HEAD(&bc->node); + + /* + * We are guaranteed that the deferred copies are in-order + * so just add to the tail. + */ + list_add_tail(&bc->node, sgc_head); + + return 0; +} + +/** + * binder_add_fixup() - queue a fixup to be applied to sg copy + * @pf_head: list_head of binder ptr fixup list + * @offset: binder buffer offset in target process + * @fixup: bytes to be copied for fixup + * @skip_size: bytes to skip when copying (fixup will be applied later) + * + * Add the specified fixup to a list ordered by @offset. When copying + * the scatter-gather buffers, the fixup will be copied instead of + * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup + * will be applied later (in target process context), so we just skip + * the bytes specified by @skip_size. If @skip_size is 0, we copy the + * value in @fixup. + * + * This function is called *mostly* in @offset order, but there are + * exceptions. Since out-of-order inserts are relatively uncommon, + * we insert the new element by searching backward from the tail of + * the list. + * + * Return: 0=success, else -errno + */ +static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, + binder_uintptr_t fixup, size_t skip_size) +{ + struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); + struct binder_ptr_fixup *tmppf; + + if (!pf) + return -ENOMEM; + + pf->offset = offset; + pf->fixup_data = fixup; + pf->skip_size = skip_size; + INIT_LIST_HEAD(&pf->node); + + /* Fixups are *mostly* added in-order, but there are some + * exceptions. Look backwards through list for insertion point. 
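
The backward search that binder_add_fixup() describes, shown on a plain circular doubly linked list with a sentinel head (a sketch, not the kernel's list_head API); since the workload is mostly in order, the common case is O(1):

    #include <stddef.h>

    struct node { size_t offset; struct node *prev, *next; };

    static void insert_ordered(struct node *head, struct node *pf)
    {
        struct node *it;

        /* search backward from the tail for the first smaller offset */
        for (it = head->prev; it != head; it = it->prev) {
            if (it->offset < pf->offset) {
                pf->prev = it;            /* insert after it */
                pf->next = it->next;
                it->next->prev = pf;
                it->next = pf;
                return;
            }
        }
        pf->prev = head;                  /* lowest offset: insert at head */
        pf->next = head->next;
        head->next->prev = pf;
        head->next = pf;
    }
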
+ */ + list_for_each_entry_reverse(tmppf, pf_head, node) { + if (tmppf->offset < pf->offset) { + list_add(&pf->node, &tmppf->node); + return 0; + } + } + /* + * if we get here, then the new offset is the lowest so + * insert at the head + */ + list_add(&pf->node, pf_head); + return 0; +} + +static int binder_translate_fd_array(struct list_head *pf_head, + struct binder_fd_array_object *fda, + const void __user *sender_ubuffer, struct binder_buffer_object *parent, + struct binder_buffer_object *sender_uparent, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { binder_size_t fdi, fd_buf_size; binder_size_t fda_offset; + const void __user *sender_ufda_base; struct binder_proc *proc = thread->proc; - struct binder_proc *target_proc = t->to_proc; + int ret; + + if (fda->num_fds == 0) + return 0; fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { @@ -2653,19 +2916,25 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, */ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + fda->parent_offset; - if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { + sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + + fda->parent_offset; + + if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || + !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); return -EINVAL; } + ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); + if (ret) + return ret; + for (fdi = 0; fdi < fda->num_fds; fdi++) { u32 fd; - int ret; binder_size_t offset = fda_offset + fdi * sizeof(fd); + binder_size_t sender_uoffset = fdi * sizeof(fd); - ret = binder_alloc_copy_from_buffer(&target_proc->alloc, - &fd, t->buffer, - offset, sizeof(fd)); + ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); if (!ret) ret = binder_translate_fd(fd, offset, t, thread, in_reply_to); @@ -2675,7 +2944,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, return 0; } -static int binder_fixup_parent(struct binder_transaction *t, +static int binder_fixup_parent(struct list_head *pf_head, + struct binder_transaction *t, struct binder_thread *thread, struct binder_buffer_object *bp, binder_size_t off_start_offset, @@ -2721,14 +2991,7 @@ static int binder_fixup_parent(struct binder_transaction *t, } buffer_offset = bp->parent_offset + (uintptr_t)parent->buffer - (uintptr_t)b->user_data; - if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, - &bp->buffer, sizeof(bp->buffer))) { - binder_user_error("%d:%d got transaction with invalid parent offset\n", - proc->pid, thread->pid); - return -EINVAL; - } - - return 0; + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); } /** @@ -2848,6 +3111,7 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t off_start_offset, off_end_offset; binder_size_t off_min; binder_size_t sg_buf_offset, sg_buf_end_offset; + binder_size_t user_offset = 0; struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; @@ -2862,6 +3126,12 @@ static void binder_transaction(struct binder_proc *proc, int t_debug_id = atomic_inc_return(&binder_last_id); char *secctx = NULL; u32 secctx_sz = 0; + struct list_head sgc_head; + struct list_head pf_head; + const void __user *user_buffer = (const void __user *) + 
(uintptr_t)tr->data.ptr.buffer; + INIT_LIST_HEAD(&sgc_head); + INIT_LIST_HEAD(&pf_head); e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -3173,19 +3443,6 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); trace_binder_transaction_alloc_buf(t->buffer); - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, - t->buffer, 0, - (const void __user *) - (uintptr_t)tr->data.ptr.buffer, - tr->data_size)) { - binder_user_error("%d:%d got transaction with invalid data ptr\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - return_error_param = -EFAULT; - return_error_line = __LINE__; - goto err_copy_data_failed; - } if (binder_alloc_copy_user_to_buffer( &target_proc->alloc, t->buffer, @@ -3230,6 +3487,7 @@ static void binder_transaction(struct binder_proc *proc, size_t object_size; struct binder_object object; binder_size_t object_offset; + binder_size_t copy_size; if (binder_alloc_copy_from_buffer(&target_proc->alloc, &object_offset, @@ -3241,8 +3499,27 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - object_size = binder_get_object(target_proc, t->buffer, - object_offset, &object); + + /* + * Copy the source user buffer up to the next object + * that will be processed. + */ + copy_size = object_offset - user_offset; + if (copy_size && (user_offset > object_offset || + binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + copy_size))) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + object_size = binder_get_object(target_proc, user_buffer, + t->buffer, object_offset, &object); if (object_size == 0 || object_offset < off_min) { binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", proc->pid, thread->pid, @@ -3254,6 +3531,11 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } + /* + * Set offset to the next buffer fragment to be + * copied + */ + user_offset = object_offset + object_size; hdr = &object.hdr; off_min = object_offset + object_size; @@ -3316,6 +3598,8 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_FDA: { struct binder_object ptr_object; binder_size_t parent_offset; + struct binder_object user_object; + size_t user_parent_size; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); size_t num_valid = (buffer_offset - off_start_offset) / @@ -3347,11 +3631,35 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_parent; } - ret = binder_translate_fd_array(fda, parent, t, thread, - in_reply_to); - if (ret < 0) { + /* + * We need to read the user version of the parent + * object to get the original user offset + */ + user_parent_size = + binder_get_object(proc, user_buffer, t->buffer, + parent_offset, &user_object); + if (user_parent_size != sizeof(user_object.bbo)) { + binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", + proc->pid, thread->pid, + user_parent_size, + sizeof(user_object.bbo)); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_parent; + } + ret = 
binder_translate_fd_array(&pf_head, fda, + user_buffer, parent, + &user_object.bbo, t, + thread, in_reply_to); + if (!ret) + ret = binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fda, sizeof(*fda)); + if (ret) { return_error = BR_FAILED_REPLY; - return_error_param = ret; + return_error_param = ret > 0 ? -EINVAL : ret; return_error_line = __LINE__; goto err_translate_failed; } @@ -3373,19 +3681,14 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, - t->buffer, - sg_buf_offset, - (const void __user *) - (uintptr_t)bp->buffer, - bp->length)) { - binder_user_error("%d:%d got transaction with invalid offsets ptr\n", - proc->pid, thread->pid); - return_error_param = -EFAULT; + ret = binder_defer_copy(&sgc_head, sg_buf_offset, + (const void __user *)(uintptr_t)bp->buffer, + bp->length); + if (ret) { return_error = BR_FAILED_REPLY; + return_error_param = ret; return_error_line = __LINE__; - goto err_copy_data_failed; + goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ bp->buffer = (uintptr_t) @@ -3394,7 +3697,8 @@ static void binder_transaction(struct binder_proc *proc, num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); - ret = binder_fixup_parent(t, thread, bp, + ret = binder_fixup_parent(&pf_head, t, + thread, bp, off_start_offset, num_valid, last_fixup_obj_off, @@ -3421,6 +3725,30 @@ static void binder_transaction(struct binder_proc *proc, goto err_bad_object_type; } } + /* Done processing objects, copy the rest of the buffer */ + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + tr->data_size - user_offset)) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + + ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, + &sgc_head, &pf_head); + if (ret) { + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_copy_data_failed; + } tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; t->work.type = BINDER_WORK_TRANSACTION; @@ -3487,6 +3815,7 @@ static void binder_transaction(struct binder_proc *proc, err_bad_offset: err_bad_parent: err_copy_data_failed: + binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, NULL, t->buffer, diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 95ca4f934d283..a77ed66425f27 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, mm = alloc->vma_vm_mm; if (mm) { - mmap_read_lock(mm); + mmap_write_lock(mm); vma = alloc->vma; } @@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, trace_binder_alloc_page_end(alloc, index); } if (mm) { - mmap_read_unlock(mm); + mmap_write_unlock(mm); mmput(mm); } return 0; @@ -303,7 +303,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } err_no_vma: if (mm) { - 
mmap_read_unlock(mm); + mmap_write_unlock(mm); mmput(mm); } return vma ? -ENOMEM : -ESRCH; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index ff2add0101fe5..7ca9fa9a75e24 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -83,6 +83,7 @@ enum board_ids { static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void ahci_remove_one(struct pci_dev *dev); static void ahci_shutdown_one(struct pci_dev *dev); +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, @@ -664,6 +665,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, ahci_save_initial_config(&pdev->dev, hpriv); } +static int ahci_pci_reset_controller(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_reset_controller(host); + if (rc) + return rc; + + /* + * If platform firmware failed to enable ports, try to enable + * them here. + */ + ahci_intel_pcs_quirk(pdev, hpriv); + + return 0; +} + static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; @@ -865,7 +885,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev) struct ata_host *host = pci_get_drvdata(pdev); int rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); @@ -900,7 +920,7 @@ static int ahci_pci_device_resume(struct device *dev) ahci_mcp89_apple_enable(pdev); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; @@ -1785,12 +1805,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); - /* - * If platform firmware failed to enable ports, try to enable - * them here. 
- */ - ahci_intel_pcs_quirk(pdev, hpriv); - /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; @@ -1900,7 +1914,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index d1f284f0c83d9..1ce8973569933 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -254,7 +254,7 @@ enum { PCS_7 = 0x94, /* 7+ port PCS (Denverton) */ /* em constants */ - EM_MAX_SLOTS = 8, + EM_MAX_SLOTS = SATA_PMP_MAX_PORTS, EM_MAX_RETRY = 5, /* em_ctl bits */ diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 388baf528fa81..189f75d537414 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -1230,4 +1230,4 @@ module_platform_driver(imx_ahci_driver); MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver"); MODULE_AUTHOR("Richard Zhu "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("ahci:imx"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 0910441321f72..64d6da0a53035 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -451,14 +451,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, } } - hpriv->nports = child_nodes = of_get_child_count(dev->of_node); + /* + * Too many sub-nodes most likely means something is wrong with + * the firmware. + */ + child_nodes = of_get_child_count(dev->of_node); + if (child_nodes > AHCI_MAX_PORTS) { + rc = -EINVAL; + goto err_out; + } /* * If no sub-node was found, we still need to set nports to * one in order to be able to use the * ahci_platform_[en|dis]able_[phys|regulators] functions. */ - if (!child_nodes) + if (child_nodes) + hpriv->nports = child_nodes; + else hpriv->nports = 1; hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2402fa4d8aa55..14150767be444 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3051,7 +3051,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) */ if (spd > 1) mask &= (1 << (spd - 1)) - 1; - else + else if (link->sata_spd) return -EINVAL; /* were we already at the bottom?
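+ * (after the change above, an unknown current link speed, i.e. link->sata_spd == 0, is no longer reported as -EINVAL)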
*/ @@ -3936,6 +3936,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* These specific Pioneer models have LPM issues */ + { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM }, + { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM }, + /* Crucial BX100 SSD 500GB has broken LPM support */ { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a0e788b648214..f1755efd30a25 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3303,6 +3303,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) case REPORT_LUNS: case REQUEST_SENSE: case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: case REZERO_UNIT: case SEEK_6: case SEEK_10: @@ -3969,6 +3970,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) return ata_scsi_write_same_xlat; case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: if (ata_try_flush_cache(dev)) return ata_scsi_flush_xlat; break; @@ -4030,44 +4032,51 @@ void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd) int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) { + struct ata_port *ap = dev->link->ap; u8 scsi_op = scmd->cmnd[0]; ata_xlat_func_t xlat_func; - int rc = 0; + + /* + * scsi_queue_rq() will defer commands if scsi_host_in_recovery(). + * However, this check is done without holding the ap->lock (a libata + * specific lock), so an error IRQ may have been received since then; + * therefore, we must check if EH is pending while holding ap->lock. + */ + if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) + return SCSI_MLQUEUE_DEVICE_BUSY; + + if (unlikely(!scmd->cmd_len)) + goto bad_cdb_len; if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { - if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) + if (unlikely(scmd->cmd_len > dev->cdb_len)) goto bad_cdb_len; xlat_func = ata_get_xlat_func(dev, scsi_op); - } else { - if (unlikely(!scmd->cmd_len)) - goto bad_cdb_len; + } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { + /* relay SCSI command to ATAPI device */ + int len = COMMAND_SIZE(scsi_op); - xlat_func = NULL; - if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { - /* relay SCSI command to ATAPI device */ - int len = COMMAND_SIZE(scsi_op); - if (unlikely(len > scmd->cmd_len || - len > dev->cdb_len || - scmd->cmd_len > ATAPI_CDB_LEN)) - goto bad_cdb_len; + if (unlikely(len > scmd->cmd_len || + len > dev->cdb_len || + scmd->cmd_len > ATAPI_CDB_LEN)) + goto bad_cdb_len; - xlat_func = atapi_xlat; - } else { - /* ATA_16 passthru, treat as an ATA command */ - if (unlikely(scmd->cmd_len > 16)) - goto bad_cdb_len; + xlat_func = atapi_xlat; + } else { + /* ATA_16 passthru, treat as an ATA command */ + if (unlikely(scmd->cmd_len > 16)) + goto bad_cdb_len; - xlat_func = ata_get_xlat_func(dev, scsi_op); - } + xlat_func = ata_get_xlat_func(dev, scsi_op); } if (xlat_func) - rc = ata_scsi_translate(dev, scmd, xlat_func); - else - ata_scsi_simulate(dev, scmd); + return ata_scsi_translate(dev, scmd, xlat_func); - return rc; + ata_scsi_simulate(dev, scmd); + + return 0; bad_cdb_len: DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", @@ -4215,6 +4224,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) * turning this into a no-op.
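+ * (the SYNCHRONIZE_CACHE_16 case added below is completed the same way)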
*/ case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: fallthrough; /* no-op's, complete with success */ diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index b33772df9bc60..31a66fc0c31dc 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -301,7 +301,9 @@ int ata_tport_add(struct device *parent, pm_runtime_enable(dev); pm_runtime_forbid(dev); - transport_add_device(dev); + error = transport_add_device(dev); + if (error) + goto tport_transport_add_err; transport_configure_device(dev); error = ata_tlink_add(&ap->link); @@ -312,12 +314,12 @@ int ata_tport_add(struct device *parent, tport_link_err: transport_remove_device(dev); + tport_transport_add_err: device_del(dev); tport_err: transport_destroy_device(dev); put_device(dev); - ata_host_put(ap->host); return error; } @@ -426,7 +428,9 @@ int ata_tlink_add(struct ata_link *link) goto tlink_err; } - transport_add_device(dev); + error = transport_add_device(dev); + if (error) + goto tlink_transport_err; transport_configure_device(dev); ata_for_each_dev(ata_dev, link, ALL) { @@ -441,6 +445,7 @@ int ata_tlink_add(struct ata_link *link) ata_tdev_delete(ata_dev); } transport_remove_device(dev); + tlink_transport_err: device_del(dev); tlink_err: transport_destroy_device(dev); @@ -678,7 +683,13 @@ static int ata_tdev_add(struct ata_device *ata_dev) return error; } - transport_add_device(dev); + error = transport_add_device(dev); + if (error) { + device_del(dev); + ata_tdev_free(ata_dev); + return error; + } + transport_configure_device(dev); return 0; } diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index abc0e87ca1a8b..43215a4c1e540 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -135,12 +135,12 @@ static void ixp4xx_setup_port(struct ata_port *ap, static int ixp4xx_pata_probe(struct platform_device *pdev) { - unsigned int irq; struct resource *cs0, *cs1; struct ata_host *host; struct ata_port *ap; struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev); int ret; + int irq; cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index d91ba47f2fc44..4405d255e3aa2 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -278,9 +278,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev) outb(inb(0x1F4) & 0x07, 0x1F4); rt = inb(0x1F3); - rt &= 0x07 << (3 * adev->devno); + rt &= ~(0x07 << (3 * !adev->devno)); if (pio) - rt |= (1 + 3 * pio) << (3 * adev->devno); + rt |= (1 + 3 * pio) << (3 * !adev->devno); + outb(rt, 0x1F3); udelay(100); outb(inb(0x1F2) | 0x01, 0x1F2); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 8272a3a002a34..51647926e6051 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -596,4 +596,23 @@ void __init init_cpu_topology(void) else if (of_have_populated_dt() && parse_dt_topology()) reset_cpu_topology(); } + +void store_cpu_topology(unsigned int cpuid) +{ + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; + + if (cpuid_topo->package_id != -1) + goto topology_populated; + + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = cpuid; + cpuid_topo->package_id = cpu_to_node(cpuid); + + pr_debug("CPU%u: package %d core %d thread %d\n", + cpuid, cpuid_topo->package_id, cpuid_topo->core_id, + cpuid_topo->thread_id); + +topology_populated: + update_siblings_masks(cpuid); +} #endif diff --git 
a/drivers/base/class.c b/drivers/base/class.c index c3451481194e8..ef7ff822bc082 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) } error = class_add_groups(class_get(cls), cls->class_groups); class_put(cls); + if (error) { + kobject_del(&cp->subsys.kobj); + kfree_const(cp->subsys.kobj.name); + kfree(cp); + } return error; } EXPORT_SYMBOL_GPL(__class_register); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 72ef9e83a84b2..497e3d4255c41 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -1088,7 +1088,11 @@ static int __driver_attach(struct device *dev, void *data) return 0; } else if (ret < 0) { dev_dbg(dev, "Bus failed to match device: %d\n", ret); - return ret; + /* + * Driver could not match with device, but may match with + * another device on the bus. + */ + return 0; } /* ret > 0 means positive match */ if (driver_allows_async_probing(drv)) { diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 743268996336d..d0ba5459ce0b9 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2789,6 +2789,10 @@ static int genpd_iterate_idle_states(struct device_node *dn, np = it.node; if (!of_match_node(idle_state_match, np)) continue; + + if (!of_device_is_available(np)) + continue; + if (states) { ret = genpd_parse_state(&states[i], np); if (ret) { diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 835a39e84c1dd..360094692d29e 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -464,7 +464,10 @@ static int rpm_idle(struct device *dev, int rpmflags) /* Pending requests need to be canceled. */ dev->power.request = RPM_REQ_NONE; - if (dev->power.no_callbacks) + callback = RPM_GET_CALLBACK(dev, runtime_idle); + + /* If no callback assume success. */ + if (!callback || dev->power.no_callbacks) goto out; /* Carry out an asynchronous or a synchronous idle notification. 
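+ * (the callback itself is now invoked later with dev->power.lock dropped, instead of going through __rpm_callback())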
*/ @@ -480,10 +483,17 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - callback = RPM_GET_CALLBACK(dev, runtime_idle); + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = callback(dev); - if (callback) - retval = __rpm_callback(callback, dev); + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); dev->power.idle_notification = false; wake_up_all(&dev->power.wait_queue); diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 94665037f4a35..72b7a92337b18 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -120,7 +120,11 @@ static unsigned int read_magic_time(void) struct rtc_time time; unsigned int val; - mc146818_get_time(&time); + if (mc146818_get_time(&time) < 0) { + pr_err("Unable to read current time from RTC\n"); + return 0; + } + pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time); val = time.tm_year; /* 100 years */ if (val > 100) diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c index 3bb7beb127a96..c157a912d6739 100644 --- a/drivers/base/test/test_async_driver_probe.c +++ b/drivers/base/test/test_async_driver_probe.c @@ -146,7 +146,7 @@ static int __init test_async_probe_init(void) calltime = ktime_get(); for_each_online_cpu(cpu) { nid = cpu_to_node(cpu); - pdev = &sync_dev[sync_id]; + pdev = &async_dev[async_id]; *pdev = test_platform_device_register_node("test_async_driver", async_id, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 407527ff6b1f6..420bdaf8c356b 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2720,7 +2720,7 @@ static int init_submitter(struct drbd_device *device) enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor) { struct drbd_resource *resource = adm_ctx->resource; - struct drbd_connection *connection; + struct drbd_connection *connection, *n; struct drbd_device *device; struct drbd_peer_device *peer_device, *tmp_peer_device; struct gendisk *disk; @@ -2819,7 +2819,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig if (init_submitter(device)) { err = ERR_NOMEM; - goto out_idr_remove_vol; + goto out_idr_remove_from_resource; } add_disk(disk); @@ -2836,10 +2836,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_debugfs_device_add(device); return NO_ERROR; -out_idr_remove_vol: - idr_remove(&connection->peer_devices, vnr); out_idr_remove_from_resource: - for_each_connection(connection, resource) { + for_each_connection_safe(connection, n, resource) { peer_device = idr_remove(&connection->peer_devices, vnr); if (peer_device) kref_put(&connection->kref, drbd_destroy_connection); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 4a6b82d434eef..dbcd903ba128f 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1342,10 +1342,12 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); - if (ret) + if (ret) { sock_shutdown(nbd); - flush_workqueue(nbd->recv_workq); + nbd_clear_que(nbd); + } + flush_workqueue(nbd->recv_workq); mutex_lock(&nbd->config_lock); nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ @@ -1863,8 +1865,19 @@ static int 
nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; - if (info->attrs[NBD_ATTR_INDEX]) + if (info->attrs[NBD_ATTR_INDEX]) { index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); + + /* + * A too-large first_minor can cause duplicate creation of + * sysfs files/links, since index << part_shift might overflow and + * MKDEV() expects that first_minor has at most 20 bits. + */ + if (index < 0 || index > MINORMASK >> part_shift) { + printk(KERN_ERR "nbd: illegal input index %d\n", index); + return -EINVAL; + } + } if (!info->attrs[NBD_ATTR_SOCKETS]) { printk(KERN_ERR "nbd: must specify at least one socket\n"); return -EINVAL; } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 9d5460f6e0ff1..6f33d62331b1f 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1852,6 +1852,12 @@ static void free_info(struct blkfront_info *info) kfree(info); } +/* Enable the persistent grants feature. */ +static bool feature_persistent = true; +module_param(feature_persistent, bool, 0644); +MODULE_PARM_DESC(feature_persistent, + "Enables the persistent grants feature"); + /* Common code used when first setting up, and when resuming. */ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) @@ -1943,6 +1949,7 @@ static int talk_to_blkback(struct xenbus_device *dev, message = "writing protocol"; goto abort_transaction; } + info->feature_persistent_parm = feature_persistent; err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", info->feature_persistent_parm); if (err) @@ -2019,12 +2026,6 @@ static int negotiate_mq(struct blkfront_info *info) return 0; } -/* Enable the persistent grants feature. */ -static bool feature_persistent = true; -module_param(feature_persistent, bool, 0644); -MODULE_PARM_DESC(feature_persistent, - "Enables the persistent grants feature"); - /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and @@ -2394,7 +2395,6 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0)) blkfront_setup_discard(info); - info->feature_persistent_parm = feature_persistent; if (info->feature_persistent_parm) info->feature_persistent = !!xenbus_read_unsigned(info->xbdev->otherend, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a699e6166aefe..3d905fda9b29a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -661,13 +661,13 @@ static inline void btusb_free_frags(struct btusb_data *data) spin_lock_irqsave(&data->rxlock, flags); - kfree_skb(data->evt_skb); + dev_kfree_skb_irq(data->evt_skb); data->evt_skb = NULL; - kfree_skb(data->acl_skb); + dev_kfree_skb_irq(data->acl_skb); data->acl_skb = NULL; - kfree_skb(data->sco_skb); + dev_kfree_skb_irq(data->sco_skb); data->sco_skb = NULL; spin_unlock_irqrestore(&data->rxlock, flags); @@ -1833,6 +1833,11 @@ static int btusb_setup_csr(struct hci_dev *hdev) rp = (struct hci_rp_read_local_version *)skb->data; + bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x; LMP ver=%u subver=%04x; manufacturer=%u", + le16_to_cpu(rp->hci_ver), le16_to_cpu(rp->hci_rev), + le16_to_cpu(rp->lmp_ver), le16_to_cpu(rp->lmp_subver), + le16_to_cpu(rp->manufacturer)); + /* Detect a wide host of Chinese controllers that aren't CSR.
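+ * The bt_dev_info() added above logs the reported HCI/LMP versions, which makes these clones easier to identify in bug reports.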
* * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 @@ -2816,6 +2821,7 @@ enum { enum { BTMTK_WMT_INVALID, BTMTK_WMT_PATCH_UNDONE, + BTMTK_WMT_PATCH_PROGRESS, BTMTK_WMT_PATCH_DONE, BTMTK_WMT_ON_UNDONE, BTMTK_WMT_ON_DONE, @@ -2831,7 +2837,7 @@ struct btmtk_wmt_hdr { struct btmtk_hci_wmt_cmd { struct btmtk_wmt_hdr hdr; - u8 data[256]; + u8 data[]; } __packed; struct btmtk_hci_wmt_evt { @@ -2934,7 +2940,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) * to generate the event. Otherwise, the WMT event cannot return from * the device successfully. */ - udelay(100); + udelay(500); usb_anchor_urb(urb, &data->ctrl_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); @@ -3010,7 +3016,7 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; u32 hlen, status = BTMTK_WMT_INVALID; struct btmtk_hci_wmt_evt *wmt_evt; - struct btmtk_hci_wmt_cmd wc; + struct btmtk_hci_wmt_cmd *wc; struct btmtk_wmt_hdr *hdr; int err; @@ -3019,24 +3025,42 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, if (hlen > 255) return -EINVAL; - hdr = (struct btmtk_wmt_hdr *)&wc; + wc = kzalloc(hlen, GFP_KERNEL); + if (!wc) + return -ENOMEM; + + hdr = &wc->hdr; hdr->dir = 1; hdr->op = wmt_params->op; hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); hdr->flag = wmt_params->flag; - memcpy(wc.data, wmt_params->data, wmt_params->dlen); + memcpy(wc->data, wmt_params->data, wmt_params->dlen); set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); + /* WMT cmd/event doesn't follow the generic HCI cmd/event handling: + * it requires constantly polling the control pipe until the host has + * received the WMT event. We must therefore hold a PM reference on the + * USB interface to prevent it from being auto-suspended while a WMT + * cmd/event is in progress.
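+ * The PM reference is dropped again right after the event URB has been submitted, or on any failure.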
+ */ + err = usb_autopm_get_interface(data->intf); + if (err < 0) + goto err_free_wc; + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); if (err < 0) { clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - return err; + usb_autopm_put_interface(data->intf); + goto err_free_wc; } /* Submit control IN URB on demand to process the WMT event */ err = btusb_mtk_submit_wmt_recv_urb(hdev); + + usb_autopm_put_interface(data->intf); + if (err < 0) return err; @@ -3054,13 +3078,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - return err; + goto err_free_wc; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - return -ETIMEDOUT; + err = -ETIMEDOUT; + goto err_free_wc; } /* Parse and handle the return WMT event */ @@ -3096,7 +3121,8 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, err_free_skb: kfree_skb(data->evt_skb); data->evt_skb = NULL; - +err_free_wc: + kfree(wc); return err; } @@ -3238,9 +3264,9 @@ static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val) return err; } -static int btusb_mtk_id_get(struct btusb_data *data, u32 *id) +static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id) { - return btusb_mtk_reg_read(data, 0x80000008, id); + return btusb_mtk_reg_read(data, reg, id); } static int btusb_mtk_setup(struct hci_dev *hdev) @@ -3258,7 +3284,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev) calltime = ktime_get(); - err = btusb_mtk_id_get(data, &dev_id); + err = btusb_mtk_id_get(data, 0x80000008, &dev_id); if (err < 0) { bt_dev_err(hdev, "Failed to get device id (%d)", err); return err; diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index cf4a560958173..8055f63603f45 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) i++; __skb_unlink(skb, &bcsp->unack); - kfree_skb(skb); + dev_kfree_skb_irq(skb); } if (skb_queue_empty(&bcsp->unack)) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 996729e78105a..7f70a677b92b9 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -299,7 +299,7 @@ static void h5_pkt_cull(struct h5 *h5) break; __skb_unlink(skb, &h5->unack); - kfree_skb(skb); + dev_kfree_skb_irq(skb); } if (skb_queue_empty(&h5->unack)) diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 637c5b8c2aa1a..726d5c83c550e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -490,6 +490,11 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_ERR("Can't allocate control structure"); return -ENFILE; } + if (percpu_init_rwsem(&hu->proto_lock)) { + BT_ERR("Can't allocate semaphore structure"); + kfree(hu); + return -ENOMEM; + } tty->disc_data = hu; hu->tty = tty; @@ -502,8 +507,6 @@ static int hci_uart_tty_open(struct tty_struct *tty) INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - percpu_init_rwsem(&hu->proto_lock); - /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 8bfe024d1fcd8..7495ca34c9e7e 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) default: BT_ERR("illegal 
hcill state: %ld (losing packet)", ll->hcill_state); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index eea18aed17f8a..5347fc465ce89 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -50,6 +50,9 @@ #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000 #define CMD_TRANS_TIMEOUT_MS 100 #define MEMDUMP_TIMEOUT_MS 8000 +#define IBS_DISABLE_SSR_TIMEOUT_MS \ + (MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS) +#define FW_DOWNLOAD_TIMEOUT_MS 3000 /* susclk rate */ #define SUSCLK_RATE_32KHZ 32768 @@ -68,12 +71,14 @@ #define QCA_MEMDUMP_BYTE 0xFB enum qca_flags { - QCA_IBS_ENABLED, + QCA_IBS_DISABLED, QCA_DROP_VENDOR_EVENT, QCA_SUSPENDING, QCA_MEMDUMP_COLLECTION, QCA_HW_ERROR_EVENT, - QCA_SSR_TRIGGERED + QCA_SSR_TRIGGERED, + QCA_BT_OFF, + QCA_ROM_FW }; enum qca_capabilities { @@ -870,7 +875,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) * Out-Of-Band(GPIOs control) sleep is selected. * Don't wake the device up when suspending. */ - if (!test_bit(QCA_IBS_ENABLED, &qca->flags) || + if (test_bit(QCA_IBS_DISABLED, &qca->flags) || test_bit(QCA_SUSPENDING, &qca->flags)) { skb_queue_tail(&qca->txq, skb); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); @@ -905,7 +910,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) default: BT_ERR("Illegal tx state: %d (losing packet)", qca->tx_ibs_state); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } @@ -1015,7 +1020,7 @@ static void qca_controller_memdump(struct work_struct *work) * the controller to send the dump is 8 seconds. let us * start timer to handle this asynchronous activity. */ - clear_bit(QCA_IBS_ENABLED, &qca->flags); + set_bit(QCA_IBS_DISABLED, &qca->flags); set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); dump = (void *) skb->data; dump_size = __le32_to_cpu(dump->dump_size); @@ -1621,6 +1626,7 @@ static int qca_power_on(struct hci_dev *hdev) struct hci_uart *hu = hci_get_drvdata(hdev); enum qca_btsoc_type soc_type = qca_soc_type(hu); struct qca_serdev *qcadev; + struct qca_data *qca = hu->priv; int ret = 0; /* Non-serdev device usually is powered by external power @@ -1640,6 +1646,7 @@ static int qca_power_on(struct hci_dev *hdev) } } + clear_bit(QCA_BT_OFF, &qca->flags); return ret; } @@ -1658,8 +1665,9 @@ static int qca_setup(struct hci_uart *hu) if (ret) return ret; + clear_bit(QCA_ROM_FW, &qca->flags); /* Patch downloading has to be done without IBS mode */ - clear_bit(QCA_IBS_ENABLED, &qca->flags); + set_bit(QCA_IBS_DISABLED, &qca->flags); /* Enable controller to do both LE scan and BR/EDR inquiry * simultaneously. @@ -1710,18 +1718,20 @@ static int qca_setup(struct hci_uart *hu) ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver, firmware_name); if (!ret) { - set_bit(QCA_IBS_ENABLED, &qca->flags); + clear_bit(QCA_IBS_DISABLED, &qca->flags); qca_debugfs_init(hdev); hu->hdev->hw_error = qca_hw_error; hu->hdev->cmd_timeout = qca_cmd_timeout; } else if (ret == -ENOENT) { /* No patch/nvm-config found, run with original fw/config */ + set_bit(QCA_ROM_FW, &qca->flags); ret = 0; } else if (ret == -EAGAIN) { /* * Userspace firmware loader will return -EAGAIN in case no * patch/nvm-config is found, so run with original fw/config. */ + set_bit(QCA_ROM_FW, &qca->flags); ret = 0; } else { if (retries < MAX_INIT_RETRIES) { @@ -1814,7 +1824,7 @@ static void qca_power_shutdown(struct hci_uart *hu) * data in skb's. 
*/ spin_lock_irqsave(&qca->hci_ibs_lock, flags); - clear_bit(QCA_IBS_ENABLED, &qca->flags); + set_bit(QCA_IBS_DISABLED, &qca->flags); qca_flush(hu); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); @@ -1833,6 +1843,8 @@ static void qca_power_shutdown(struct hci_uart *hu) } else if (qcadev->bt_en) { gpiod_set_value_cansleep(qcadev->bt_en, 0); } + + set_bit(QCA_BT_OFF, &qca->flags); } static int qca_power_off(struct hci_dev *hdev) @@ -2057,10 +2069,17 @@ static void qca_serdev_shutdown(struct device *dev) int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); struct serdev_device *serdev = to_serdev_device(dev); struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); + struct hci_uart *hu = &qcadev->serdev_hu; + struct hci_dev *hdev = hu->hdev; + struct qca_data *qca = hu->priv; const u8 ibs_wake_cmd[] = { 0xFD }; const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 }; if (qcadev->btsoc_type == QCA_QCA6390) { + if (test_bit(QCA_BT_OFF, &qca->flags) || + !test_bit(HCI_RUNNING, &hdev->flags)) + return; + serdev_device_write_flush(serdev); ret = serdev_device_write_buf(serdev, ibs_wake_cmd, sizeof(ibs_wake_cmd)); @@ -2093,13 +2112,44 @@ static int __maybe_unused qca_suspend(struct device *dev) bool tx_pending = false; int ret = 0; u8 cmd; + u32 wait_timeout = 0; set_bit(QCA_SUSPENDING, &qca->flags); - /* Device is downloading patch or doesn't support in-band sleep. */ - if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) + /* If the BT SoC is running with its default firmware, it does not + * support in-band sleep. + */ + if (test_bit(QCA_ROM_FW, &qca->flags)) return 0; + /* During SSR, after memory dump collection the controller will be + * powered off and then powered on. If the controller is powered off + * during SSR, we should wait until SSR completes. + */ + if (test_bit(QCA_BT_OFF, &qca->flags) && + !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) + return 0; + + if (test_bit(QCA_IBS_DISABLED, &qca->flags) || + test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { + wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? + IBS_DISABLE_SSR_TIMEOUT_MS : + FW_DOWNLOAD_TIMEOUT_MS; + + /* The QCA_IBS_DISABLED flag is set during FW download and + * during memory dump collection. It is cleared again once the + * FW download completes.
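+ * wait_on_bit_timeout() below sleeps until the bit is cleared or the selected timeout expires.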
+ */ + wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, + TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout)); + + if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { + bt_dev_err(hu->hdev, "SSR or FW download time out"); + ret = -ETIMEDOUT; + goto error; + } + } + cancel_work_sync(&qca->ws_awake_device); cancel_work_sync(&qca->ws_awake_rx); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index e9a44ab3812df..f2e2e553d4de7 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -301,11 +301,12 @@ int hci_uart_register_device(struct hci_uart *hu, serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); + if (percpu_init_rwsem(&hu->proto_lock)) + return -ENOMEM; + err = serdev_device_open(hu->serdev); if (err) - return err; - - percpu_init_rwsem(&hu->proto_lock); + goto err_rwsem; err = p->open(hu); if (err) @@ -375,6 +376,8 @@ int hci_uart_register_device(struct hci_uart *hu, p->close(hu); err_open: serdev_device_close(hu->serdev); +err_rwsem: + percpu_free_rwsem(&hu->proto_lock); return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); @@ -396,5 +399,6 @@ void hci_uart_unregister_device(struct hci_uart *hu) clear_bit(HCI_UART_PROTO_READY, &hu->flags); serdev_device_close(hu->serdev); } + percpu_free_rwsem(&hu->proto_lock); } EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 044dcdd723a70..7d69b740b9f93 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -298,7 +298,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) read_lock_irq(&mhi_chan->lock); /* Only ring DB if ring is not empty */ - if (tre_ring->base && tre_ring->wp != tre_ring->rp) + if (tre_ring->base && tre_ring->wp != tre_ring->rp && + mhi_chan->ch_state == MHI_CH_STATE_ENABLED) mhi_ring_chan_db(mhi_cntrl, mhi_chan); read_unlock_irq(&mhi_chan->lock); } diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 9b1a5e62417cb..98cbb18f17fa3 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -268,6 +268,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register); /* common code that starts a transfer */ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) { + u32 int_mask, status; + bool timeout; + if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) { dev_dbg(rsb->dev, "RSB transfer still in progress\n"); return -EBUSY; @@ -275,13 +278,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) reinit_completion(&rsb->complete); - writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER, - rsb->regs + RSB_INTE); + int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER; + writel(int_mask, rsb->regs + RSB_INTE); writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB, rsb->regs + RSB_CTRL); - if (!wait_for_completion_io_timeout(&rsb->complete, - msecs_to_jiffies(100))) { + if (irqs_disabled()) { + timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS, + status, (status & int_mask), + 10, 100000); + writel(status, rsb->regs + RSB_INTS); + } else { + timeout = !wait_for_completion_io_timeout(&rsb->complete, + msecs_to_jiffies(100)); + status = rsb->status; + } + + if (timeout) { dev_dbg(rsb->dev, "RSB timeout\n"); /* abort the transfer */ @@ -293,18 +306,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) return -ETIMEDOUT; } - if (rsb->status & RSB_INTS_LOAD_BSY) { + if (status & RSB_INTS_LOAD_BSY) { dev_dbg(rsb->dev, "RSB busy\n"); return -EBUSY; } - if (rsb->status & RSB_INTS_TRANS_ERR) { - if 
(rsb->status & RSB_INTS_TRANS_ERR_ACK) { + if (status & RSB_INTS_TRANS_ERR) { + if (status & RSB_INTS_TRANS_ERR_ACK) { dev_dbg(rsb->dev, "RSB slave nack\n"); return -EINVAL; } - if (rsb->status & RSB_INTS_TRANS_ERR_DATA) { + if (status & RSB_INTS_TRANS_ERR_DATA) { dev_dbg(rsb->dev, "RSB transfer data error\n"); return -EIO; } @@ -768,7 +781,13 @@ static int __init sunxi_rsb_init(void) return ret; } - return platform_driver_register(&sunxi_rsb_driver); + ret = platform_driver_register(&sunxi_rsb_driver); + if (ret) { + bus_unregister(&sunxi_rsb_bus); + return ret; + } + + return 0; } module_init(sunxi_rsb_init); diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 9959c762da2f8..db3dd467194c2 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -143,15 +143,19 @@ static int __init mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); if (err) - return err; + goto put_dev; pmbase &= 0x0000FF00; - if (pmbase == 0) - return -EIO; + if (pmbase == 0) { + err = -EIO; + goto put_dev; + } priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + err = -ENOMEM; + goto put_dev; + } if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", @@ -185,6 +189,8 @@ static int __init mod_init(void) release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); out: kfree(priv); +put_dev: + pci_dev_put(pdev); return err; } @@ -200,6 +206,8 @@ static void __exit mod_exit(void) release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); + pci_dev_put(priv->pcidev); + kfree(priv); } diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index e1d421a36a138..207272979f233 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, pci_tbl); +struct amd_geode_priv { + struct pci_dev *pcidev; + void __iomem *membase; +}; static int geode_rng_data_read(struct hwrng *rng, u32 *data) { @@ -90,6 +94,7 @@ static int __init mod_init(void) const struct pci_device_id *ent; void __iomem *mem; unsigned long rng_base; + struct amd_geode_priv *priv; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); @@ -97,17 +102,26 @@ static int __init mod_init(void) goto found; } /* Device not found. 
*/ - goto out; + return err; found: + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto put_dev; + } + rng_base = pci_resource_start(pdev, 0); if (rng_base == 0) - goto out; + goto free_priv; err = -ENOMEM; mem = ioremap(rng_base, 0x58); if (!mem) - goto out; - geode_rng.priv = (unsigned long)mem; + goto free_priv; + + geode_rng.priv = (unsigned long)priv; + priv->membase = mem; + priv->pcidev = pdev; pr_info("AMD Geode RNG detected\n"); err = hwrng_register(&geode_rng); @@ -116,20 +130,26 @@ static int __init mod_init(void) err); goto err_unmap; } -out: return err; err_unmap: iounmap(mem); - goto out; +free_priv: + kfree(priv); +put_dev: + pci_dev_put(pdev); + return err; } static void __exit mod_exit(void) { - void __iomem *mem = (void __iomem *)geode_rng.priv; + struct amd_geode_priv *priv; + priv = (struct amd_geode_priv *)geode_rng.priv; hwrng_unregister(&geode_rng); - iounmap(mem); + iounmap(priv->membase); + pci_dev_put(priv->pcidev); + kfree(priv); } module_init(mod_init); diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 61c844baf26e8..9b182e5bfa87a 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -272,13 +272,6 @@ static int imx_rngc_probe(struct platform_device *pdev) goto err; } - ret = devm_request_irq(&pdev->dev, - irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); - if (ret) { - dev_err(rngc->dev, "Can't get interrupt working.\n"); - goto err; - } - init_completion(&rngc->rng_op_done); rngc->rng.name = pdev->name; @@ -292,6 +285,13 @@ static int imx_rngc_probe(struct platform_device *pdev) imx_rngc_irq_mask_clear(rngc); + ret = devm_request_irq(&pdev->dev, + irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); + if (ret) { + dev_err(rngc->dev, "Can't get interrupt working.\n"); + return ret; + } + if (self_test) { ret = imx_rngc_self_test(rngc); if (ret) { diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 05e7339752ac3..b89f300751b1b 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -1284,6 +1284,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; + struct module *owner; if (!acquire_ipmi_user(user, &i)) { /* @@ -1345,8 +1346,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user) kfree(rcvr); } + owner = intf->owner; kref_put(&intf->refcount, intf_free); - module_put(intf->owner); + module_put(owner); } int ipmi_destroy_user(struct ipmi_user *user) @@ -3540,12 +3542,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf, struct ipmi_smi_msg *msg, unsigned char err) { + int rv; msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = err; msg->rsp_size = 3; - /* It's an error, so it will never requeue, no need to check return. */ - handle_one_recv_msg(intf, msg); + + /* This will never requeue, but it may ask us to free the message. 
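+ * (a return of 0 from handle_one_recv_msg() means we still own the message and must free it ourselves)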
*/ + rv = handle_one_recv_msg(intf, msg); + if (rv == 0) + ipmi_free_smi_msg(msg); } static void cleanup_smi_msgs(struct ipmi_smi *intf) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5eac94cf4ff89..7fe68c680d3ee 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2160,6 +2160,20 @@ static int __init init_ipmi_si(void) } module_init(init_ipmi_si); +static void wait_msg_processed(struct smi_info *smi_info) +{ + unsigned long jiffies_now; + long time_diff; + + while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { + jiffies_now = jiffies; + time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) + * SI_USEC_PER_JIFFY); + smi_event_handler(smi_info, time_diff); + schedule_timeout_uninterruptible(1); + } +} + static void shutdown_smi(void *send_info) { struct smi_info *smi_info = send_info; @@ -2194,16 +2208,13 @@ static void shutdown_smi(void *send_info) * in the BMC. Note that timers and CPU interrupts are off, * so no need for locks. */ - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + wait_msg_processed(smi_info); + if (smi_info->handlers) disable_si_irq(smi_info); - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + + wait_msg_processed(smi_info); + if (smi_info->handlers) smi_info->handlers->cleanup(smi_info->si_sm); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 94c2b556cf972..7d483c3323480 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -981,8 +981,8 @@ static const struct memdev { #endif [5] = { "zero", 0666, &zero_fops, 0 }, [7] = { "full", 0666, &full_fops, 0 }, - [8] = { "random", 0666, &random_fops, 0 }, - [9] = { "urandom", 0666, &urandom_fops, 0 }, + [8] = { "random", 0666, &random_fops, FMODE_NOWAIT }, + [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT }, #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif diff --git a/drivers/char/random.c b/drivers/char/random.c index f769d858eda73..b54481e667307 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -895,20 +895,23 @@ void __cold add_bootloader_randomness(const void *buf, size_t len) EXPORT_SYMBOL_GPL(add_bootloader_randomness); struct fast_pool { - struct work_struct mix; unsigned long pool[4]; unsigned long last; unsigned int count; + struct timer_list mix; }; +static void mix_interrupt_randomness(struct timer_list *work); + static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { #ifdef CONFIG_64BIT #define FASTMIX_PERM SIPHASH_PERMUTATION - .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 } + .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }, #else #define FASTMIX_PERM HSIPHASH_PERMUTATION - .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 } + .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }, #endif + .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0) }; /* @@ -950,7 +953,7 @@ int __cold random_online_cpu(unsigned int cpu) } #endif -static void mix_interrupt_randomness(struct work_struct *work) +static void mix_interrupt_randomness(struct timer_list *work) { struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix); /* @@ -981,7 +984,7 @@ static void mix_interrupt_randomness(struct work_struct *work) local_irq_enable(); mix_pool_bytes(pool, sizeof(pool)); - 
credit_init_bits(max(1u, (count & U16_MAX) / 64)); + credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8)); memzero_explicit(pool, sizeof(pool)); } @@ -1004,10 +1007,11 @@ void add_interrupt_randomness(int irq) if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) return; - if (unlikely(!fast_pool->mix.func)) - INIT_WORK(&fast_pool->mix, mix_interrupt_randomness); fast_pool->count |= MIX_INFLIGHT; - queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix); + if (!timer_pending(&fast_pool->mix)) { + fast_pool->mix.expires = jiffies; + add_timer_on(&fast_pool->mix, raw_smp_processor_id()); + } } EXPORT_SYMBOL_GPL(add_interrupt_randomness); @@ -1299,6 +1303,11 @@ static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter) { int ret; + if (!crng_ready() && + ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) || + (kiocb->ki_filp->f_flags & O_NONBLOCK))) + return -EAGAIN; + ret = wait_for_random_bytes(); if (ret != 0) return ret; diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index 1b18ce5ebab1e..0913d3eb8d518 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip) return -ENODEV; if (tbl->header.length < - sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) + sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) { + acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; + } tpm2_phy = (void *)tbl + sizeof(*tbl); len = tpm2_phy->log_area_minimum_length; start = tpm2_phy->log_area_start_address; - if (!start || !len) + if (!start || !len) { + acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; + } + acpi_put_table((struct acpi_table_header *)tbl); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; } else { /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ @@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip) break; } + acpi_put_table((struct acpi_table_header *)buff); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; } + if (!len) { dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__); return -EIO; @@ -156,5 +163,4 @@ int tpm_read_log_acpi(struct tpm_chip *chip) kfree(log->bios_event_log); log->bios_event_log = NULL; return ret; - } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 1621ce8187052..d69905233aff2 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -401,13 +401,14 @@ int tpm_pm_suspend(struct device *dev) !pm_suspend_via_firmware()) goto suspended; - if (!tpm_chip_start(chip)) { + rc = tpm_try_get_ops(chip); + if (!rc) { if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm2_shutdown(chip, TPM2_SU_STATE); else rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); - tpm_chip_stop(chip); + tpm_put_ops(chip); } suspended: diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index a9dcf31eadd21..5fe52a6839b53 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev, iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, TPM2_TIMEOUT_C)) { - dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); + dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); return -ETIME; } @@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device) /* Should the FIFO driver handle this? 
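+ * (a memory-mapped start method means a FIFO/TIS interface, which the tpm_tis driver handles instead of tpm_crb)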
*/ sm = buf->start_method; - if (sm == ACPI_TPM2_MEMORY_MAPPED) - return -ENODEV; + if (sm == ACPI_TPM2_MEMORY_MAPPED) { + rc = -ENODEV; + goto out; + } priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + rc = -ENOMEM; + goto out; + } if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) { @@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device) FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", buf->header.length, ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); - return -EINVAL; + rc = -EINVAL; + goto out; } crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf)); priv->smc_func_id = crb_smc->smc_func_id; @@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device) rc = crb_map_io(device, priv, buf); if (rc) - return rc; + goto out; chip = tpmm_chip_alloc(dev, &tpm_crb); - if (IS_ERR(chip)) - return PTR_ERR(chip); + if (IS_ERR(chip)) { + rc = PTR_ERR(chip); + goto out; + } dev_set_drvdata(&chip->dev, priv); chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; - return tpm_chip_register(chip); + rc = tpm_chip_register(chip); + +out: + acpi_put_table((struct acpi_table_header *)buf); + return rc; } static int crb_acpi_remove(struct acpi_device *device) diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 6e3235565a4d8..d9daaafdd295c 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void) if (rc) return rc; - return driver_register(&ftpm_tee_driver.driver); + rc = driver_register(&ftpm_tee_driver.driver); + if (rc) { + platform_driver_unregister(&ftpm_tee_plat_driver); + return rc; + } + + return 0; } static void __exit ftpm_mod_exit(void) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 4ed6e660273a4..14fad16d371fb 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev) const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev); struct acpi_table_tpm2 *tbl; acpi_status st; + int ret = 0; if (!aid || aid->driver_data != DEVICE_IS_TPM2) return 0; @@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev) /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2 * table is mandatory */ - st = - acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); + st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) { dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n"); return -EINVAL; @@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev) /* The tpm2_crb driver handles this device */ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED) - return -ENODEV; + ret = -ENODEV; - return 0; + acpi_put_table((struct acpi_table_header *)tbl); + return ret; } #else static int check_acpi_tpm2(struct device *dev) diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c index 2c3d8e6ca63ca..7cc20c0f88655 100644 --- a/drivers/clk/at91/at91rm9200.c +++ b/drivers/clk/at91/at91rm9200.c @@ -38,7 +38,7 @@ static const struct clk_pll_characteristics rm9200_pll_characteristics = { }; static const struct sck at91rm9200_systemck[] = { - { .n = "udpck", .p = "usbck", .id = 2 }, + { .n = "udpck", .p = "usbck", .id = 1 }, { .n = "uhpck", .p = "usbck", .id = 4 }, { .n = 
"pck0", .p = "prog0", .id = 8 }, { .n = "pck1", .p = "prog1", .id = 9 }, diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c index 4062092d67f90..a6642f3d33d44 100644 --- a/drivers/clk/baikal-t1/ccu-div.c +++ b/drivers/clk/baikal-t1/ccu-div.c @@ -34,6 +34,7 @@ #define CCU_DIV_CTL_CLKDIV_MASK(_width) \ GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD) #define CCU_DIV_CTL_LOCK_SHIFTED BIT(27) +#define CCU_DIV_CTL_GATE_REF_BUF BIT(28) #define CCU_DIV_CTL_LOCK_NORMAL BIT(31) #define CCU_DIV_RST_DELAY_US 1 @@ -170,6 +171,40 @@ static int ccu_div_gate_is_enabled(struct clk_hw *hw) return !!(val & CCU_DIV_CTL_EN); } +static int ccu_div_buf_enable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_GATE_REF_BUF, 0); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static void ccu_div_buf_disable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF); + spin_unlock_irqrestore(&div->lock, flags); +} + +static int ccu_div_buf_is_enabled(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + u32 val = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &val); + + return !(val & CCU_DIV_CTL_GATE_REF_BUF); +} + static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -323,6 +358,7 @@ static const struct ccu_div_dbgfs_bit ccu_div_bits[] = { CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN), CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST), CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV), + CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF), CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL) }; @@ -441,6 +477,9 @@ static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry) continue; } + if (!strcmp("div_buf", name)) + continue; + bits[didx] = ccu_div_bits[bidx]; bits[didx].div = div; @@ -477,6 +516,21 @@ static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry) &ccu_div_dbgfs_fixed_clkdiv_fops); } +static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + struct ccu_div_dbgfs_bit *bit; + + bit = kmalloc(sizeof(*bit), GFP_KERNEL); + if (!bit) + return; + + *bit = ccu_div_bits[3]; + bit->div = div; + debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit, + &ccu_div_dbgfs_bit_fops); +} + static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) { struct ccu_div *div = to_ccu_div(hw); @@ -489,6 +543,7 @@ static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) #define ccu_div_var_debug_init NULL #define ccu_div_gate_debug_init NULL +#define ccu_div_buf_debug_init NULL #define ccu_div_fixed_debug_init NULL #endif /* !CONFIG_DEBUG_FS */ @@ -520,6 +575,13 @@ static const struct clk_ops ccu_div_gate_ops = { .debug_init = ccu_div_gate_debug_init }; +static const struct clk_ops ccu_div_buf_ops = { + .enable = ccu_div_buf_enable, + .disable = ccu_div_buf_disable, + .is_enabled = ccu_div_buf_is_enabled, + .debug_init = ccu_div_buf_debug_init +}; + static const struct clk_ops ccu_div_fixed_ops = { .recalc_rate = ccu_div_fixed_recalc_rate, .round_rate = ccu_div_fixed_round_rate, @@ -566,6 +628,8 @@ struct ccu_div 
*ccu_div_hw_register(const struct ccu_div_init_data *div_init) } else if (div_init->type == CCU_DIV_GATE) { hw_init.ops = &ccu_div_gate_ops; div->divider = div_init->divider; + } else if (div_init->type == CCU_DIV_BUF) { + hw_init.ops = &ccu_div_buf_ops; } else if (div_init->type == CCU_DIV_FIXED) { hw_init.ops = &ccu_div_fixed_ops; div->divider = div_init->divider; @@ -579,6 +643,7 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init) goto err_free_div; } parent_data.fw_name = div_init->parent_name; + parent_data.name = div_init->parent_name; hw_init.parent_data = &parent_data; hw_init.num_parents = 1; diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h index 795665caefbdc..4eb49ff4803c6 100644 --- a/drivers/clk/baikal-t1/ccu-div.h +++ b/drivers/clk/baikal-t1/ccu-div.h @@ -13,6 +13,14 @@ #include <linux/bits.h> #include <linux/of.h> +/* + * CCU Divider private clock IDs + * @CCU_SYS_SATA_CLK: CCU SATA internal clock + * @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock + */ +#define CCU_SYS_SATA_CLK -1 +#define CCU_SYS_XGMAC_CLK -2 + /* * CCU Divider private flags * @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1. @@ -31,11 +39,13 @@ * enum ccu_div_type - CCU Divider types * @CCU_DIV_VAR: Clocks gate with variable divider. * @CCU_DIV_GATE: Clocks gate with fixed divider. + * @CCU_DIV_BUF: Clock gate with no divider. * @CCU_DIV_FIXED: Ungateable clock with fixed divider. */ enum ccu_div_type { CCU_DIV_VAR, CCU_DIV_GATE, + CCU_DIV_BUF, CCU_DIV_FIXED }; diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c index f141fda12b09a..90f4fda406ee6 100644 --- a/drivers/clk/baikal-t1/clk-ccu-div.c +++ b/drivers/clk/baikal-t1/clk-ccu-div.c @@ -76,6 +76,16 @@ .divider = _divider \ } +#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .type = CCU_DIV_BUF, \ + .flags = _flags \ + } + #define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \ { \ .id = _id, \ @@ -188,11 +198,14 @@ static const struct ccu_div_rst_map axi_rst_map[] = { * for the SoC devices registers IO-operations. 
*/ static const struct ccu_div_info sys_info[] = { - CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", + CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk", "sata_clk", CCU_SYS_SATA_REF_BASE, 4, CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED | CCU_DIV_RESET_DOMAIN), + CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", + "sys_sata_clk", CCU_SYS_SATA_REF_BASE, + CLK_SET_RATE_PARENT), CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk", "pcie_clk", CCU_SYS_APB_BASE, 5, CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN), @@ -204,10 +217,12 @@ static const struct ccu_div_info sys_info[] = { "eth_clk", CCU_SYS_GMAC1_BASE, 5), CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk", "eth_clk", 10), - CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", - "eth_clk", CCU_SYS_XGMAC_BASE, 8), + CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk", + "eth_clk", CCU_SYS_XGMAC_BASE, 1), + CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", + "sys_xgmac_clk", 8), CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk", - "eth_clk", 10), + "sys_xgmac_clk", 8), CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk", "eth_clk", CCU_SYS_USB_BASE, 10), CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk", @@ -396,6 +411,9 @@ static int ccu_div_clk_register(struct ccu_div_data *data) init.base = info->base; init.sys_regs = data->sys_regs; init.divider = info->divider; + } else if (init.type == CCU_DIV_BUF) { + init.base = info->base; + init.sys_regs = data->sys_regs; } else { init.divider = info->divider; } diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 178886823b90c..b7f89873fcf54 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -968,9 +968,9 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, return div; } -static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, - unsigned long parent_rate, - u32 div) +static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, + unsigned long parent_rate, + u32 div) { const struct bcm2835_clock_data *data = clock->data; u64 temp; @@ -1786,7 +1786,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .load_mask = CM_PLLC_LOADPER, .hold_mask = CM_PLLC_HOLDPER, .fixed_divider = 1, - .flags = CLK_SET_RATE_PARENT), + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT), /* * PLLD is the display PLL, used to drive DSI display panels. 
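Note: the CLK_IS_CRITICAL flag added to the PLLC per-channel divider above tells the common clock framework to prepare and enable the clock as soon as it is registered, and to keep it running even when no consumer holds a reference. A minimal provider-side sketch of the same technique follows; it assumes a hypothetical gate clock, and none of the names in it come from this patch:

	/* Hedged sketch, not part of the patch above: marking a clock
	 * critical at registration.  All names here are hypothetical.
	 */
	static const struct clk_init_data example_init = {
		.name		= "example_per_clk",	/* hypothetical */
		.ops		= &clk_gate_ops,	/* any suitable clk_ops */
		.parent_names	= (const char *[]){ "example_pll" },
		.num_parents	= 1,
		/* prepared/enabled at registration; the core never gates it */
		.flags		= CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
	};

The flag is meant for clocks whose loss would hang the system (bus, memory, or always-on peripheral feeds, as in the GDSC and APSS changes later in this series); ordinary leaf clocks should instead rely on consumers calling clk_prepare_enable().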
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index 274441e2ddb28..8f0619f362e3b 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -736,6 +736,7 @@ void iproc_pll_clk_setup(struct device_node *node, const char *parent_name; struct iproc_clk *iclk_array; struct clk_hw_onecell_data *clk_data; + const char *clk_name; if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl)) return; @@ -783,7 +784,12 @@ void iproc_pll_clk_setup(struct device_node *node, iclk = &iclk_array[0]; iclk->pll = pll; - init.name = node->name; + ret = of_property_read_string_index(node, "clock-output-names", + 0, &clk_name); + if (WARN_ON(ret)) + goto err_pll_register; + + init.name = clk_name; init.ops = &iproc_pll_ops; init.flags = 0; parent_name = of_clk_get_parent_name(node, 0); @@ -803,13 +809,11 @@ void iproc_pll_clk_setup(struct device_node *node, goto err_pll_register; clk_data->hws[0] = &iclk->hw; + parent_name = clk_name; /* now initialize and register all leaf clocks */ for (i = 1; i < num_clks; i++) { - const char *clk_name; - memset(&init, 0, sizeof(init)); - parent_name = node->name; ret = of_property_read_string_index(node, "clock-output-names", i, &clk_name); diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c index bccdfa00fd373..67a9edbba29c4 100644 --- a/drivers/clk/berlin/bg2.c +++ b/drivers/clk/berlin/bg2.c @@ -500,12 +500,15 @@ static void __init berlin2_clock_setup(struct device_node *np) int n, ret; clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); - if (!clk_data) + if (!clk_data) { + of_node_put(parent_np); return; + } clk_data->num = MAX_CLKS; hws = clk_data->hws; gbase = of_iomap(parent_np, 0); + of_node_put(parent_np); if (!gbase) return; diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index e9518d35f262e..dd2784bb75b64 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c @@ -286,19 +286,23 @@ static void __init berlin2q_clock_setup(struct device_node *np) int n, ret; clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); - if (!clk_data) + if (!clk_data) { + of_node_put(parent_np); return; + } clk_data->num = MAX_CLKS; hws = clk_data->hws; gbase = of_iomap(parent_np, 0); if (!gbase) { + of_node_put(parent_np); pr_err("%pOF: Unable to map global base\n", np); return; } /* BG2Q CPU PLL is not part of global registers */ cpupll_base = of_iomap(parent_np, 1); + of_node_put(parent_np); if (!cpupll_base) { pr_err("%pOF: Unable to map cpupll base\n", np); iounmap(gbase); diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index 24dab2312bc6f..9c3305bcb27ae 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -622,7 +622,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev) regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */ /* P-Bus (BCLK) clock divider */ - hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0, + hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0, ast2600_div_table, &aspeed_g6_clk_lock); diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index f9d5b73343417..4fb4fd4b06bda 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -4,42 +4,101 @@ #include <linux/export.h> #include <linux/gfp.h> +struct devm_clk_state { + struct clk *clk; + void (*exit)(struct clk *clk); +}; + static void devm_clk_release(struct device *dev, void *res) { - clk_put(*(struct clk **)res); + struct devm_clk_state *state = res; + + if (state->exit) + 
state->exit(state->clk); + + clk_put(state->clk); } -struct clk *devm_clk_get(struct device *dev, const char *id) +static struct clk *__devm_clk_get(struct device *dev, const char *id, + struct clk *(*get)(struct device *dev, const char *id), + int (*init)(struct clk *clk), + void (*exit)(struct clk *clk)) { - struct clk **ptr, *clk; + struct devm_clk_state *state; + struct clk *clk; + int ret; - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); + if (!state) return ERR_PTR(-ENOMEM); - clk = clk_get(dev, id); - if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); - } else { - devres_free(ptr); + clk = get(dev, id); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto err_clk_get; } + if (init) { + ret = init(clk); + if (ret) + goto err_clk_init; + } + + state->clk = clk; + state->exit = exit; + + devres_add(dev, state); + return clk; + +err_clk_init: + + clk_put(clk); +err_clk_get: + + devres_free(state); + return ERR_PTR(ret); +} + +struct clk *devm_clk_get(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get, NULL, NULL); } EXPORT_SYMBOL(devm_clk_get); -struct clk *devm_clk_get_optional(struct device *dev, const char *id) +struct clk *devm_clk_get_prepared(struct device *dev, const char *id) { - struct clk *clk = devm_clk_get(dev, id); + return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_prepared); - if (clk == ERR_PTR(-ENOENT)) - return NULL; +struct clk *devm_clk_get_enabled(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get, + clk_prepare_enable, clk_disable_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_enabled); - return clk; +struct clk *devm_clk_get_optional(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL); } EXPORT_SYMBOL(devm_clk_get_optional); +struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get_optional, + clk_prepare, clk_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared); + +struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get_optional, + clk_prepare_enable, clk_disable_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled); + struct clk_bulk_devres { struct clk_bulk_data *clks; int num_clks; diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c index 78d5ea669fea7..2fe36f579ac5e 100644 --- a/drivers/clk/clk-oxnas.c +++ b/drivers/clk/clk-oxnas.c @@ -207,7 +207,7 @@ static const struct of_device_id oxnas_stdclk_dt_ids[] = { static int oxnas_stdclk_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *np = pdev->dev.of_node, *parent_np; const struct oxnas_stdclk_data *data; const struct of_device_id *id; struct regmap *regmap; @@ -219,7 +219,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev) return -ENODEV; data = id->data; - regmap = syscon_node_to_regmap(of_get_parent(np)); + parent_np = of_get_parent(np); + regmap = syscon_node_to_regmap(parent_np); + of_node_put(parent_np); if (IS_ERR(regmap)) { dev_err(&pdev->dev, "failed to have parent regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 46101c6a20f26..585b9ac118818 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -1038,8 +1038,13 @@ static void __init 
_clockgen_init(struct device_node *np, bool legacy); */ static void __init legacy_init_clockgen(struct device_node *np) { - if (!clockgen.node) - _clockgen_init(of_get_parent(np), true); + if (!clockgen.node) { + struct device_node *parent_np; + + parent_np = of_get_parent(np); + _clockgen_init(parent_np, true); + of_node_put(parent_np); + } } /* Legacy node */ @@ -1134,6 +1139,7 @@ static struct clk * __init create_sysclk(const char *name) sysclk = of_get_child_by_name(clockgen.node, "sysclk"); if (sysclk) { clk = sysclk_from_fixed(sysclk, name); + of_node_put(sysclk); if (!IS_ERR(clk)) return clk; } diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index 4e741f94baf02..eb597ea7bb87b 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -1116,7 +1116,7 @@ static const struct vc5_chip_info idt_5p49v6901_info = { .model = IDT_VC6_5P49V6901, .clk_fod_cnt = 4, .clk_out_cnt = 5, - .flags = VC5_HAS_PFD_FREQ_DBL, + .flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT, }; static const struct vc5_chip_info idt_5p49v6965_info = { diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index fc1bd23d45834..598f3cf4eba49 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c @@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels)); hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels)); hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); - hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels)); hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels)); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index db122d94db583..8a49e072d6e86 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -105,27 +105,27 @@ static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out "sys_pll3_out", "clk_ext4", }; static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", 
"osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext2", "clk_ext3", }; static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext2", "clk_ext3", }; static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 36e8d619e3348..72592e35836b3 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -17,6 +17,7 @@ static u32 share_count_nand; static u32 share_count_media; +static u32 share_count_usb; static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", }; static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; @@ -362,7 +363,7 @@ static const char * const imx8mp_media_mipi_phy1_ref_sels[] = {"osc_24m", "sys_p "clk_ext2", "audio_pll2_out", "video_pll1_out", }; -static const char * const imx8mp_media_disp1_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", +static const char * const imx8mp_media_disp_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", }; @@ -411,6 +412,11 @@ static const char * const imx8mp_sai7_sels[] = {"osc_24m", "audio_pll1_out", "au static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; +static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out", + "dummy", "dummy", "gpu_pll_out", "vpu_pll_out", + "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3", + "dummy", "dummy", "osc_24m", "dummy", "osc_32k"}; + static struct clk_hw **hws; static struct clk_hw_onecell_data *clk_hw_data; @@ -532,6 +538,15 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2); hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1); + hws[IMX8MP_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", anatop_base + 0x128, 4, 4, + imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels)); + hws[IMX8MP_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", anatop_base + 0x128, 0, 4); + hws[IMX8MP_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", anatop_base + 0x128, 8); + hws[IMX8MP_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", anatop_base + 0x128, 20, 4, + imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels)); + hws[IMX8MP_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", 
"clkout2_sel", anatop_base + 0x128, 16, 4); + hws[IMX8MP_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", anatop_base + 0x128, 24); + hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000); hws[IMX8MP_CLK_A53_SRC] = hws[IMX8MP_CLK_A53_DIV]; hws[IMX8MP_CLK_A53_CG] = hws[IMX8MP_CLK_A53_DIV]; @@ -566,6 +581,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000); hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100); hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200); + hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300); hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1); hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1); @@ -630,7 +646,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80); hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00); hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80); - hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00); + hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00); hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80); hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00); hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80); @@ -691,7 +707,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0); hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0); hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0); - hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0); + hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate2_shared2("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0, &share_count_usb); + hws[IMX8MP_CLK_USB_SUSP] = imx_clk_hw_gate2_shared2("usb_suspend_clk", "osc_32k", ccm_base + 0x44d0, 0, &share_count_usb); hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0); hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0); hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0); diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c index 9382dc3aa27e6..1999c114f4656 100644 --- a/drivers/clk/ingenic/tcu.c +++ b/drivers/clk/ingenic/tcu.c @@ -100,15 +100,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw) bool enabled = false; /* - * If the SoC has no global TCU clock, we must ungate the channel's - * clock to be able to access its registers. 
- * If we have a TCU clock, it will be enabled automatically as it has - * been attached to the regmap. + * According to the programming manual, a timer channel's registers can + * only be accessed when the channel's stop bit is clear. */ - if (!tcu->clk) { - enabled = !!ingenic_tcu_is_enabled(hw); - regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); - } + enabled = !!ingenic_tcu_is_enabled(hw); + regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); return enabled; } @@ -119,8 +115,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw) const struct ingenic_tcu_clk_info *info = tcu_clk->info; struct ingenic_tcu *tcu = tcu_clk->tcu; - if (!tcu->clk) - regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); + regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); } static u8 ingenic_tcu_get_parent(struct clk_hw *hw) diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c index 37b4162c58820..3a33014eee7f7 100644 --- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c @@ -18,9 +18,9 @@ static const struct mtk_gate_regs mfg_cg_regs = { .sta_ofs = 0x0, }; -#define GATE_MFG(_id, _name, _parent, _shift) \ - GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \ - &mtk_clk_gate_ops_setclr) +#define GATE_MFG(_id, _name, _parent, _shift) \ + GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT) static const struct mtk_gate mfg_clks[] = { GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0) diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c index 3a6d84cd66012..67d8a0d30221c 100644 --- a/drivers/clk/meson/meson-aoclk.c +++ b/drivers/clk/meson/meson-aoclk.c @@ -36,6 +36,7 @@ int meson_aoclkc_probe(struct platform_device *pdev) struct meson_aoclk_reset_controller *rstc; struct meson_aoclk_data *data; struct device *dev = &pdev->dev; + struct device_node *np; struct regmap *regmap; int ret, clkid; @@ -47,7 +48,9 @@ int meson_aoclkc_probe(struct platform_device *pdev) if (!rstc) return -ENOMEM; - regmap = syscon_node_to_regmap(of_get_parent(dev->of_node)); + np = of_get_parent(dev->of_node); + regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(regmap)) { dev_err(dev, "failed to get regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c index a7cb1e7aedc46..18ae387872687 100644 --- a/drivers/clk/meson/meson-eeclk.c +++ b/drivers/clk/meson/meson-eeclk.c @@ -17,6 +17,7 @@ int meson_eeclkc_probe(struct platform_device *pdev) { const struct meson_eeclkc_data *data; struct device *dev = &pdev->dev; + struct device_node *np; struct regmap *map; int ret, i; @@ -25,7 +26,9 @@ int meson_eeclkc_probe(struct platform_device *pdev) return -EINVAL; /* Get the hhi system controller node */ - map = syscon_node_to_regmap(of_get_parent(dev->of_node)); + np = of_get_parent(dev->of_node); + map = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(map)) { dev_err(dev, "failed to get HHI regmap\n"); diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 862f0756b50f0..1da9d212f8b77 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -3735,13 +3735,16 @@ static void __init meson8b_clkc_init_common(struct device_node *np, struct clk_hw_onecell_data *clk_hw_onecell_data) { struct meson8b_clk_reset *rstc; + struct device_node *parent_np; const char *notifier_clk_name; struct clk *notifier_clk; void __iomem *clk_base; struct 
regmap *map; int i, ret; - map = syscon_node_to_regmap(of_get_parent(np)); + parent_np = of_get_parent(np); + map = syscon_node_to_regmap(parent_np); + of_node_put(parent_np); if (IS_ERR(map)) { pr_info("failed to get HHI regmap - Trying obsolete regs\n"); diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c index d78ff2f310bfa..b5d93657e1ee3 100644 --- a/drivers/clk/qcom/apss-ipq6018.c +++ b/drivers/clk/qcom/apss-ipq6018.c @@ -57,7 +57,7 @@ static struct clk_branch apcs_alias0_core_clk = { .parent_hws = (const struct clk_hw *[]){ &apcs_alias0_clk_src.clkr.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c index 90046428693c2..e74fc81a14d00 100644 --- a/drivers/clk/qcom/clk-krait.c +++ b/drivers/clk/qcom/clk-krait.c @@ -98,6 +98,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, if (d->lpl) mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; + else + mask <<= d->shift; spin_lock_irqsave(&krait_clock_reg_lock, flags); val = krait_get_l2_indirect_reg(d->offset); diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c index ab594a0f0c408..7ec11acc82984 100644 --- a/drivers/clk/qcom/gcc-sm8250.c +++ b/drivers/clk/qcom/gcc-sm8250.c @@ -3268,7 +3268,7 @@ static struct gdsc usb30_prim_gdsc = { .pd = { .name = "usb30_prim_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, }; static struct gdsc usb30_sec_gdsc = { @@ -3276,7 +3276,7 @@ static struct gdsc usb30_sec_gdsc = { .pd = { .name = "usb30_sec_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 245150a5484a2..285f6ac25372d 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -386,7 +386,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, int error; int index; - while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, + while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++, &clkspec)) { if (clkspec.np != pd->dev.of_node) continue; @@ -399,7 +399,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, if (error) return error; } - i++; } return 0; diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index bbbf9ce428672..d0bd513ff3c33 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -981,6 +981,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, return mux_clk; err_pll: + kfree(pll->rate_table); clk_unregister(mux_clk); mux_clk = pll_clk; err_mux: diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index ac70ad785d8ed..33df20f813d59 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -1390,6 +1390,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, if (ret) { pr_err("%s: failed to register pll clock %s : %d\n", __func__, pll_clk->name, ret); + kfree(pll->rate_table); kfree(pll); return; } diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index cf94a12459ea4..ee2a2d284113c 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -174,21 +174,24 @@ void __init socfpga_gate_init(struct device_node *node) u32 div_reg[3]; u32 
clk_phase[2]; u32 fixed_div; - struct clk *clk; + struct clk_hw *hw_clk; struct socfpga_gate_clk *socfpga_clk; const char *clk_name = node->name; const char *parent_name[SOCFPGA_MAX_PARENTS]; struct clk_init_data init; struct clk_ops *ops; int rc; + int err; socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); if (WARN_ON(!socfpga_clk)) return; ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL); - if (WARN_ON(!ops)) + if (WARN_ON(!ops)) { + kfree(socfpga_clk); return; + } rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); if (rc) @@ -238,12 +241,15 @@ void __init socfpga_gate_init(struct device_node *node) init.parent_names = parent_name; socfpga_clk->hw.hw.init = &init; - clk = clk_register(NULL, &socfpga_clk->hw.hw); - if (WARN_ON(IS_ERR(clk))) { + hw_clk = &socfpga_clk->hw.hw; + + err = clk_hw_register(NULL, hw_clk); + if (err) { + kfree(ops); kfree(socfpga_clk); return; } - rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); + rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk); if (WARN_ON(rc)) return; } diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c index 5e0c4b45f77f7..43707e2d72484 100644 --- a/drivers/clk/socfpga/clk-periph.c +++ b/drivers/clk/socfpga/clk-periph.c @@ -51,7 +51,7 @@ static __init void __socfpga_periph_init(struct device_node *node, const struct clk_ops *ops) { u32 reg; - struct clk *clk; + struct clk_hw *hw_clk; struct socfpga_periph_clk *periph_clk; const char *clk_name = node->name; const char *parent_name[SOCFPGA_MAX_PARENTS]; @@ -94,13 +94,13 @@ static __init void __socfpga_periph_init(struct device_node *node, init.parent_names = parent_name; periph_clk->hw.hw.init = &init; + hw_clk = &periph_clk->hw.hw; - clk = clk_register(NULL, &periph_clk->hw.hw); - if (WARN_ON(IS_ERR(clk))) { + if (clk_hw_register(NULL, hw_clk)) { kfree(periph_clk); return; } - rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); + rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk); } void __init socfpga_periph_init(struct device_node *node) diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index e5fb786843f39..dcb573d440346 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -70,17 +70,18 @@ static const struct clk_ops clk_pll_ops = { .get_parent = clk_pll_get_parent, }; -static __init struct clk *__socfpga_pll_init(struct device_node *node, +static __init struct clk_hw *__socfpga_pll_init(struct device_node *node, const struct clk_ops *ops) { u32 reg; - struct clk *clk; + struct clk_hw *hw_clk; struct socfpga_pll *pll_clk; const char *clk_name = node->name; const char *parent_name[SOCFPGA_MAX_PARENTS]; struct clk_init_data init; struct device_node *clkmgr_np; int rc; + int err; of_property_read_u32(node, "reg", ®); @@ -106,13 +107,15 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; - clk = clk_register(NULL, &pll_clk->hw.hw); - if (WARN_ON(IS_ERR(clk))) { + hw_clk = &pll_clk->hw.hw; + + err = clk_hw_register(NULL, hw_clk); + if (err) { kfree(pll_clk); - return NULL; + return ERR_PTR(err); } - rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); - return clk; + rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk); + return hw_clk; } void __init socfpga_pll_init(struct device_node *node) diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c index d620bbbcdfc88..ce81e4087a8fc 100644 --- a/drivers/clk/sprd/common.c +++ b/drivers/clk/sprd/common.c 
@@ -41,7 +41,7 @@ int sprd_clk_regmap_init(struct platform_device *pdev, { void __iomem *base; struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node; + struct device_node *node = dev->of_node, *np; struct regmap *regmap; if (of_find_property(node, "sprd,syscon", NULL)) { @@ -50,9 +50,10 @@ int sprd_clk_regmap_init(struct platform_device *pdev, pr_err("%s: failed to get syscon regmap\n", __func__); return PTR_ERR(regmap); } - } else if (of_device_is_compatible(of_get_parent(dev->of_node), - "syscon")) { - regmap = device_node_to_regmap(of_get_parent(dev->of_node)); + } else if (of_device_is_compatible(np = of_get_parent(node), "syscon") || + (of_node_put(np), 0)) { + regmap = device_node_to_regmap(np); + of_node_put(np); if (IS_ERR(regmap)) { dev_err(dev, "failed to get regmap from its parent.\n"); return PTR_ERR(regmap); diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index f1adc858b5907..0e58a7cda427f 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -942,9 +942,10 @@ static void __init st_of_quadfs_setup(struct device_node *np, clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data, reg, lock); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + kfree(lock); goto err_exit; - else + } else pr_debug("%s: parent %s rate %u\n", __clk_get_name(clk), __clk_get_name(clk_get_parent(clk)), diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index bc9e47a4cb60a..4e2b26e3e5738 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -1317,6 +1317,7 @@ static void __init tegra114_clock_init(struct device_node *np) } pmc_base = of_iomap(node, 0); + of_node_put(node); if (!pmc_base) { pr_err("Can't map pmc registers\n"); WARN_ON(1); diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 3efc651b42e3a..d60ee6e318a55 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -1128,6 +1128,7 @@ static void __init tegra20_clock_init(struct device_node *np) } pmc_base = of_iomap(node, 0); + of_node_put(node); if (!pmc_base) { pr_err("Can't map pmc registers\n"); BUG(); diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 68cbb98af567d..1a0016d07f88c 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -3697,6 +3697,7 @@ static void __init tegra210_clock_init(struct device_node *np) } pmc_base = of_iomap(node, 0); + of_node_put(node); if (!pmc_base) { pr_err("Can't map pmc registers\n"); WARN_ON(1); diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index cbf9922d93d4e..a38c921539793 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -56,7 +56,7 @@ static const struct omap_clkctrl_bit_data omap4_aess_bit_data[] __initconst = { }; static const char * const omap4_func_dmic_abe_gfclk_parents[] __initconst = { - "abe-clkctrl:0018:26", + "abe_cm:clk:0018:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -76,7 +76,7 @@ static const struct omap_clkctrl_bit_data omap4_dmic_bit_data[] __initconst = { }; static const char * const omap4_func_mcasp_abe_gfclk_parents[] __initconst = { - "abe-clkctrl:0020:26", + "abe_cm:clk:0020:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -89,7 +89,7 @@ static const struct omap_clkctrl_bit_data omap4_mcasp_bit_data[] __initconst = { }; static const char * const omap4_func_mcbsp1_gfclk_parents[] __initconst = { - "abe-clkctrl:0028:26", + "abe_cm:clk:0028:26", "pad_clks_ck", "slimbus_clk", NULL, @@ 
-102,7 +102,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp1_bit_data[] __initconst = }; static const char * const omap4_func_mcbsp2_gfclk_parents[] __initconst = { - "abe-clkctrl:0030:26", + "abe_cm:clk:0030:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -115,7 +115,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp2_bit_data[] __initconst = }; static const char * const omap4_func_mcbsp3_gfclk_parents[] __initconst = { - "abe-clkctrl:0038:26", + "abe_cm:clk:0038:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -183,18 +183,18 @@ static const struct omap_clkctrl_bit_data omap4_timer8_bit_data[] __initconst = static const struct omap_clkctrl_reg_data omap4_abe_clkctrl_regs[] __initconst = { { OMAP4_L4_ABE_CLKCTRL, NULL, 0, "ocp_abe_iclk" }, - { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" }, + { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" }, { OMAP4_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" }, - { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" }, - { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe-clkctrl:0020:24" }, - { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" }, - { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" }, - { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" }, - { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0040:8" }, - { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" }, - { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" }, - { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" }, - { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" }, + { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" }, + { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe_cm:clk:0020:24" }, + { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" }, + { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" }, + { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" }, + { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0040:8" }, + { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" }, + { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" }, + { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" }, + { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" }, { OMAP4_WD_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, { 0 }, }; @@ -287,7 +287,7 @@ static const struct omap_clkctrl_bit_data omap4_fdif_bit_data[] __initconst = { static const struct omap_clkctrl_reg_data omap4_iss_clkctrl_regs[] __initconst = { { OMAP4_ISS_CLKCTRL, omap4_iss_bit_data, CLKF_SW_SUP, "ducati_clk_mux_ck" }, - { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss-clkctrl:0008:24" }, + { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss_cm:clk:0008:24" }, { 0 }, }; @@ -320,7 +320,7 @@ static const struct omap_clkctrl_bit_data omap4_dss_core_bit_data[] __initconst }; static const struct omap_clkctrl_reg_data omap4_l3_dss_clkctrl_regs[] __initconst = { - { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3-dss-clkctrl:0000:8" }, + { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, 
CLKF_SW_SUP, "l3_dss_cm:clk:0000:8" }, { 0 }, }; @@ -336,7 +336,7 @@ static const struct omap_clkctrl_bit_data omap4_gpu_bit_data[] __initconst = { }; static const struct omap_clkctrl_reg_data omap4_l3_gfx_clkctrl_regs[] __initconst = { - { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3-gfx-clkctrl:0000:24" }, + { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3_gfx_cm:clk:0000:24" }, { 0 }, }; @@ -372,12 +372,12 @@ static const struct omap_clkctrl_bit_data omap4_hsi_bit_data[] __initconst = { }; static const char * const omap4_usb_host_hs_utmi_p1_clk_parents[] __initconst = { - "l3-init-clkctrl:0038:24", + "l3_init_cm:clk:0038:24", NULL, }; static const char * const omap4_usb_host_hs_utmi_p2_clk_parents[] __initconst = { - "l3-init-clkctrl:0038:25", + "l3_init_cm:clk:0038:25", NULL, }; @@ -418,7 +418,7 @@ static const struct omap_clkctrl_bit_data omap4_usb_host_hs_bit_data[] __initcon }; static const char * const omap4_usb_otg_hs_xclk_parents[] __initconst = { - "l3-init-clkctrl:0040:24", + "l3_init_cm:clk:0040:24", NULL, }; @@ -452,14 +452,14 @@ static const struct omap_clkctrl_bit_data omap4_ocp2scp_usb_phy_bit_data[] __ini }; static const struct omap_clkctrl_reg_data omap4_l3_init_clkctrl_regs[] __initconst = { - { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0008:24" }, - { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0010:24" }, - { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:0018:24" }, + { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0008:24" }, + { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0010:24" }, + { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:0018:24" }, { OMAP4_USB_HOST_HS_CLKCTRL, omap4_usb_host_hs_bit_data, CLKF_SW_SUP, "init_60m_fclk" }, { OMAP4_USB_OTG_HS_CLKCTRL, omap4_usb_otg_hs_bit_data, CLKF_HW_SUP, "l3_div_ck" }, { OMAP4_USB_TLL_HS_CLKCTRL, omap4_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_div_ck" }, { OMAP4_USB_HOST_FS_CLKCTRL, NULL, CLKF_SW_SUP, "func_48mc_fclk" }, - { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:00c0:8" }, + { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:00c0:8" }, { 0 }, }; @@ -530,7 +530,7 @@ static const struct omap_clkctrl_bit_data omap4_gpio6_bit_data[] __initconst = { }; static const char * const omap4_per_mcbsp4_gfclk_parents[] __initconst = { - "l4-per-clkctrl:00c0:26", + "l4_per_cm:clk:00c0:26", "pad_clks_ck", NULL, }; @@ -570,12 +570,12 @@ static const struct omap_clkctrl_bit_data omap4_slimbus2_bit_data[] __initconst }; static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initconst = { - { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0008:24" }, - { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0010:24" }, - { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0018:24" }, - { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0020:24" }, - { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0028:24" }, - { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0030:24" }, + { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0008:24" }, + { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0010:24" }, + { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, 
"l4_per_cm:clk:0018:24" }, + { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0020:24" }, + { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0028:24" }, + { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0030:24" }, { OMAP4_ELM_CLKCTRL, NULL, 0, "l4_div_ck" }, { OMAP4_GPIO2_CLKCTRL, omap4_gpio2_bit_data, CLKF_HW_SUP, "l4_div_ck" }, { OMAP4_GPIO3_CLKCTRL, omap4_gpio3_bit_data, CLKF_HW_SUP, "l4_div_ck" }, @@ -588,14 +588,14 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons { OMAP4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, { OMAP4_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, { OMAP4_L4_PER_CLKCTRL, NULL, 0, "l4_div_ck" }, - { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:00c0:24" }, + { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:00c0:24" }, { OMAP4_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, { OMAP4_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, { OMAP4_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, { OMAP4_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, { OMAP4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, { OMAP4_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, - { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0118:8" }, + { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0118:8" }, { OMAP4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, { OMAP4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, { OMAP4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, @@ -630,7 +630,7 @@ static const struct omap_clkctrl_reg_data omap4_l4_wkup_clkctrl_regs[] __initcon { OMAP4_L4_WKUP_CLKCTRL, NULL, 0, "l4_wkup_clk_mux_ck" }, { OMAP4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, { OMAP4_GPIO1_CLKCTRL, omap4_gpio1_bit_data, CLKF_HW_SUP, "l4_wkup_clk_mux_ck" }, - { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4-wkup-clkctrl:0020:24" }, + { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0020:24" }, { OMAP4_COUNTER_32K_CLKCTRL, NULL, 0, "sys_32k_ck" }, { OMAP4_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, { 0 }, @@ -644,7 +644,7 @@ static const char * const omap4_pmd_stm_clock_mux_ck_parents[] __initconst = { }; static const char * const omap4_trace_clk_div_div_ck_parents[] __initconst = { - "emu-sys-clkctrl:0000:22", + "emu_sys_cm:clk:0000:22", NULL, }; @@ -662,7 +662,7 @@ static const struct omap_clkctrl_div_data omap4_trace_clk_div_div_ck_data __init }; static const char * const omap4_stm_clk_div_ck_parents[] __initconst = { - "emu-sys-clkctrl:0000:20", + "emu_sys_cm:clk:0000:20", NULL, }; @@ -716,73 +716,73 @@ static struct ti_dt_clk omap44xx_clks[] = { * hwmod support. Once hwmod is removed, these can be removed * also. 
*/ - DT_CLK(NULL, "aess_fclk", "abe-clkctrl:0008:24"), - DT_CLK(NULL, "cm2_dm10_mux", "l4-per-clkctrl:0008:24"), - DT_CLK(NULL, "cm2_dm11_mux", "l4-per-clkctrl:0010:24"), - DT_CLK(NULL, "cm2_dm2_mux", "l4-per-clkctrl:0018:24"), - DT_CLK(NULL, "cm2_dm3_mux", "l4-per-clkctrl:0020:24"), - DT_CLK(NULL, "cm2_dm4_mux", "l4-per-clkctrl:0028:24"), - DT_CLK(NULL, "cm2_dm9_mux", "l4-per-clkctrl:0030:24"), - DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"), - DT_CLK(NULL, "dmt1_clk_mux", "l4-wkup-clkctrl:0020:24"), - DT_CLK(NULL, "dss_48mhz_clk", "l3-dss-clkctrl:0000:9"), - DT_CLK(NULL, "dss_dss_clk", "l3-dss-clkctrl:0000:8"), - DT_CLK(NULL, "dss_sys_clk", "l3-dss-clkctrl:0000:10"), - DT_CLK(NULL, "dss_tv_clk", "l3-dss-clkctrl:0000:11"), - DT_CLK(NULL, "fdif_fck", "iss-clkctrl:0008:24"), - DT_CLK(NULL, "func_dmic_abe_gfclk", "abe-clkctrl:0018:24"), - DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe-clkctrl:0020:24"), - DT_CLK(NULL, "func_mcbsp1_gfclk", "abe-clkctrl:0028:24"), - DT_CLK(NULL, "func_mcbsp2_gfclk", "abe-clkctrl:0030:24"), - DT_CLK(NULL, "func_mcbsp3_gfclk", "abe-clkctrl:0038:24"), - DT_CLK(NULL, "gpio1_dbclk", "l4-wkup-clkctrl:0018:8"), - DT_CLK(NULL, "gpio2_dbclk", "l4-per-clkctrl:0040:8"), - DT_CLK(NULL, "gpio3_dbclk", "l4-per-clkctrl:0048:8"), - DT_CLK(NULL, "gpio4_dbclk", "l4-per-clkctrl:0050:8"), - DT_CLK(NULL, "gpio5_dbclk", "l4-per-clkctrl:0058:8"), - DT_CLK(NULL, "gpio6_dbclk", "l4-per-clkctrl:0060:8"), - DT_CLK(NULL, "hsi_fck", "l3-init-clkctrl:0018:24"), - DT_CLK(NULL, "hsmmc1_fclk", "l3-init-clkctrl:0008:24"), - DT_CLK(NULL, "hsmmc2_fclk", "l3-init-clkctrl:0010:24"), - DT_CLK(NULL, "iss_ctrlclk", "iss-clkctrl:0000:8"), - DT_CLK(NULL, "mcasp_sync_mux_ck", "abe-clkctrl:0020:26"), - DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"), - DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"), - DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"), - DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"), - DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"), - DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"), - DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"), - DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"), - DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"), - DT_CLK(NULL, "sgx_clk_mux", "l3-gfx-clkctrl:0000:24"), - DT_CLK(NULL, "slimbus1_fclk_0", "abe-clkctrl:0040:8"), - DT_CLK(NULL, "slimbus1_fclk_1", "abe-clkctrl:0040:9"), - DT_CLK(NULL, "slimbus1_fclk_2", "abe-clkctrl:0040:10"), - DT_CLK(NULL, "slimbus1_slimbus_clk", "abe-clkctrl:0040:11"), - DT_CLK(NULL, "slimbus2_fclk_0", "l4-per-clkctrl:0118:8"), - DT_CLK(NULL, "slimbus2_fclk_1", "l4-per-clkctrl:0118:9"), - DT_CLK(NULL, "slimbus2_slimbus_clk", "l4-per-clkctrl:0118:10"), - DT_CLK(NULL, "stm_clk_div_ck", "emu-sys-clkctrl:0000:27"), - DT_CLK(NULL, "timer5_sync_mux", "abe-clkctrl:0048:24"), - DT_CLK(NULL, "timer6_sync_mux", "abe-clkctrl:0050:24"), - DT_CLK(NULL, "timer7_sync_mux", "abe-clkctrl:0058:24"), - DT_CLK(NULL, "timer8_sync_mux", "abe-clkctrl:0060:24"), - DT_CLK(NULL, "trace_clk_div_div_ck", "emu-sys-clkctrl:0000:24"), - DT_CLK(NULL, "usb_host_hs_func48mclk", "l3-init-clkctrl:0038:15"), - DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3-init-clkctrl:0038:13"), - DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3-init-clkctrl:0038:14"), - DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3-init-clkctrl:0038:11"), - DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3-init-clkctrl:0038:12"), - DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", 
"l3-init-clkctrl:0038:8"), - DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3-init-clkctrl:0038:9"), - DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init-clkctrl:0038:10"), - DT_CLK(NULL, "usb_otg_hs_xclk", "l3-init-clkctrl:0040:8"), - DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3-init-clkctrl:0048:8"), - DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3-init-clkctrl:0048:9"), - DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3-init-clkctrl:0048:10"), - DT_CLK(NULL, "utmi_p1_gfclk", "l3-init-clkctrl:0038:24"), - DT_CLK(NULL, "utmi_p2_gfclk", "l3-init-clkctrl:0038:25"), + DT_CLK(NULL, "aess_fclk", "abe_cm:0008:24"), + DT_CLK(NULL, "cm2_dm10_mux", "l4_per_cm:0008:24"), + DT_CLK(NULL, "cm2_dm11_mux", "l4_per_cm:0010:24"), + DT_CLK(NULL, "cm2_dm2_mux", "l4_per_cm:0018:24"), + DT_CLK(NULL, "cm2_dm3_mux", "l4_per_cm:0020:24"), + DT_CLK(NULL, "cm2_dm4_mux", "l4_per_cm:0028:24"), + DT_CLK(NULL, "cm2_dm9_mux", "l4_per_cm:0030:24"), + DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"), + DT_CLK(NULL, "dmt1_clk_mux", "l4_wkup_cm:0020:24"), + DT_CLK(NULL, "dss_48mhz_clk", "l3_dss_cm:0000:9"), + DT_CLK(NULL, "dss_dss_clk", "l3_dss_cm:0000:8"), + DT_CLK(NULL, "dss_sys_clk", "l3_dss_cm:0000:10"), + DT_CLK(NULL, "dss_tv_clk", "l3_dss_cm:0000:11"), + DT_CLK(NULL, "fdif_fck", "iss_cm:0008:24"), + DT_CLK(NULL, "func_dmic_abe_gfclk", "abe_cm:0018:24"), + DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe_cm:0020:24"), + DT_CLK(NULL, "func_mcbsp1_gfclk", "abe_cm:0028:24"), + DT_CLK(NULL, "func_mcbsp2_gfclk", "abe_cm:0030:24"), + DT_CLK(NULL, "func_mcbsp3_gfclk", "abe_cm:0038:24"), + DT_CLK(NULL, "gpio1_dbclk", "l4_wkup_cm:0018:8"), + DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0040:8"), + DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0048:8"), + DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0050:8"), + DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0058:8"), + DT_CLK(NULL, "gpio6_dbclk", "l4_per_cm:0060:8"), + DT_CLK(NULL, "hsi_fck", "l3_init_cm:0018:24"), + DT_CLK(NULL, "hsmmc1_fclk", "l3_init_cm:0008:24"), + DT_CLK(NULL, "hsmmc2_fclk", "l3_init_cm:0010:24"), + DT_CLK(NULL, "iss_ctrlclk", "iss_cm:0000:8"), + DT_CLK(NULL, "mcasp_sync_mux_ck", "abe_cm:0020:26"), + DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"), + DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"), + DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"), + DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4_per_cm:00c0:26"), + DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3_init_cm:00c0:8"), + DT_CLK(NULL, "otg_60m_gfclk", "l3_init_cm:0040:24"), + DT_CLK(NULL, "per_mcbsp4_gfclk", "l4_per_cm:00c0:24"), + DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu_sys_cm:0000:20"), + DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu_sys_cm:0000:22"), + DT_CLK(NULL, "sgx_clk_mux", "l3_gfx_cm:0000:24"), + DT_CLK(NULL, "slimbus1_fclk_0", "abe_cm:0040:8"), + DT_CLK(NULL, "slimbus1_fclk_1", "abe_cm:0040:9"), + DT_CLK(NULL, "slimbus1_fclk_2", "abe_cm:0040:10"), + DT_CLK(NULL, "slimbus1_slimbus_clk", "abe_cm:0040:11"), + DT_CLK(NULL, "slimbus2_fclk_0", "l4_per_cm:0118:8"), + DT_CLK(NULL, "slimbus2_fclk_1", "l4_per_cm:0118:9"), + DT_CLK(NULL, "slimbus2_slimbus_clk", "l4_per_cm:0118:10"), + DT_CLK(NULL, "stm_clk_div_ck", "emu_sys_cm:0000:27"), + DT_CLK(NULL, "timer5_sync_mux", "abe_cm:0048:24"), + DT_CLK(NULL, "timer6_sync_mux", "abe_cm:0050:24"), + DT_CLK(NULL, "timer7_sync_mux", "abe_cm:0058:24"), + DT_CLK(NULL, "timer8_sync_mux", "abe_cm:0060:24"), + DT_CLK(NULL, "trace_clk_div_div_ck", "emu_sys_cm:0000:24"), + DT_CLK(NULL, "usb_host_hs_func48mclk", "l3_init_cm:0038:15"), + DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3_init_cm:0038:13"), + 
DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3_init_cm:0038:14"), + DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3_init_cm:0038:11"), + DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3_init_cm:0038:12"), + DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3_init_cm:0038:8"), + DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3_init_cm:0038:9"), + DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init_cm:0038:10"), + DT_CLK(NULL, "usb_otg_hs_xclk", "l3_init_cm:0040:8"), + DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3_init_cm:0048:8"), + DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3_init_cm:0048:9"), + DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3_init_cm:0048:10"), + DT_CLK(NULL, "utmi_p1_gfclk", "l3_init_cm:0038:24"), + DT_CLK(NULL, "utmi_p2_gfclk", "l3_init_cm:0038:25"), { .node_name = NULL }, }; diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index 04a5408085acc..8694bc9f5fc7f 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -50,7 +50,7 @@ static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = { }; static const char * const omap5_dmic_gfclk_parents[] __initconst = { - "abe-clkctrl:0018:26", + "abe_cm:clk:0018:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -70,7 +70,7 @@ static const struct omap_clkctrl_bit_data omap5_dmic_bit_data[] __initconst = { }; static const char * const omap5_mcbsp1_gfclk_parents[] __initconst = { - "abe-clkctrl:0028:26", + "abe_cm:clk:0028:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -83,7 +83,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp1_bit_data[] __initconst = }; static const char * const omap5_mcbsp2_gfclk_parents[] __initconst = { - "abe-clkctrl:0030:26", + "abe_cm:clk:0030:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -96,7 +96,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp2_bit_data[] __initconst = }; static const char * const omap5_mcbsp3_gfclk_parents[] __initconst = { - "abe-clkctrl:0038:26", + "abe_cm:clk:0038:26", "pad_clks_ck", "slimbus_clk", NULL, @@ -136,16 +136,16 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst = static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = { { OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" }, - { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" }, + { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" }, { OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" }, - { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" }, - { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" }, - { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" }, - { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" }, - { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" }, - { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" }, - { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" }, - { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" }, + { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" }, + { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" }, + { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" }, + { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" }, + { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" }, + { 
OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" }, + { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" }, + { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" }, { 0 }, }; @@ -266,12 +266,12 @@ static const struct omap_clkctrl_bit_data omap5_gpio8_bit_data[] __initconst = { }; static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst = { - { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" }, - { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" }, - { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" }, - { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" }, - { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" }, - { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0030:24" }, + { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0008:24" }, + { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0010:24" }, + { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0018:24" }, + { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0020:24" }, + { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" }, + { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" }, { OMAP5_GPIO2_CLKCTRL, omap5_gpio2_bit_data, CLKF_HW_SUP, "l4_root_clk_div" }, { OMAP5_GPIO3_CLKCTRL, omap5_gpio3_bit_data, CLKF_HW_SUP, "l4_root_clk_div" }, { OMAP5_GPIO4_CLKCTRL, omap5_gpio4_bit_data, CLKF_HW_SUP, "l4_root_clk_div" }, @@ -343,7 +343,7 @@ static const struct omap_clkctrl_bit_data omap5_dss_core_bit_data[] __initconst }; static const struct omap_clkctrl_reg_data omap5_dss_clkctrl_regs[] __initconst = { - { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" }, + { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" }, { 0 }, }; @@ -376,7 +376,7 @@ static const struct omap_clkctrl_bit_data omap5_gpu_core_bit_data[] __initconst }; static const struct omap_clkctrl_reg_data omap5_gpu_clkctrl_regs[] __initconst = { - { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24" }, + { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24" }, { 0 }, }; @@ -387,7 +387,7 @@ static const char * const omap5_mmc1_fclk_mux_parents[] __initconst = { }; static const char * const omap5_mmc1_fclk_parents[] __initconst = { - "l3init-clkctrl:0008:24", + "l3init_cm:clk:0008:24", NULL, }; @@ -403,7 +403,7 @@ static const struct omap_clkctrl_bit_data omap5_mmc1_bit_data[] __initconst = { }; static const char * const omap5_mmc2_fclk_parents[] __initconst = { - "l3init-clkctrl:0010:24", + "l3init_cm:clk:0010:24", NULL, }; @@ -428,12 +428,12 @@ static const char * const omap5_usb_host_hs_hsic480m_p3_clk_parents[] __initcons }; static const char * const omap5_usb_host_hs_utmi_p1_clk_parents[] __initconst = { - "l3init-clkctrl:0038:24", + "l3init_cm:clk:0038:24", NULL, }; static const char * const omap5_usb_host_hs_utmi_p2_clk_parents[] __initconst = { - "l3init-clkctrl:0038:25", + "l3init_cm:clk:0038:25", NULL, }; @@ -492,8 +492,8 @@ static const struct omap_clkctrl_bit_data omap5_usb_otg_ss_bit_data[] __initcons }; static const struct omap_clkctrl_reg_data omap5_l3init_clkctrl_regs[] __initconst = { 
- { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" }, - { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" }, + { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" }, + { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" }, { OMAP5_USB_HOST_HS_CLKCTRL, omap5_usb_host_hs_bit_data, CLKF_SW_SUP, "l3init_60m_fclk" }, { OMAP5_USB_TLL_HS_CLKCTRL, omap5_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_root_clk_div" }, { OMAP5_SATA_CLKCTRL, omap5_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" }, @@ -517,7 +517,7 @@ static const struct omap_clkctrl_reg_data omap5_wkupaon_clkctrl_regs[] __initcon { OMAP5_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, { OMAP5_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, { OMAP5_GPIO1_CLKCTRL, omap5_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" }, - { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" }, + { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" }, { OMAP5_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, { OMAP5_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, { 0 }, @@ -547,58 +547,58 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = { static struct ti_dt_clk omap54xx_clks[] = { DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"), DT_CLK(NULL, "sys_clkin_ck", "sys_clkin"), - DT_CLK(NULL, "dmic_gfclk", "abe-clkctrl:0018:24"), - DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"), - DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"), - DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"), - DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"), - DT_CLK(NULL, "dss_sys_clk", "dss-clkctrl:0000:10"), - DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"), - DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0040:8"), - DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0048:8"), - DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0050:8"), - DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0058:8"), - DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0060:8"), - DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00f0:8"), - DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"), - DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"), - DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"), - DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"), - DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"), - DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"), - DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"), - DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"), - DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"), - DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"), - DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"), - DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"), - DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"), - DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"), - DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"), - DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"), - DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0018:24"), - DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0020:24"), - DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0028:24"), - DT_CLK(NULL, "timer5_gfclk_mux", "abe-clkctrl:0048:24"), - DT_CLK(NULL, "timer6_gfclk_mux", "abe-clkctrl:0050:24"), - DT_CLK(NULL, "timer7_gfclk_mux", "abe-clkctrl:0058:24"), - DT_CLK(NULL, "timer8_gfclk_mux", "abe-clkctrl:0060:24"), - DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0030:24"), - 
DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init-clkctrl:0038:13"), - DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init-clkctrl:0038:14"), - DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init-clkctrl:0038:7"), - DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init-clkctrl:0038:11"), - DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init-clkctrl:0038:12"), - DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init-clkctrl:0038:6"), - DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init-clkctrl:0038:8"), - DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init-clkctrl:0038:9"), - DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init-clkctrl:0038:10"), - DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init-clkctrl:00d0:8"), - DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init-clkctrl:0048:8"), - DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init-clkctrl:0048:9"), - DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init-clkctrl:0048:10"), - DT_CLK(NULL, "utmi_p1_gfclk", "l3init-clkctrl:0038:24"), - DT_CLK(NULL, "utmi_p2_gfclk", "l3init-clkctrl:0038:25"), + DT_CLK(NULL, "dmic_gfclk", "abe_cm:0018:24"), + DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"), + DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"), + DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"), + DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"), + DT_CLK(NULL, "dss_sys_clk", "dss_cm:0000:10"), + DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"), + DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0040:8"), + DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0048:8"), + DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0050:8"), + DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0058:8"), + DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0060:8"), + DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:00f0:8"), + DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:00f8:8"), + DT_CLK(NULL, "mcbsp1_gfclk", "abe_cm:0028:24"), + DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"), + DT_CLK(NULL, "mcbsp2_gfclk", "abe_cm:0030:24"), + DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"), + DT_CLK(NULL, "mcbsp3_gfclk", "abe_cm:0038:24"), + DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"), + DT_CLK(NULL, "mmc1_32khz_clk", "l3init_cm:0008:8"), + DT_CLK(NULL, "mmc1_fclk", "l3init_cm:0008:25"), + DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"), + DT_CLK(NULL, "mmc2_fclk", "l3init_cm:0010:25"), + DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"), + DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"), + DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0008:24"), + DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0010:24"), + DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"), + DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0018:24"), + DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0020:24"), + DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0028:24"), + DT_CLK(NULL, "timer5_gfclk_mux", "abe_cm:0048:24"), + DT_CLK(NULL, "timer6_gfclk_mux", "abe_cm:0050:24"), + DT_CLK(NULL, "timer7_gfclk_mux", "abe_cm:0058:24"), + DT_CLK(NULL, "timer8_gfclk_mux", "abe_cm:0060:24"), + DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0030:24"), + DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init_cm:0038:13"), + DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init_cm:0038:14"), + DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init_cm:0038:7"), + DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init_cm:0038:11"), + DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init_cm:0038:12"), + DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init_cm:0038:6"), + DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init_cm:0038:8"), + DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init_cm:0038:9"), + DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", 
"l3init_cm:0038:10"), + DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init_cm:00d0:8"), + DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init_cm:0048:8"), + DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init_cm:0048:9"), + DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init_cm:0048:10"), + DT_CLK(NULL, "utmi_p1_gfclk", "l3init_cm:0038:24"), + DT_CLK(NULL, "utmi_p2_gfclk", "l3init_cm:0038:25"), { .node_name = NULL }, }; diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 8d4c08b034bdd..e2e59d78c173f 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -251,14 +251,16 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) if (rc) { pr_err("%s: failed to lookup atl clock %d\n", __func__, i); - return -EINVAL; + ret = -EINVAL; + goto pm_put; } clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { pr_err("%s: failed to get atl clock %d from provider\n", __func__, i); - return PTR_ERR(clk); + ret = PTR_ERR(clk); + goto pm_put; } cdesc = to_atl_desc(__clk_get_hw(clk)); @@ -291,8 +293,9 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) if (cdesc->enabled) atl_clk_enable(__clk_get_hw(clk)); } - pm_runtime_put_sync(cinfo->dev); +pm_put: + pm_runtime_put_sync(cinfo->dev); return ret; } diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 08a85c559f795..864c484bde1b4 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -511,6 +511,10 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) char *c; u16 soc_mask = 0; + if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) && + of_node_name_eq(node, "clk")) + ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT; + addrp = of_get_address(node, 0, NULL, NULL); addr = (u32)of_translate_address(node, addrp); diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig index 69642e15fcc1f..ced99e082e3dd 100644 --- a/drivers/clk/x86/Kconfig +++ b/drivers/clk/x86/Kconfig @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only config CLK_LGM_CGU depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST) + select MFD_SYSCON select OF_EARLY_FLATTREE bool "Clock driver for Lightning Mountain(LGM) platform" help - Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM) - network processor SoC. + Clock Generation Unit(CGU) driver for MaxLinear's x86 based + Lightning Mountain(LGM) network processor SoC. diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c index 3179557b5f784..409dbf55f4cae 100644 --- a/drivers/clk/x86/clk-cgu-pll.c +++ b/drivers/clk/x86/clk-cgu-pll.c @@ -1,8 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* + * Copyright (C) 2020-2022 MaxLinear, Inc. * Copyright (C) 2020 Intel Corporation. 
- * Zhu YiXin - * Rahul Tanwar + * Zhu Yixin + * Rahul Tanwar */ #include @@ -40,13 +41,10 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); unsigned int div, mult, frac; - unsigned long flags; - spin_lock_irqsave(&pll->lock, flags); mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12); div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6); frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24); - spin_unlock_irqrestore(&pll->lock, flags); if (pll->type == TYPE_LJPLL) div *= 4; @@ -57,12 +55,9 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate) static int lgm_pll_is_enabled(struct clk_hw *hw) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); - unsigned long flags; unsigned int ret; - spin_lock_irqsave(&pll->lock, flags); ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1); - spin_unlock_irqrestore(&pll->lock, flags); return ret; } @@ -70,15 +65,13 @@ static int lgm_pll_is_enabled(struct clk_hw *hw) static int lgm_pll_enable(struct clk_hw *hw) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); - unsigned long flags; u32 val; int ret; - spin_lock_irqsave(&pll->lock, flags); lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1); - ret = readl_poll_timeout_atomic(pll->membase + pll->reg, - val, (val & 0x1), 1, 100); - spin_unlock_irqrestore(&pll->lock, flags); + ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg, + val, (val & 0x1), 1, 100); + return ret; } @@ -86,11 +79,8 @@ static int lgm_pll_enable(struct clk_hw *hw) static void lgm_pll_disable(struct clk_hw *hw) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); - unsigned long flags; - spin_lock_irqsave(&pll->lock, flags); lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0); - spin_unlock_irqrestore(&pll->lock, flags); } static const struct clk_ops lgm_pll_ops = { @@ -121,7 +111,6 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx, return ERR_PTR(-ENOMEM); pll->membase = ctx->membase; - pll->lock = ctx->lock; pll->reg = list->reg; pll->flags = list->flags; pll->type = list->type; diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c index 33de600e0c38e..89b53f280aee0 100644 --- a/drivers/clk/x86/clk-cgu.c +++ b/drivers/clk/x86/clk-cgu.c @@ -1,8 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* + * Copyright (C) 2020-2022 MaxLinear, Inc. * Copyright (C) 2020 Intel Corporation. 
- * Zhu YiXin - * Rahul Tanwar + * Zhu Yixin + * Rahul Tanwar */ #include #include @@ -24,14 +25,10 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags; - if (list->div_flags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&ctx->lock, flags); + if (list->div_flags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, list->div_width, list->div_val); - spin_unlock_irqrestore(&ctx->lock, flags); - } return clk_hw_register_fixed_rate(NULL, list->name, list->parent_data[0].name, @@ -41,33 +38,27 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx, static u8 lgm_clk_mux_get_parent(struct clk_hw *hw) { struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); - unsigned long flags; u32 val; - spin_lock_irqsave(&mux->lock, flags); if (mux->flags & MUX_CLK_SW) val = mux->reg; else val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift, mux->width); - spin_unlock_irqrestore(&mux->lock, flags); return clk_mux_val_to_index(hw, NULL, mux->flags, val); } static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index) { struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); - unsigned long flags; u32 val; val = clk_mux_index_to_val(NULL, mux->flags, index); - spin_lock_irqsave(&mux->lock, flags); if (mux->flags & MUX_CLK_SW) mux->reg = val; else lgm_set_clk_val(mux->membase, mux->reg, mux->shift, mux->width, val); - spin_unlock_irqrestore(&mux->lock, flags); return 0; } @@ -90,7 +81,7 @@ static struct clk_hw * lgm_clk_register_mux(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags, cflags = list->mux_flags; + unsigned long cflags = list->mux_flags; struct device *dev = ctx->dev; u8 shift = list->mux_shift; u8 width = list->mux_width; @@ -111,7 +102,6 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx, init.num_parents = list->num_parents; mux->membase = ctx->membase; - mux->lock = ctx->lock; mux->reg = reg; mux->shift = shift; mux->width = width; @@ -123,11 +113,8 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx, if (ret) return ERR_PTR(ret); - if (cflags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&mux->lock, flags); + if (cflags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val); - spin_unlock_irqrestore(&mux->lock, flags); - } return hw; } @@ -136,13 +123,10 @@ static unsigned long lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); - unsigned long flags; unsigned int val; - spin_lock_irqsave(÷r->lock, flags); val = lgm_get_clk_val(divider->membase, divider->reg, divider->shift, divider->width); - spin_unlock_irqrestore(÷r->lock, flags); return divider_recalc_rate(hw, parent_rate, val, divider->table, divider->flags, divider->width); @@ -163,7 +147,6 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) { struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); - unsigned long flags; int value; value = divider_get_val(rate, prate, divider->table, @@ -171,10 +154,8 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (value < 0) return value; - spin_lock_irqsave(÷r->lock, flags); lgm_set_clk_val(divider->membase, divider->reg, divider->shift, divider->width, value); - spin_unlock_irqrestore(÷r->lock, flags); return 0; } @@ -182,12 +163,10 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int 
enable) { struct lgm_clk_divider *div = to_lgm_clk_divider(hw); - unsigned long flags; - spin_lock_irqsave(&div->lock, flags); - lgm_set_clk_val(div->membase, div->reg, div->shift_gate, - div->width_gate, enable); - spin_unlock_irqrestore(&div->lock, flags); + if (div->flags != DIV_CLK_NO_MASK) + lgm_set_clk_val(div->membase, div->reg, div->shift_gate, + div->width_gate, enable); return 0; } @@ -213,7 +192,7 @@ static struct clk_hw * lgm_clk_register_divider(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags, cflags = list->div_flags; + unsigned long cflags = list->div_flags; struct device *dev = ctx->dev; struct lgm_clk_divider *div; struct clk_init_data init = {}; @@ -236,7 +215,6 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx, init.num_parents = 1; div->membase = ctx->membase; - div->lock = ctx->lock; div->reg = reg; div->shift = shift; div->width = width; @@ -251,11 +229,8 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx, if (ret) return ERR_PTR(ret); - if (cflags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&div->lock, flags); + if (cflags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(div->membase, reg, shift, width, list->div_val); - spin_unlock_irqrestore(&div->lock, flags); - } return hw; } @@ -264,7 +239,6 @@ static struct clk_hw * lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags; struct clk_hw *hw; hw = clk_hw_register_fixed_factor(ctx->dev, list->name, @@ -273,12 +247,9 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, if (IS_ERR(hw)) return ERR_CAST(hw); - if (list->div_flags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&ctx->lock, flags); + if (list->div_flags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, list->div_width, list->div_val); - spin_unlock_irqrestore(&ctx->lock, flags); - } return hw; } @@ -286,13 +257,10 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, static int lgm_clk_gate_enable(struct clk_hw *hw) { struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); - unsigned long flags; unsigned int reg; - spin_lock_irqsave(&gate->lock, flags); reg = GATE_HW_REG_EN(gate->reg); lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); - spin_unlock_irqrestore(&gate->lock, flags); return 0; } @@ -300,25 +268,19 @@ static int lgm_clk_gate_enable(struct clk_hw *hw) static void lgm_clk_gate_disable(struct clk_hw *hw) { struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); - unsigned long flags; unsigned int reg; - spin_lock_irqsave(&gate->lock, flags); reg = GATE_HW_REG_DIS(gate->reg); lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); - spin_unlock_irqrestore(&gate->lock, flags); } static int lgm_clk_gate_is_enabled(struct clk_hw *hw) { struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); unsigned int reg, ret; - unsigned long flags; - spin_lock_irqsave(&gate->lock, flags); reg = GATE_HW_REG_STAT(gate->reg); ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1); - spin_unlock_irqrestore(&gate->lock, flags); return ret; } @@ -333,7 +295,7 @@ static struct clk_hw * lgm_clk_register_gate(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags, cflags = list->gate_flags; + unsigned long cflags = list->gate_flags; const char *pname = list->parent_data[0].name; struct device *dev = ctx->dev; u8 shift = list->gate_shift; @@ -354,7 +316,6 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx, init.num_parents = pname ? 
1 : 0; gate->membase = ctx->membase; - gate->lock = ctx->lock; gate->reg = reg; gate->shift = shift; gate->flags = cflags; @@ -366,9 +327,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx, return ERR_PTR(ret); if (cflags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&gate->lock, flags); lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val); - spin_unlock_irqrestore(&gate->lock, flags); } return hw; @@ -396,8 +355,22 @@ int lgm_clk_register_branches(struct lgm_clk_provider *ctx, hw = lgm_clk_register_fixed_factor(ctx, list); break; case CLK_TYPE_GATE: - hw = lgm_clk_register_gate(ctx, list); + if (list->gate_flags & GATE_CLK_HW) { + hw = lgm_clk_register_gate(ctx, list); + } else { + /* + * GATE_CLKs can be controlled either from + * CGU clk driver i.e. this driver or directly + * from power management driver/daemon. It is + * dependent on the power policy/profile requirements + * of the end product. To override control of gate + * clks from this driver, provide NULL for this index + * of gate clk provider. + */ + hw = NULL; + } break; + default: dev_err(ctx->dev, "invalid clk type\n"); return -EINVAL; @@ -443,24 +416,18 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) static int lgm_clk_ddiv_enable(struct clk_hw *hw) { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); - unsigned long flags; - spin_lock_irqsave(&ddiv->lock, flags); lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, ddiv->width_gate, 1); - spin_unlock_irqrestore(&ddiv->lock, flags); return 0; } static void lgm_clk_ddiv_disable(struct clk_hw *hw) { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); - unsigned long flags; - spin_lock_irqsave(&ddiv->lock, flags); lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, ddiv->width_gate, 0); - spin_unlock_irqrestore(&ddiv->lock, flags); } static int @@ -497,32 +464,25 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); u32 div, ddiv1, ddiv2; - unsigned long flags; div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate); - spin_lock_irqsave(&ddiv->lock, flags); if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); div = div * 2; } - if (div <= 0) { - spin_unlock_irqrestore(&ddiv->lock, flags); + if (div <= 0) return -EINVAL; - } - if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) { - spin_unlock_irqrestore(&ddiv->lock, flags); + if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) return -EINVAL; - } lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0, ddiv1 - 1); lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1, ddiv2 - 1); - spin_unlock_irqrestore(&ddiv->lock, flags); return 0; } @@ -533,18 +493,15 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); u32 div, ddiv1, ddiv2; - unsigned long flags; u64 rate64; div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate); /* if predivide bit is enabled, modify div by factor of 2.5 */ - spin_lock_irqsave(&ddiv->lock, flags); if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { div = div * 2; div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); } - spin_unlock_irqrestore(&ddiv->lock, flags); if (div <= 0) return *prate; @@ -558,12 +515,10 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, do_div(rate64, ddiv2); /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */ - spin_lock_irqsave(&ddiv->lock, flags); if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { 
rate64 = rate64 * 2; rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5); } - spin_unlock_irqrestore(&ddiv->lock, flags); return rate64; } @@ -600,7 +555,6 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx, init.num_parents = 1; ddiv->membase = ctx->membase; - ddiv->lock = ctx->lock; ddiv->reg = list->reg; ddiv->shift0 = list->shift0; ddiv->width0 = list->width0; diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h index 4e22bfb223128..bcaf8aec94e5d 100644 --- a/drivers/clk/x86/clk-cgu.h +++ b/drivers/clk/x86/clk-cgu.h @@ -1,28 +1,28 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright(c) 2020 Intel Corporation. - * Zhu YiXin - * Rahul Tanwar + * Copyright (C) 2020-2022 MaxLinear, Inc. + * Copyright (C) 2020 Intel Corporation. + * Zhu Yixin + * Rahul Tanwar */ #ifndef __CLK_CGU_H #define __CLK_CGU_H -#include +#include struct lgm_clk_mux { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift; u8 width; unsigned long flags; - spinlock_t lock; }; struct lgm_clk_divider { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift; u8 width; @@ -30,12 +30,11 @@ struct lgm_clk_divider { u8 width_gate; unsigned long flags; const struct clk_div_table *table; - spinlock_t lock; }; struct lgm_clk_ddiv { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift0; u8 width0; @@ -48,16 +47,14 @@ struct lgm_clk_ddiv { unsigned int mult; unsigned int div; unsigned long flags; - spinlock_t lock; }; struct lgm_clk_gate { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift; unsigned long flags; - spinlock_t lock; }; enum lgm_clk_type { @@ -77,11 +74,10 @@ enum lgm_clk_type { * @clk_data: array of hw clocks and clk number. 
*/ struct lgm_clk_provider { - void __iomem *membase; + struct regmap *membase; struct device_node *np; struct device *dev; struct clk_hw_onecell_data clk_data; - spinlock_t lock; }; enum pll_type { @@ -92,11 +88,10 @@ enum pll_type { struct lgm_clk_pll { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; unsigned long flags; enum pll_type type; - spinlock_t lock; }; /** @@ -202,6 +197,8 @@ struct lgm_clk_branch { /* clock flags definition */ #define CLOCK_FLAG_VAL_INIT BIT(16) #define MUX_CLK_SW BIT(17) +#define GATE_CLK_HW BIT(18) +#define DIV_CLK_NO_MASK BIT(19) #define LGM_MUX(_id, _name, _pdata, _f, _reg, \ _shift, _width, _cf, _v) \ @@ -300,29 +297,32 @@ struct lgm_clk_branch { .div = _d, \ } -static inline void lgm_set_clk_val(void __iomem *membase, u32 reg, +static inline void lgm_set_clk_val(struct regmap *membase, u32 reg, u8 shift, u8 width, u32 set_val) { u32 mask = (GENMASK(width - 1, 0) << shift); - u32 regval; - regval = readl(membase + reg); - regval = (regval & ~mask) | ((set_val << shift) & mask); - writel(regval, membase + reg); + regmap_update_bits(membase, reg, mask, set_val << shift); } -static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg, +static inline u32 lgm_get_clk_val(struct regmap *membase, u32 reg, u8 shift, u8 width) { u32 mask = (GENMASK(width - 1, 0) << shift); u32 val; - val = readl(membase + reg); + if (regmap_read(membase, reg, &val)) { + WARN_ONCE(1, "Failed to read clk reg: 0x%x\n", reg); + return 0; + } + val = (val & mask) >> shift; return val; } + + int lgm_clk_register_branches(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list, unsigned int nr_clk); diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c index 020f4e83a5ccb..f69455dd1c980 100644 --- a/drivers/clk/x86/clk-lgm.c +++ b/drivers/clk/x86/clk-lgm.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* + * Copyright (C) 2020-2022 MaxLinear, Inc. * Copyright (C) 2020 Intel Corporation. 
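/*
 * The common thread in the clk-cgu conversion above: every MMIO field
 * update used to be an open-coded read-modify-write guarded by a per-clock
 * spinlock, and regmap makes both halves redundant because
 * regmap_update_bits() serializes the read-modify-write under regmap's own
 * internal lock (the PLL-lock poll likewise moves to
 * regmap_read_poll_timeout_atomic()). A minimal before/after sketch for a
 * 32-bit register field; both helpers are illustrative, not driver code:
 */
#include <linux/io.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>

/* before: the caller had to serialize the read-modify-write itself */
static void set_field_mmio(void __iomem *base, u32 reg, u32 mask, u32 val,
			   spinlock_t *lock)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(lock, flags);
	tmp = readl(base + reg);
	tmp = (tmp & ~mask) | (val & mask);
	writel(tmp, base + reg);
	spin_unlock_irqrestore(lock, flags);
}

/* after: regmap performs the same update atomically on its own */
static void set_field_regmap(struct regmap *map, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(map, reg, mask, val);
}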
- * Zhu YiXin - * Rahul Tanwar + * Zhu Yixin + * Rahul Tanwar */ #include +#include #include #include #include @@ -253,8 +255,8 @@ static const struct lgm_clk_branch lgm_branch_clks[] = { LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1, 8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2), LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0), - LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR, - 25, 3, 0, 0, 0, 0, dcl_div), + LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", CLK_SET_RATE_PARENT, CGU_PCMCR, + 25, 3, 0, 0, DIV_CLK_NO_MASK, 0, dcl_div), LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR, 0, 1, CLK_MUX_ROUND_CLOSEST, 0), LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr", @@ -433,13 +435,15 @@ static int lgm_cgu_probe(struct platform_device *pdev) ctx->clk_data.num = CLK_NR_CLKS; - ctx->membase = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(ctx->membase)) + ctx->membase = syscon_node_to_regmap(np); + if (IS_ERR(ctx->membase)) { + dev_err(dev, "Failed to get clk CGU iomem\n"); return PTR_ERR(ctx->membase); + } + ctx->np = np; ctx->dev = dev; - spin_lock_init(&ctx->lock); ret = lgm_clk_register_plls(ctx, lgm_pll_clks, ARRAY_SIZE(lgm_pll_clks)); diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index db8d0d7161ce2..9c82ae240c407 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -687,6 +687,13 @@ static void zynqmp_get_clock_info(void) FIELD_PREP(CLK_ATTR_NODE_INDEX, i); zynqmp_pm_clock_get_name(clock[i].clk_id, &name); + + /* + * Terminate with NULL character in case name provided by firmware + * is longer and truncated due to size limit. + */ + name.name[sizeof(name.name) - 1] = '\0'; + if (!strcmp(name.name, RESERVED_CLK_NAME)) continue; strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN); diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c index abe6afbf3407b..2ae7f9129b07a 100644 --- a/drivers/clk/zynqmp/pll.c +++ b/drivers/clk/zynqmp/pll.c @@ -99,26 +99,25 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { u32 fbdiv; - long rate_div, f; + u32 mult, div; - /* Enable the fractional mode if needed */ - rate_div = (rate * FRAC_DIV) / *prate; - f = rate_div % FRAC_DIV; - if (f) { - if (rate > PS_PLL_VCO_MAX) { - fbdiv = rate / PS_PLL_VCO_MAX; - rate = rate / (fbdiv + 1); - } - if (rate < PS_PLL_VCO_MIN) { - fbdiv = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate); - rate = rate * fbdiv; - } - return rate; + /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */ + if (rate > PS_PLL_VCO_MAX) { + div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX); + rate = rate / div; + } + if (rate < PS_PLL_VCO_MIN) { + mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate); + rate = rate * mult; } fbdiv = DIV_ROUND_CLOSEST(rate, *prate); - fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX); - return *prate * fbdiv; + if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) { + fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX); + rate = *prate * fbdiv; + } + + return rate; } /** diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 2acfcc966bb54..66e4872ab34f9 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -116,6 +117,7 @@ struct sh_cmt_device { void __iomem *mapbase; struct clk *clk; unsigned long rate; + unsigned int reg_delay; raw_spinlock_t lock; /* Protect the shared start/stop register */ @@ -235,6 +237,8 @@ static const struct sh_cmt_info sh_cmt_info[] = 
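/*
 * A standalone re-derivation of the zynqmp_pll_round_rate() rewrite above,
 * for illustration. The constants are assumptions taken from the driver
 * headers (VCO window 1.5-3.0 GHz, FBDIV range 25-125), not from this diff:
 */
#include <stdint.h>

#define VCO_MIN		1500000000ULL
#define VCO_MAX		3000000000ULL
#define FBDIV_MIN	25
#define FBDIV_MAX	125

static uint64_t div_round_up(uint64_t a, uint64_t b)
{
	return (a + b - 1) / b;
}

static uint64_t div_round_closest(uint64_t a, uint64_t b)
{
	return (a + b / 2) / b;
}

static uint64_t pll_round_rate(uint64_t rate, uint64_t prate)
{
	uint64_t fbdiv;

	/* first pull the request into the VCO window ... */
	if (rate > VCO_MAX)
		rate /= div_round_up(rate, VCO_MAX);
	if (rate < VCO_MIN)
		rate *= div_round_up(VCO_MIN, rate);

	/* ... then snap to prate * fbdiv only when fbdiv is out of range */
	fbdiv = div_round_closest(rate, prate);
	if (fbdiv < FBDIV_MIN)
		rate = prate * FBDIV_MIN;
	else if (fbdiv > FBDIV_MAX)
		rate = prate * FBDIV_MAX;
	return rate;
}

/* e.g. pll_round_rate(4000000000, 33333333): 4 GHz exceeds VCO_MAX, so it
 * is divided by DIV_ROUND_UP(4 GHz, 3 GHz) = 2 down to 2 GHz; fbdiv then
 * rounds to 60, which is in range, so 2 GHz is returned as-is instead of
 * the old code's unconditional prate * fbdiv. */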
{ #define CMCNT 1 /* channel register */ #define CMCOR 2 /* channel register */ +#define CMCLKE 0x1000 /* CLK Enable Register (R-Car Gen2) */ + static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch) { if (ch->iostart) @@ -245,10 +249,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch) static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value) { - if (ch->iostart) - ch->cmt->info->write_control(ch->iostart, 0, value); - else - ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); + u32 old_value = sh_cmt_read_cmstr(ch); + + if (value != old_value) { + if (ch->iostart) { + ch->cmt->info->write_control(ch->iostart, 0, value); + udelay(ch->cmt->reg_delay); + } else { + ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); + udelay(ch->cmt->reg_delay); + } + } } static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) @@ -258,7 +269,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value) { - ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); + u32 old_value = sh_cmt_read_cmcsr(ch); + + if (value != old_value) { + ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); + udelay(ch->cmt->reg_delay); + } } static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) @@ -266,14 +282,33 @@ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) return ch->cmt->info->read_count(ch->ioctrl, CMCNT); } -static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) +static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) { + /* Tests showed that we need to wait 3 clocks here */ + unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2); + u32 reg; + + if (ch->cmt->info->model > SH_CMT_16BIT) { + int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg, + !(reg & SH_CMT32_CMCSR_WRFLG), + 1, cmcnt_delay, false, ch); + if (ret < 0) + return ret; + } + ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); + udelay(cmcnt_delay); + return 0; } static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value) { - ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); + u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR); + + if (value != old_value) { + ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); + udelay(ch->cmt->reg_delay); + } } static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped) @@ -317,7 +352,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) static int sh_cmt_enable(struct sh_cmt_channel *ch) { - int k, ret; + int ret; pm_runtime_get_sync(&ch->cmt->pdev->dev); dev_pm_syscore_device(&ch->cmt->pdev->dev, true); @@ -345,26 +380,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch) } sh_cmt_write_cmcor(ch, 0xffffffff); - sh_cmt_write_cmcnt(ch, 0); + ret = sh_cmt_write_cmcnt(ch, 0); - /* - * According to the sh73a0 user's manual, as CMCNT can be operated - * only by the RCLK (Pseudo 32 kHz), there's one restriction on - * modifying CMCNT register; two RCLK cycles are necessary before - * this register is either read or any modification of the value - * it holds is reflected in the LSI's actual operation. - * - * While at it, we're supposed to clear out the CMCNT as of this - * moment, so make sure it's processed properly here. This will - * take RCLKx2 at maximum. 
- */ - for (k = 0; k < 100; k++) { - if (!sh_cmt_read_cmcnt(ch)) - break; - udelay(1); - } - - if (sh_cmt_read_cmcnt(ch)) { + if (ret || sh_cmt_read_cmcnt(ch)) { dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", ch->index); ret = -ETIMEDOUT; @@ -849,6 +867,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, unsigned int hwidx, bool clockevent, bool clocksource, struct sh_cmt_device *cmt) { + u32 value; int ret; /* Skip unused channels. */ @@ -878,6 +897,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, ch->iostart = cmt->mapbase + ch->hwidx * 0x100; ch->ioctrl = ch->iostart + 0x10; ch->timer_bit = 0; + + /* Enable the clock supply to the channel */ + value = ioread32(cmt->mapbase + CMCLKE); + value |= BIT(hwidx); + iowrite32(value, cmt->mapbase + CMCLKE); break; } @@ -968,8 +992,8 @@ MODULE_DEVICE_TABLE(of, sh_cmt_of_table); static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) { - unsigned int mask; - unsigned int i; + unsigned int mask, i; + unsigned long rate; int ret; cmt->pdev = pdev; @@ -1005,17 +1029,21 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) if (ret < 0) goto err_clk_unprepare; - if (cmt->info->width == 16) - cmt->rate = clk_get_rate(cmt->clk) / 512; - else - cmt->rate = clk_get_rate(cmt->clk) / 8; + rate = clk_get_rate(cmt->clk); + if (!rate) { + ret = -EINVAL; + goto err_clk_disable; + } - clk_disable(cmt->clk); + /* We shall wait 2 input clks after register writes */ + if (cmt->info->model >= SH_CMT_48BIT) + cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate); + cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8); /* Map the memory resource(s). */ ret = sh_cmt_map_memory(cmt); if (ret < 0) - goto err_clk_unprepare; + goto err_clk_disable; /* Allocate and setup the channels. 
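/*
 * Worked numbers for the delays introduced above: on R-Car parts the CMT
 * is typically fed by RCLK at 32768 Hz (an assumption here, not stated in
 * the diff), so the two-input-clock rule gives
 *   reg_delay = DIV_ROUND_UP(2 * USEC_PER_SEC, 32768) = 62 us
 * per control/count register write, and the three-clock rule in
 * sh_cmt_write_cmcnt() becomes
 *   cmcnt_delay = DIV_ROUND_UP(3 * 62, 2) = 93 us.
 * Those stalls are expensive, which is why the write helpers above now
 * compare the old and new register values first and skip the udelay()
 * entirely when nothing would change.
 */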
*/ cmt->num_channels = hweight8(cmt->hw_channels); @@ -1043,6 +1071,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) mask &= ~(1 << hwidx); } + clk_disable(cmt->clk); + platform_set_drvdata(pdev, cmt); return 0; @@ -1050,6 +1080,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) err_unmap: kfree(cmt->channels); iounmap(cmt->mapbase); +err_clk_disable: + clk_disable(cmt->clk); err_clk_unprepare: clk_unprepare(cmt->clk); err_clk_put: diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 0e7748df4be30..c51c5ed15aa75 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -32,7 +32,7 @@ static int riscv_clock_next_event(unsigned long delta, static unsigned int riscv_clock_event_irq; static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = { .name = "riscv_timer_clockevent", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP, + .features = CLOCK_EVT_FEAT_ONESHOT, .rating = 100, .set_next_event = riscv_clock_next_event, }; diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 2737407ff0698..632523c1232f6 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -345,8 +345,10 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, return error; r = clk_get_rate(clock); - if (!r) + if (!r) { + clk_disable_unprepare(clock); return -ENODEV; + } if (is_ick) t->ick = clock; diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index 710acc0a37044..85fbbac06d314 100644 --- a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -29,7 +29,6 @@ struct mchp_tc_data { int qdec_mode; int num_channels; int channel[2]; - bool trig_inverted; }; enum mchp_tc_count_function { @@ -163,7 +162,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter, regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr); - if (priv->trig_inverted) + if (signal->id == 1) sigstatus = (sr & ATMEL_TC_MTIOB); else sigstatus = (sr & ATMEL_TC_MTIOA); @@ -181,6 +180,17 @@ static int mchp_tc_count_action_get(struct counter_device *counter, struct mchp_tc_data *const priv = counter->priv; u32 cmr; + if (priv->qdec_mode) { + *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; + return 0; + } + + /* Only TIOA signal is evaluated in non-QDEC mode */ + if (synapse->signal->id != 0) { + *action = COUNTER_SYNAPSE_ACTION_NONE; + return 0; + } + regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr); switch (cmr & ATMEL_TC_ETRGEDG) { @@ -209,8 +219,8 @@ static int mchp_tc_count_action_set(struct counter_device *counter, struct mchp_tc_data *const priv = counter->priv; u32 edge = ATMEL_TC_ETRGEDG_NONE; - /* QDEC mode is rising edge only */ - if (priv->qdec_mode) + /* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */ + if (priv->qdec_mode || synapse->signal->id != 0) return -EINVAL; switch (action) { diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index 937439635d53f..b084e971a4934 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, - (val & 
STM32_LPTIM_CMPOK_ARROK), + (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) return ret; diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index d0b10baf039ab..151771129c7ba 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -124,6 +124,8 @@ static int __init amd_freq_sensitivity_init(void) if (!pcidev) { if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK)) return -ENODEV; + } else { + pci_dev_put(pcidev); } if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 2de7fd18f66a1..f0be8a43ec496 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -443,7 +443,7 @@ static int __init armada37xx_cpufreq_driver_init(void) return -ENODEV; } - clk = clk_get(cpu_dev, 0); + clk = clk_get(cpu_dev, NULL); if (IS_ERR(clk)) { dev_err(cpu_dev, "Cannot get clock for CPU0\n"); return PTR_ERR(clk); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a3734014db477..aea285651fbaf 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -130,6 +130,7 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, + { .compatible = "nvidia,tegra234", }, { .compatible = "qcom,apq8096", }, { .compatible = "qcom,msm8996", }, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 30dafe8fc5054..58342390966b7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1211,6 +1211,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) goto err_free_rcpumask; + init_completion(&policy->kobj_unregister); ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, cpufreq_global_kobject, "policy%u", cpu); if (ret) { @@ -1249,7 +1250,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) init_rwsem(&policy->rwsem); spin_lock_init(&policy->transition_lock); init_waitqueue_head(&policy->transition_wait); - init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); policy->cpu = cpu; diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 6de07556665b1..a9880998f8bad 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -158,6 +158,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, } } else if (ret != -ENODEV) { dev_err(cpu_dev, "Invalid opp table in device tree\n"); + kfree(table); return ret; } else { policy->fast_switch_possible = true; diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 7fdd30e92e429..9b3d24721d7b2 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -215,6 +215,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, int speed = 0, pvs = 0, pvs_ver = 0; u8 *speedbin; size_t len; + int ret = 0; speedbin = nvmem_cell_read(speedbin_nvmem, &len); @@ -232,7 +233,8 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, break; default: dev_err(cpu_dev, "Unable to read nvmem data. 
Defaulting to 0!\n"); - return -ENODEV; + ret = -ENODEV; + goto len_error; } snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d", @@ -240,8 +242,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, drv->versions = (1 << speed); +len_error: kfree(speedbin); - return 0; + return ret; } static const struct qcom_cpufreq_match_data match_data_kryo = { @@ -264,7 +267,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) struct nvmem_cell *speedbin_nvmem; struct device_node *np; struct device *cpu_dev; - char *pvs_name = "speedXX-pvsXX-vXX"; + char pvs_name_buffer[] = "speedXX-pvsXX-vXX"; + char *pvs_name = pvs_name_buffer; unsigned cpu; const struct of_device_id *match; int ret; diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index 252f2a9686a62..448bc796b0b40 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -223,6 +223,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, * also be 0 on platforms with missing DT idle states or legacy DT * configuration predating the DT idle states bindings. */ - return i; + return state_idx - start_idx; } EXPORT_SYMBOL_GPL(dt_init_idle_driver); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index ff5e85eefbf69..0a3dd0793f30e 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -749,7 +749,12 @@ config CRYPTO_DEV_IMGTEC_HASH config CRYPTO_DEV_ROCKCHIP tristate "Rockchip's Cryptographic Engine driver" depends on OF && ARCH_ROCKCHIP + depends on PM + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_DES select CRYPTO_AES + select CRYPTO_ENGINE select CRYPTO_LIB_DES select CRYPTO_MD5 select CRYPTO_SHA1 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index d0954993e2e37..49c7a8b464ddf 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -105,7 +105,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; int i = 0; - u32 a; + dma_addr_t a; int err; rctx->ivlen = ivsize; diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 5bbeff433c8c0..7a5cf1122af9e 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -240,7 +240,6 @@ static int meson_crypto_probe(struct platform_device *pdev) return err; } - mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); for (i = 0; i < MAXFLOW; i++) { mc->irqs[i] = platform_get_irq(pdev, i); if (mc->irqs[i] < 0) diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index dc0f142324a3c..8c0746a1d6d43 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -95,7 +95,7 @@ struct meson_dev { struct device *dev; struct meson_flow *chanlist; atomic_t flow; - int *irqs; + int irqs[MAXFLOW]; #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG struct dentry *dbgfs_dir; #endif diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 7819490274512..d9362199423f2 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -254,6 +254,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) const struct firmware *fw_entry; struct device *dev = &cpt->pdev->dev; 
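/*
 * The pvs_name change in the qcom-cpufreq-nvmem hunk above fixes writes
 * through a pointer to a string literal: snprintf() was filling the
 * literal itself, which is undefined behavior since literals may live in
 * read-only memory. A minimal sketch:
 */
#include <stdio.h>

static void pvs_name_demo(void)
{
	/* writable stack copy, sized by the template */
	char pvs_name[] = "speedXX-pvsXX-vXX";

	/* with 'char *pvs_name = "speedXX-pvsXX-vXX";' this same call
	 * would scribble on the literal and can fault at runtime */
	snprintf(pvs_name, sizeof(pvs_name), "speed%d-pvs%d-v%d", 1, 2, 3);
}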
struct ucode_header *ucode; + unsigned int code_length; struct microcode *mcode; int j, ret = 0; @@ -264,11 +265,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) ucode = (struct ucode_header *)fw_entry->data; mcode = &cpt->mcode[cpt->next_mc_idx]; memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); - mcode->code_size = ntohl(ucode->code_length) * 2; - if (!mcode->code_size) { + code_length = ntohl(ucode->code_length); + if (code_length == 0 || code_length >= INT_MAX / 2) { ret = -EINVAL; goto fw_release; } + mcode->code_size = code_length * 2; mcode->is_ae = is_ae; mcode->core_mask = 0ULL; diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c index b51b0449b4783..a131dbbbcb869 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -190,6 +190,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); if (!ndev->iov.pf2vf_wq) { kfree(ndev->iov.vfdev); + ndev->iov.vfdev = NULL; return -ENOMEM; } /* enable pf2vf mailbox interrupts */ diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index b3eea329f840f..b9299defb431d 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -642,6 +642,10 @@ static void ccp_dma_release(struct ccp_device *ccp) for (i = 0; i < ccp->cmd_q_count; i++) { chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; + + if (dma_chan->client_count) + dma_release_channel(dma_chan); + tasklet_kill(&chan->cleanup_tasklet); list_del_rcu(&dma_chan->device_node); } @@ -767,8 +771,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) if (!dmaengine) return; - dma_async_device_unregister(dma_dev); ccp_dma_release(ccp); + dma_async_device_unregister(dma_dev); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 7083767602fcf..8f008f024f8f1 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void) cc_debugfs_dir = debugfs_create_dir("ccree", NULL); } -void __exit cc_debugfs_global_fini(void) +void cc_debugfs_global_fini(void) { debugfs_remove(cc_debugfs_dir); } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 6f519d3e896ca..7924693f58e09 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -614,9 +614,17 @@ static struct platform_driver ccree_driver = { static int __init ccree_init(void) { + int rc; + cc_debugfs_global_init(); - return platform_driver_register(&ccree_driver); + rc = platform_driver_register(&ccree_driver); + if (rc) { + cc_debugfs_global_fini(); + return rc; + } + + return 0; } module_init(ccree_init); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 0420f4ce71979..aaad3d76dc04e 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -289,14 +289,14 @@ struct hisi_qp { static inline int q_num_set(const char *val, const struct kernel_param *kp, unsigned int device) { - struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - device, NULL); + struct pci_dev *pdev; u32 n, q_num; int ret; if (!val) return -EINVAL; + pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); if (!pdev) { q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); pr_info("No device found 
currently, suppose queue number is %d\n", @@ -306,6 +306,8 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp, q_num = QM_QNUM_V1; else q_num = QM_QNUM_V2; + + pci_dev_put(pdev); } ret = kstrtou32(val, 10, &n); diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 08b4660b014c6..5db7cdea994ae 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -107,12 +107,12 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) return -EINVAL; - return param_set_int(val, kp); + return param_set_ushort(val, kp); } static const struct kernel_param_ops sgl_sge_nr_ops = { .set = sgl_sge_nr_set, - .get = param_get_int, + .get = param_get_ushort, }; static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 91f555ccbb319..cecae50d0f58d 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -357,12 +357,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) static void img_hash_dma_task(unsigned long d) { struct img_hash_dev *hdev = (struct img_hash_dev *)d; - struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + struct img_hash_request_ctx *ctx; u8 *addr; size_t nbytes, bleft, wsend, len, tbc; struct scatterlist tsg; - if (!hdev->req || !ctx->sg) + if (!hdev->req) + return; + + ctx = ahash_request_ctx(hdev->req); + if (!ctx->sg) return; addr = sg_virt(ctx->sg); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 56d5ccb5cc004..1c9af02eb63b6 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -381,7 +381,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, u32 x; x = ipad[i] ^ ipad[i + 4]; - cache[i] ^= swab(x); + cache[i] ^= swab32(x); } } cache_len = AES_BLOCK_SIZE; @@ -819,7 +819,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) u32 *result = (void *)areq->result; /* K3 */ - result[i] = swab(ctx->base.ipad.word[i + 4]); + result[i] = swab32(ctx->base.ipad.word[i + 4]); } areq->result[0] ^= 0x80; // 10- padding crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result); @@ -2104,7 +2104,7 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE, "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++) - ctx->base.ipad.word[i] = swab(key_tmp[i]); + ctx->base.ipad.word[i] = swab32(key_tmp[i]); crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & @@ -2187,7 +2187,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, return ret; for (i = 0; i < len / sizeof(u32); i++) - ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]); + ctx->base.ipad.word[i + 8] = swab32(aes.key_enc[i]); /* precompute the CMAC key material */ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index 40b482198ebc5..a765eefb18c2f 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev, struct 
tar_ucode_info_t *tar_info; struct otx_cpt_ucode_hdr *ucode_hdr; int ucode_type, ucode_size; + unsigned int code_length; /* * If size is less than microcode header size then don't report @@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev, if (get_ucode_type(ucode_hdr, &ucode_type)) return 0; - ucode_size = ntohl(ucode_hdr->code_length) * 2; + code_length = ntohl(ucode_hdr->code_length); + if (code_length >= INT_MAX / 2) { + dev_err(dev, "Invalid code_length %u\n", code_length); + return -EINVAL; + } + + ucode_size = code_length * 2; if (!ucode_size || (size < round_up(ucode_size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { dev_err(dev, "Ucode %s invalid size\n", filename); @@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, { struct otx_cpt_ucode_hdr *ucode_hdr; const struct firmware *fw; + unsigned int code_length; int ret; set_ucode_filename(ucode, ucode_filename); @@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data; memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); ucode->ver_num = ucode_hdr->ver_num; - ucode->size = ntohl(ucode_hdr->code_length) * 2; + code_length = ntohl(ucode_hdr->code_length); + if (code_length >= INT_MAX / 2) { + dev_err(dev, "Ucode invalid code_length %u\n", code_length); + ret = -EINVAL; + goto release_fw; + } + ucode->size = code_length * 2; if (!ucode->size || (fw->size < round_up(ucode->size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { dev_err(dev, "Ucode %s invalid size\n", ucode_filename); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 3642bf83d8094..8c4149c7d7bef 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1228,6 +1228,7 @@ struct n2_hash_tmpl { const u8 *hash_init; u8 hw_op_hashsz; u8 digest_size; + u8 statesize; u8 block_size; u8 auth_type; u8 hmac_type; @@ -1259,6 +1260,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_MD5, .hw_op_hashsz = MD5_DIGEST_SIZE, .digest_size = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), .block_size = MD5_HMAC_BLOCK_SIZE }, { .name = "sha1", .hash_zero = sha1_zero_message_hash, @@ -1267,6 +1269,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA1, .hw_op_hashsz = SHA1_DIGEST_SIZE, .digest_size = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), .block_size = SHA1_BLOCK_SIZE }, { .name = "sha256", .hash_zero = sha256_zero_message_hash, @@ -1275,6 +1278,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA256, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = SHA256_BLOCK_SIZE }, { .name = "sha224", .hash_zero = sha224_zero_message_hash, @@ -1283,6 +1287,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_RESERVED, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = SHA224_BLOCK_SIZE }, }; #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) @@ -1423,6 +1428,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) halg = &ahash->halg; halg->digestsize = tmpl->digest_size; + halg->statesize = tmpl->statesize; base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); diff --git a/drivers/crypto/omap-sham.c 
b/drivers/crypto/omap-sham.c index 48f78e34cf8dd..5a57617441b81 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2130,7 +2130,7 @@ static int omap_sham_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_irq_safe(dev); - err = pm_runtime_get_sync(dev); + err = pm_runtime_resume_and_get(dev); if (err < 0) { dev_err(dev, "failed to get sync: %d\n", err); goto err_pm; diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 06abe1e2074e9..5b71768fc0c7f 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -34,19 +34,6 @@ static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; -struct qat_alg_buf { - u32 len; - u32 resrvd; - u64 addr; -} __packed; - -struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; - struct qat_alg_buf bufers[]; -} __packed __aligned(64); - /* Common content descriptor */ struct qat_alg_cd { union { @@ -637,14 +624,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, dma_addr_t blpout = qat_req->buf.bloutp; size_t sz = qat_req->buf.sz; size_t sz_out = qat_req->buf.sz_out; + int bl_dma_dir; int i; + bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + for (i = 0; i < bl->num_bufs; i++) dma_unmap_single(dev, bl->bufers[i].addr, - bl->bufers[i].len, DMA_BIDIRECTIONAL); + bl->bufers[i].len, bl_dma_dir); dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); - kfree(bl); + + if (!qat_req->buf.sgl_src_valid) + kfree(bl); + if (blp != blpout) { /* If out of place operation dma unmap only data */ int bufless = blout->num_bufs - blout->num_mapped_bufs; @@ -652,10 +645,12 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, for (i = bufless; i < blout->num_bufs; i++) { dma_unmap_single(dev, blout->bufers[i].addr, blout->bufers[i].len, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); } dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); - kfree(blout); + + if (!qat_req->buf.sgl_dst_valid) + kfree(blout); } } @@ -669,26 +664,34 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, int n = sg_nents(sgl); struct qat_alg_buf_list *bufl; struct qat_alg_buf_list *buflout = NULL; - dma_addr_t blp; - dma_addr_t bloutp; + dma_addr_t blp = DMA_MAPPING_ERROR; + dma_addr_t bloutp = DMA_MAPPING_ERROR; struct scatterlist *sg; - size_t sz_out, sz = struct_size(bufl, bufers, n + 1); + size_t sz_out, sz = struct_size(bufl, bufers, n); + int node = dev_to_node(&GET_DEV(inst->accel_dev)); + int bufl_dma_dir; if (unlikely(!n)) return -EINVAL; - bufl = kzalloc_node(sz, GFP_ATOMIC, - dev_to_node(&GET_DEV(inst->accel_dev))); - if (unlikely(!bufl)) - return -ENOMEM; + qat_req->buf.sgl_src_valid = false; + qat_req->buf.sgl_dst_valid = false; + + if (n > QAT_MAX_BUFF_DESC) { + bufl = kzalloc_node(sz, GFP_ATOMIC, node); + if (unlikely(!bufl)) + return -ENOMEM; + } else { + bufl = &qat_req->buf.sgl_src.sgl_hdr; + memset(bufl, 0, sizeof(struct qat_alg_buf_list)); + qat_req->buf.sgl_src_valid = true; + } + + bufl_dma_dir = sgl != sglout ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; for_each_sg(sgl, sg, n, i) bufl->bufers[i].addr = DMA_MAPPING_ERROR; - blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, blp))) - goto err_in; - for_each_sg(sgl, sg, n, i) { int y = sg_nctr; @@ -697,13 +700,16 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, - DMA_BIDIRECTIONAL); + bufl_dma_dir); bufl->bufers[y].len = sg->length; if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) goto err_in; sg_nctr++; } bufl->num_bufs = sg_nctr; + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err_in; qat_req->buf.bl = bufl; qat_req->buf.blp = blp; qat_req->buf.sz = sz; @@ -712,20 +718,23 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct qat_alg_buf *bufers; n = sg_nents(sglout); - sz_out = struct_size(buflout, bufers, n + 1); + sz_out = struct_size(buflout, bufers, n); sg_nctr = 0; - buflout = kzalloc_node(sz_out, GFP_ATOMIC, - dev_to_node(&GET_DEV(inst->accel_dev))); - if (unlikely(!buflout)) - goto err_in; + + if (n > QAT_MAX_BUFF_DESC) { + buflout = kzalloc_node(sz_out, GFP_ATOMIC, node); + if (unlikely(!buflout)) + goto err_in; + } else { + buflout = &qat_req->buf.sgl_dst.sgl_hdr; + memset(buflout, 0, sizeof(struct qat_alg_buf_list)); + qat_req->buf.sgl_dst_valid = true; + } bufers = buflout->bufers; for_each_sg(sglout, sg, n, i) bufers[i].addr = DMA_MAPPING_ERROR; - bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, bloutp))) - goto err_out; for_each_sg(sglout, sg, n, i) { int y = sg_nctr; @@ -734,7 +743,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, bufers[y].addr))) goto err_out; bufers[y].len = sg->length; @@ -742,6 +751,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, } buflout->num_bufs = sg_nctr; buflout->num_mapped_bufs = sg_nctr; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err_out; qat_req->buf.blout = buflout; qat_req->buf.bloutp = bloutp; qat_req->buf.sz_out = sz_out; @@ -753,27 +765,32 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, return 0; err_out: + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); + n = sg_nents(sglout); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, buflout->bufers[i].addr)) dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, - DMA_BIDIRECTIONAL); - if (!dma_mapping_error(dev, bloutp)) - dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); - kfree(buflout); + DMA_FROM_DEVICE); + + if (!qat_req->buf.sgl_dst_valid) + kfree(buflout); err_in: + if (!dma_mapping_error(dev, blp)) + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + n = sg_nents(sgl); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, bufl->bufers[i].addr)) dma_unmap_single(dev, bufl->bufers[i].addr, bufl->bufers[i].len, - DMA_BIDIRECTIONAL); + bufl_dma_dir); - if (!dma_mapping_error(dev, blp)) - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); - kfree(bufl); + if (!qat_req->buf.sgl_src_valid) + kfree(bufl); dev_err(dev, "Failed to map buf for dma\n"); return -ENOMEM; diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h 
index 12682d1e9f5f3..5f9328201ba46 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -20,6 +20,26 @@ struct qat_crypto_instance { atomic_t refctr; }; +#define QAT_MAX_BUFF_DESC 4 + +struct qat_alg_buf { + u32 len; + u32 resrvd; + u64 addr; +} __packed; + +struct qat_alg_buf_list { + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + struct qat_alg_buf bufers[]; +} __packed; + +struct qat_alg_fixed_buf_list { + struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; +} __packed __aligned(64); + struct qat_crypto_request_buffs { struct qat_alg_buf_list *bl; dma_addr_t blp; @@ -27,6 +47,10 @@ struct qat_crypto_request_buffs { dma_addr_t bloutp; size_t sz; size_t sz_out; + bool sgl_src_valid; + bool sgl_dst_valid; + struct qat_alg_fixed_buf_list sgl_src; + struct qat_alg_fixed_buf_list sgl_dst; }; struct qat_crypto_request; diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 35d73061d1569..14a0aef18ab13 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -65,186 +65,24 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev) clk_disable_unprepare(dev->sclk); } -static int check_alignment(struct scatterlist *sg_src, - struct scatterlist *sg_dst, - int align_mask) -{ - int in, out, align; - - in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && - IS_ALIGNED((uint32_t)sg_src->length, align_mask); - if (!sg_dst) - return in; - out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && - IS_ALIGNED((uint32_t)sg_dst->length, align_mask); - align = in && out; - - return (align && (sg_src->length == sg_dst->length)); -} - -static int rk_load_data(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst) -{ - unsigned int count; - - dev->aligned = dev->aligned ? - check_alignment(sg_src, sg_dst, dev->align_size) : - dev->aligned; - if (dev->aligned) { - count = min(dev->left_bytes, sg_src->length); - dev->left_bytes -= count; - - if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", - __func__, __LINE__); - return -EINVAL; - } - dev->addr_in = sg_dma_address(sg_src); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(dst) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, sg_src, 1, - DMA_TO_DEVICE); - return -EINVAL; - } - dev->addr_out = sg_dma_address(sg_dst); - } - } else { - count = (dev->left_bytes > PAGE_SIZE) ? 
- PAGE_SIZE : dev->left_bytes; - - if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, - dev->addr_vir, count, - dev->total - dev->left_bytes)) { - dev_err(dev->dev, "[%s:%d] pcopy err\n", - __func__, __LINE__); - return -EINVAL; - } - dev->left_bytes -= count; - sg_init_one(&dev->sg_tmp, dev->addr_vir, count); - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - return -ENOMEM; - } - dev->addr_in = sg_dma_address(&dev->sg_tmp); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, - DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, - DMA_TO_DEVICE); - return -ENOMEM; - } - dev->addr_out = sg_dma_address(&dev->sg_tmp); - } - } - dev->count = count; - return 0; -} - -static void rk_unload_data(struct rk_crypto_info *dev) -{ - struct scatterlist *sg_in, *sg_out; - - sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); - - if (dev->sg_dst) { - sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); - } -} - static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) { struct rk_crypto_info *dev = platform_get_drvdata(dev_id); u32 interrupt_status; - spin_lock(&dev->lock); interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + dev->status = 1; if (interrupt_status & 0x0a) { dev_warn(dev->dev, "DMA Error\n"); - dev->err = -EFAULT; + dev->status = 0; } - tasklet_schedule(&dev->done_task); + complete(&dev->complete); - spin_unlock(&dev->lock); return IRQ_HANDLED; } -static int rk_crypto_enqueue(struct rk_crypto_info *dev, - struct crypto_async_request *async_req) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&dev->lock, flags); - ret = crypto_enqueue_request(&dev->queue, async_req); - if (dev->busy) { - spin_unlock_irqrestore(&dev->lock, flags); - return ret; - } - dev->busy = true; - spin_unlock_irqrestore(&dev->lock, flags); - tasklet_schedule(&dev->queue_task); - - return ret; -} - -static void rk_crypto_queue_task_cb(unsigned long data) -{ - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - struct crypto_async_request *async_req, *backlog; - unsigned long flags; - int err = 0; - - dev->err = 0; - spin_lock_irqsave(&dev->lock, flags); - backlog = crypto_get_backlog(&dev->queue); - async_req = crypto_dequeue_request(&dev->queue); - - if (!async_req) { - dev->busy = false; - spin_unlock_irqrestore(&dev->lock, flags); - return; - } - spin_unlock_irqrestore(&dev->lock, flags); - - if (backlog) { - backlog->complete(backlog, -EINPROGRESS); - backlog = NULL; - } - - dev->async_req = async_req; - err = dev->start(dev); - if (err) - dev->complete(dev->async_req, err); -} - -static void rk_crypto_done_task_cb(unsigned long data) -{ - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - - if (dev->err) { - dev->complete(dev->async_req, dev->err); - return; - } - - dev->err = dev->update(dev); - if (dev->err) - dev->complete(dev->async_req, dev->err); -} - static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_ecb_aes_alg, &rk_cbc_aes_alg, @@ -337,8 +175,6 @@ static int rk_crypto_probe(struct platform_device *pdev) if (err) goto err_crypto; - spin_lock_init(&crypto_info->lock); - crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crypto_info->reg)) { err = PTR_ERR(crypto_info->reg); @@ -389,18 +225,11 
@@ static int rk_crypto_probe(struct platform_device *pdev) crypto_info->dev = &pdev->dev; platform_set_drvdata(pdev, crypto_info); - tasklet_init(&crypto_info->queue_task, - rk_crypto_queue_task_cb, (unsigned long)crypto_info); - tasklet_init(&crypto_info->done_task, - rk_crypto_done_task_cb, (unsigned long)crypto_info); - crypto_init_queue(&crypto_info->queue, 50); + crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); + crypto_engine_start(crypto_info->engine); + init_completion(&crypto_info->complete); - crypto_info->enable_clk = rk_crypto_enable_clk; - crypto_info->disable_clk = rk_crypto_disable_clk; - crypto_info->load_data = rk_load_data; - crypto_info->unload_data = rk_unload_data; - crypto_info->enqueue = rk_crypto_enqueue; - crypto_info->busy = false; + rk_crypto_enable_clk(crypto_info); err = rk_crypto_register(crypto_info); if (err) { @@ -412,9 +241,9 @@ static int rk_crypto_probe(struct platform_device *pdev) return 0; err_register_alg: - tasklet_kill(&crypto_info->queue_task); - tasklet_kill(&crypto_info->done_task); + crypto_engine_exit(crypto_info->engine); err_crypto: + dev_err(dev, "Crypto Accelerator not successfully registered\n"); return err; } @@ -423,8 +252,8 @@ static int rk_crypto_remove(struct platform_device *pdev) struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); rk_crypto_unregister(); - tasklet_kill(&crypto_tmp->done_task); - tasklet_kill(&crypto_tmp->queue_task); + rk_crypto_disable_clk(crypto_tmp); + crypto_engine_exit(crypto_tmp->engine); return 0; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index 3db595570c9c2..6b1413c0359b1 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -5,9 +5,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -192,45 +194,15 @@ struct rk_crypto_info { struct reset_control *rst; void __iomem *reg; int irq; - struct crypto_queue queue; - struct tasklet_struct queue_task; - struct tasklet_struct done_task; - struct crypto_async_request *async_req; - int err; - /* device lock */ - spinlock_t lock; - - /* the public variable */ - struct scatterlist *sg_src; - struct scatterlist *sg_dst; - struct scatterlist sg_tmp; - struct scatterlist *first; - unsigned int left_bytes; - void *addr_vir; - int aligned; - int align_size; - size_t src_nents; - size_t dst_nents; - unsigned int total; - unsigned int count; - dma_addr_t addr_in; - dma_addr_t addr_out; - bool busy; - int (*start)(struct rk_crypto_info *dev); - int (*update)(struct rk_crypto_info *dev); - void (*complete)(struct crypto_async_request *base, int err); - int (*enable_clk)(struct rk_crypto_info *dev); - void (*disable_clk)(struct rk_crypto_info *dev); - int (*load_data)(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst); - void (*unload_data)(struct rk_crypto_info *dev); - int (*enqueue)(struct rk_crypto_info *dev, - struct crypto_async_request *async_req); + + struct crypto_engine *engine; + struct completion complete; + int status; }; /* the private variable of hash */ struct rk_ahash_ctx { + struct crypto_engine_ctx enginectx; struct rk_crypto_info *dev; /* for fallback */ struct crypto_ahash *fallback_tfm; @@ -240,14 +212,23 @@ struct rk_ahash_ctx { struct rk_ahash_rctx { struct ahash_request fallback_req; u32 mode; + int nrsg; }; /* the private variable of cipher */ struct rk_cipher_ctx { + struct crypto_engine_ctx enginectx; struct rk_crypto_info *dev; unsigned 
int keylen; - u32 mode; + u8 key[AES_MAX_KEY_SIZE]; u8 iv[AES_BLOCK_SIZE]; + struct crypto_skcipher *fallback_tfm; +}; + +struct rk_cipher_rctx { + u8 backup_iv[AES_BLOCK_SIZE]; + u32 mode; + struct skcipher_request fallback_req; // keep at the end }; enum alg_type { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 81befe7febaa4..edd40e16a3f0a 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -9,6 +9,7 @@ * Some ideas are from marvell/cesa.c and s5p-sss.c driver. */ #include <linux/device.h> +#include <asm/unaligned.h> #include "rk3288_crypto.h" /* @@ -16,6 +17,40 @@ * so we put the fixed hash out when met zero message. */ +static bool rk_ahash_need_fallback(struct ahash_request *req) +{ + struct scatterlist *sg; + + sg = req->src; + while (sg) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + return true; + } + if (sg->length % 4) { + return true; + } + sg = sg_next(sg); + } + return false; +} + +static int rk_ahash_digest_fb(struct ahash_request *areq) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; + + return crypto_ahash_digest(&rctx->fallback_req); +} + static int zero_message_process(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -38,17 +73,13 @@ static int zero_message_process(struct ahash_request *req) return 0; } -static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) -{ - if (base->complete) - base->complete(base, err); -} - -static void rk_ahash_reg_init(struct rk_crypto_info *dev) +static void rk_ahash_reg_init(struct ahash_request *req) { - struct ahash_request *req = ahash_request_cast(dev->async_req); struct rk_ahash_rctx *rctx = ahash_request_ctx(req); - int reg_status = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + struct rk_crypto_info *dev = tctx->dev; + int reg_status; reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16); @@ -74,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) RK_CRYPTO_BYTESWAP_BRFIFO | RK_CRYPTO_BYTESWAP_BTFIFO); - CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); } static int rk_ahash_init(struct ahash_request *req) @@ -167,48 +198,64 @@ static int rk_ahash_digest(struct ahash_request *req) struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct rk_crypto_info *dev = tctx->dev; + if (rk_ahash_need_fallback(req)) + return rk_ahash_digest_fb(req); + if (!req->nbytes) return zero_message_process(req); - else - return dev->enqueue(dev, &req->base); + + return crypto_transfer_hash_request_to_engine(dev->engine, req); } -static void crypto_ahash_dma_start(struct rk_crypto_info *dev) +static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) { - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); CRYPTO_WRITE(dev,
RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | (RK_CRYPTO_HASH_START << 16)); } -static int rk_ahash_set_data_start(struct rk_crypto_info *dev) +static int rk_hash_prepare(struct crypto_engine *engine, void *breq) +{ + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + int ret; + + ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + if (ret <= 0) + return -EINVAL; + + rctx->nrsg = ret; + + return 0; +} + +static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) { - int err; + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); - err = dev->load_data(dev, dev->sg_src, NULL); - if (!err) - crypto_ahash_dma_start(dev); - return err; + dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + return 0; } -static int rk_ahash_start(struct rk_crypto_info *dev) +static int rk_hash_run(struct crypto_engine *engine, void *breq) { - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - struct rk_ahash_rctx *rctx; - - dev->total = req->nbytes; - dev->left_bytes = req->nbytes; - dev->aligned = 0; - dev->align_size = 4; - dev->sg_dst = NULL; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - rctx = ahash_request_ctx(req); + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + struct scatterlist *sg = areq->src; + int err = 0; + int i; + u32 v; + rctx->mode = 0; - tfm = crypto_ahash_reqtfm(req); switch (crypto_ahash_digestsize(tfm)) { case SHA1_DIGEST_SIZE: rctx->mode = RK_CRYPTO_HASH_SHA1; @@ -220,32 +267,26 @@ static int rk_ahash_start(struct rk_crypto_info *dev) rctx->mode = RK_CRYPTO_HASH_MD5; break; default: - return -EINVAL; + err = -EINVAL; + goto theend; } - rk_ahash_reg_init(dev); - return rk_ahash_set_data_start(dev); -} - -static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - - dev->unload_data(dev); - if (dev->left_bytes) { - if (dev->aligned) { - if (sg_is_last(dev->sg_src)) { - dev_warn(dev->dev, "[%s:%d], Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; - } - dev->sg_src = sg_next(dev->sg_src); + rk_ahash_reg_init(areq); + + while (sg) { + reinit_completion(&tctx->dev->complete); + tctx->dev->status = 0; + crypto_ahash_dma_start(tctx->dev, sg); + wait_for_completion_interruptible_timeout(&tctx->dev->complete, + msecs_to_jiffies(2000)); + if (!tctx->dev->status) { + dev_err(tctx->dev->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; } - err = rk_ahash_set_data_start(dev); - } else { + sg = sg_next(sg); + } + /* * it will take some time to process date after last dma * transmission. @@ -256,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) * efficiency, and make it response quickly when dma * complete. 
*/ - while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) - udelay(10); - - tfm = crypto_ahash_reqtfm(req); - memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, - crypto_ahash_digestsize(tfm)); - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS)) + udelay(10); + + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { + v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); + put_unaligned_le32(v, areq->result + i * 4); } -out_rx: - return err; +theend: + local_bh_disable(); + crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); + + return 0; } static int rk_cra_hash_init(struct crypto_tfm *tfm) @@ -281,14 +324,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) algt = container_of(alg, struct rk_crypto_tmp, alg.hash); tctx->dev = algt->dev; - tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); - if (!tctx->dev->addr_vir) { - dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); - return -ENOMEM; - } - tctx->dev->start = rk_ahash_start; - tctx->dev->update = rk_ahash_crypto_rx; - tctx->dev->complete = rk_ahash_crypto_complete; /* for fallback */ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, @@ -297,19 +332,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); return PTR_ERR(tctx->fallback_tfm); } + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct rk_ahash_rctx) + crypto_ahash_reqsize(tctx->fallback_tfm)); - return tctx->dev->enable_clk(tctx->dev); + tctx->enginectx.op.do_one_request = rk_hash_run; + tctx->enginectx.op.prepare_request = rk_hash_prepare; + tctx->enginectx.op.unprepare_request = rk_hash_unprepare; + + return 0; } static void rk_cra_hash_exit(struct crypto_tfm *tfm) { struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); - free_page((unsigned long)tctx->dev->addr_vir); - return tctx->dev->disable_clk(tctx->dev); + crypto_free_ahash(tctx->fallback_tfm); } struct rk_crypto_tmp rk_ahash_sha1 = { diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 5bbf0d2722e11..67a7e05d5ae31 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -9,23 +9,77 @@ * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
*/ #include <linux/device.h> +#include <crypto/scatterwalk.h> #include "rk3288_crypto.h" #define RK_CRYPTO_DEC BIT(0) -static void rk_crypto_complete(struct crypto_async_request *base, int err) +static int rk_cipher_need_fallback(struct skcipher_request *req) { - if (base->complete) - base->complete(base, err); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + unsigned int bs = crypto_skcipher_blocksize(tfm); + struct scatterlist *sgs, *sgd; + unsigned int stodo, dtodo, len; + + if (!req->cryptlen) + return true; + + len = req->cryptlen; + sgs = req->src; + sgd = req->dst; + while (sgs && sgd) { + if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { + return true; + } + if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { + return true; + } + stodo = min(len, sgs->length); + if (stodo % bs) { + return true; + } + dtodo = min(len, sgd->length); + if (dtodo % bs) { + return true; + } + if (stodo != dtodo) { + return true; + } + len -= stodo; + sgs = sg_next(sgs); + sgd = sg_next(sgd); + } + return false; +} + +static int rk_cipher_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + int err; + + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (rctx->mode & RK_CRYPTO_DEC) + err = crypto_skcipher_decrypt(&rctx->fallback_req); + else + err = crypto_skcipher_encrypt(&rctx->fallback_req); + return err; } static int rk_handle_req(struct rk_crypto_info *dev, struct skcipher_request *req) { - if (!IS_ALIGNED(req->cryptlen, dev->align_size)) - return -EINVAL; - else - return dev->enqueue(dev, &req->base); + struct crypto_engine *engine = dev->engine; + + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); + + return crypto_transfer_skcipher_request_to_engine(engine, req); } static int rk_aes_setkey(struct crypto_skcipher *cipher, @@ -38,8 +92,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, keylen != AES_KEYSIZE_256) return -EINVAL; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_des_setkey(struct crypto_skcipher *cipher, @@ -53,8 +108,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_tdes_setkey(struct crypto_skcipher *cipher, @@ -68,17 +124,19 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_aes_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_ECB_MODE; + rctx->mode = RK_CRYPTO_AES_ECB_MODE; return rk_handle_req(dev, req); } @@ -86,9 +144,10 @@
static int rk_aes_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -96,9 +155,10 @@ static int rk_aes_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_CBC_MODE; + rctx->mode = RK_CRYPTO_AES_CBC_MODE; return rk_handle_req(dev, req); } @@ -106,9 +166,10 @@ static int rk_aes_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -116,9 +177,10 @@ static int rk_des_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = 0; + rctx->mode = 0; return rk_handle_req(dev, req); } @@ -126,9 +188,10 @@ static int rk_des_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -136,9 +199,10 @@ static int rk_des_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -146,9 +210,10 @@ static int rk_des_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -156,9 +221,10 @@ static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT; + rctx->mode = RK_CRYPTO_TDES_SELECT; return rk_handle_req(dev, req); } @@ -166,9 +232,10 @@ static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct 
rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -176,9 +243,10 @@ static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -186,43 +254,42 @@ static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } -static void rk_ablk_hw_init(struct rk_crypto_info *dev) +static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) { - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); - u32 ivsize, block, conf_reg = 0; + u32 block, conf_reg = 0; block = crypto_tfm_alg_blocksize(tfm); - ivsize = crypto_skcipher_ivsize(cipher); if (block == DES_BLOCK_SIZE) { - ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | RK_CRYPTO_TDES_BYTESWAP_KEY | RK_CRYPTO_TDES_BYTESWAP_IV; - CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); conf_reg = RK_CRYPTO_DESSEL; } else { - ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | RK_CRYPTO_AES_KEY_CHANGE | RK_CRYPTO_AES_BYTESWAP_KEY | RK_CRYPTO_AES_BYTESWAP_IV; if (ctx->keylen == AES_KEYSIZE_192) - ctx->mode |= RK_CRYPTO_AES_192BIT_key; + rctx->mode |= RK_CRYPTO_AES_192BIT_key; else if (ctx->keylen == AES_KEYSIZE_256) - ctx->mode |= RK_CRYPTO_AES_256BIT_key; - CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); + rctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); } conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | RK_CRYPTO_BYTESWAP_BRFIFO; @@ -231,146 +298,138 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev) RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); } -static void crypto_dma_start(struct rk_crypto_info *dev) +static void crypto_dma_start(struct rk_crypto_info *dev, + struct scatterlist *sgs, + struct scatterlist *sgd, unsigned int todo) { - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); - CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); + CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, 
sg_dma_address(sgd)); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | _SBF(RK_CRYPTO_BLOCK_START, 16)); } -static int rk_set_data_start(struct rk_crypto_info *dev) +static int rk_cipher_run(struct crypto_engine *engine, void *async_req) { - int err; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + - dev->sg_src->offset + dev->sg_src->length - ivsize; - - /* Store the iv that need to be updated in chain mode. - * And update the IV buffer to contain the next IV for decryption mode. - */ - if (ctx->mode & RK_CRYPTO_DEC) { - memcpy(ctx->iv, src_last_blk, ivsize); - sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, - ivsize, dev->total - ivsize); - } - - err = dev->load_data(dev, dev->sg_src, dev->sg_dst); - if (!err) - crypto_dma_start(dev); - return err; -} - -static int rk_ablk_start(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - unsigned long flags; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sgs, *sgd; int err = 0; + int ivsize = crypto_skcipher_ivsize(tfm); + int offset; + u8 iv[AES_BLOCK_SIZE]; + u8 biv[AES_BLOCK_SIZE]; + u8 *ivtouse = areq->iv; + unsigned int len = areq->cryptlen; + unsigned int todo; + + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + if (rctx->mode & RK_CRYPTO_DEC) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(rctx->backup_iv, areq->src, + offset, ivsize, 0); + } + } - dev->left_bytes = req->cryptlen; - dev->total = req->cryptlen; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - dev->sg_dst = req->dst; - dev->dst_nents = sg_nents(req->dst); - dev->aligned = 1; - - spin_lock_irqsave(&dev->lock, flags); - rk_ablk_hw_init(dev); - err = rk_set_data_start(dev); - spin_unlock_irqrestore(&dev->lock, flags); - return err; -} + sgs = areq->src; + sgd = areq->dst; -static void rk_iv_copyback(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - - /* Update the IV buffer to contain the next IV for encryption mode. 
*/ - if (!(ctx->mode & RK_CRYPTO_DEC)) { - if (dev->aligned) { - memcpy(req->iv, sg_virt(dev->sg_dst) + - dev->sg_dst->length - ivsize, ivsize); + while (sgs && sgd && len) { + if (!sgs->length) { + sgs = sg_next(sgs); + sgd = sg_next(sgd); + continue; + } + if (rctx->mode & RK_CRYPTO_DEC) { + /* we backup last block of source to be used as IV at next step */ + offset = sgs->length - ivsize; + scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); + } + if (sgs == sgd) { + err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + } else { + err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_sgs; + } + } + err = 0; + rk_ablk_hw_init(ctx->dev, areq); + if (ivsize) { + if (ivsize == DES_BLOCK_SIZE) + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); + else + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); + } + reinit_completion(&ctx->dev->complete); + ctx->dev->status = 0; + + todo = min(sg_dma_len(sgs), len); + len -= todo; + crypto_dma_start(ctx->dev, sgs, sgd, todo / 4); + wait_for_completion_interruptible_timeout(&ctx->dev->complete, + msecs_to_jiffies(2000)); + if (!ctx->dev->status) { + dev_err(ctx->dev->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; + } + if (sgs == sgd) { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); + } + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(iv, biv, ivsize); + ivtouse = iv; } else { - memcpy(req->iv, dev->addr_vir + - dev->count - ivsize, ivsize); + offset = sgd->length - ivsize; + scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); + ivtouse = iv; } + sgs = sg_next(sgs); + sgd = sg_next(sgd); } -} - -static void rk_update_iv(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *new_iv = NULL; - if (ctx->mode & RK_CRYPTO_DEC) { - new_iv = ctx->iv; - } else { - new_iv = page_address(sg_page(dev->sg_dst)) + - dev->sg_dst->offset + dev->sg_dst->length - ivsize; + if (areq->iv && ivsize > 0) { + offset = areq->cryptlen - ivsize; + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(areq->iv, rctx->backup_iv, ivsize); + memzero_explicit(rctx->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); + } } - if (ivsize == DES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); - else if (ivsize == AES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); -} +theend: + local_bh_disable(); + crypto_finalize_skcipher_request(engine, areq, err); + local_bh_enable(); + return 0; -/* return: - * true some err was occurred - * fault no err, continue - */ -static int rk_ablk_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - - dev->unload_data(dev); - if (!dev->aligned) { - if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, - dev->addr_vir, dev->count, - dev->total - dev->left_bytes - - dev->count)) { - err = -EINVAL; - goto out_rx; - } - } - if (dev->left_bytes) { - rk_update_iv(dev); - if (dev->aligned) { 
- if (sg_is_last(dev->sg_src)) { - dev_err(dev->dev, "[%s:%d] Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; - } - dev->sg_src = sg_next(dev->sg_src); - dev->sg_dst = sg_next(dev->sg_dst); - } - err = rk_set_data_start(dev); +theend_sgs: + if (sgs == sgd) { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); } else { - rk_iv_copyback(dev); - /* here show the calculation is over without any err */ - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); } -out_rx: +theend_iv: return err; } @@ -378,26 +437,34 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); struct rk_crypto_tmp *algt; algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); ctx->dev = algt->dev; - ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1; - ctx->dev->start = rk_ablk_start; - ctx->dev->update = rk_ablk_rx; - ctx->dev->complete = rk_crypto_complete; - ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); - return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM; + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { + dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(ctx->fallback_tfm)); + return PTR_ERR(ctx->fallback_tfm); + } + + tfm->reqsize = sizeof(struct rk_cipher_rctx) + + crypto_skcipher_reqsize(ctx->fallback_tfm); + + ctx->enginectx.op.do_one_request = rk_cipher_run; + + return 0; } static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - free_page((unsigned long)ctx->dev->addr_vir); - ctx->dev->disable_clk(ctx->dev); + memzero_explicit(ctx->key, ctx->keylen); + crypto_free_skcipher(ctx->fallback_tfm); } struct rk_crypto_tmp rk_ecb_aes_alg = { @@ -406,7 +473,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, @@ -428,7 +495,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, @@ -451,7 +518,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, @@ -473,7 +540,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), 
.base.cra_alignmask = 0x07, @@ -496,7 +563,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3-ede-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, @@ -518,7 +585,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3-ede-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index d60679c798224..2043dd0611217 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -25,10 +25,10 @@ #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> -#include <linux/mutex.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/spinlock.h> #define SHA_BUFFER_LEN PAGE_SIZE #define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE @@ -195,7 +195,7 @@ struct sahara_dev { void __iomem *regs_base; struct clk *clk_ipg; struct clk *clk_ahb; - struct mutex queue_mutex; + spinlock_t queue_spinlock; struct task_struct *kthread; struct completion dma_completion; @@ -641,9 +641,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) rctx->mode = mode; - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); err = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1042,10 +1042,10 @@ static int sahara_queue_manage(void *data) do { __set_current_state(TASK_INTERRUPTIBLE); - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -1091,9 +1091,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last) rctx->first = 1; } - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); ret = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1454,7 +1454,7 @@ static int sahara_probe(struct platform_device *pdev) crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); - mutex_init(&dev->queue_mutex); + spin_lock_init(&dev->queue_spinlock); dev_ptr = dev; diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c index cb6401c9e9a4f..acf31cc1dbcca 100644 --- a/drivers/dax/hmem/device.c +++ b/drivers/dax/hmem/device.c @@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r) .start = r->start, .end = r->end, .flags = IORESOURCE_MEM, + .desc = IORES_DESC_SOFT_RESERVED, }; struct platform_device *pdev; struct memregion_info info; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 829128c0cc68c..c6f460550f5e9 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -740,8 +740,7 @@ static void devfreq_dev_release(struct device *dev) * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. - * @data: private data for the governor.
The devfreq framework does not - * touch this value. + * @data: private data passed from the devfreq driver to governors; governors should not change it. */ struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, @@ -953,8 +952,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res) * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. - * @data: private data for the governor. The devfreq framework does not - * touch this value. + * @data: private data passed from the devfreq driver to governors; governors should not change it. * * This function manages automatically the memory of devfreq device using device * resource management and simplify the free operation for memory of devfreq diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index 0fd6c48510711..8a9cf8220808e 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -21,7 +21,7 @@ struct userspace_data { static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) { - struct userspace_data *data = df->data; + struct userspace_data *data = df->governor_data; if (data->valid) *freq = data->user_frequency; @@ -40,7 +40,7 @@ static ssize_t store_freq(struct device *dev, struct device_attribute *attr, int err = 0; mutex_lock(&devfreq->lock); - data = devfreq->data; + data = devfreq->governor_data; sscanf(buf, "%lu", &wanted); data->user_frequency = wanted; @@ -60,7 +60,7 @@ static ssize_t show_freq(struct device *dev, struct device_attribute *attr, int err = 0; mutex_lock(&devfreq->lock); - data = devfreq->data; + data = devfreq->governor_data; if (data->valid) err = sprintf(buf, "%lu\n", data->user_frequency); @@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq) goto out; } data->valid = false; - devfreq->data = data; + devfreq->governor_data = data; err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); out: @@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq) if (devfreq->dev.kobj.sd) sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); - kfree(devfreq->data); - devfreq->data = NULL; + kfree(devfreq->governor_data); + devfreq->governor_data = NULL; } static int devfreq_userspace_handler(struct devfreq *devfreq, diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 193b40e7aec03..1414a1c81834a 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -110,6 +110,12 @@ static char dio_no_name[] = { 0 }; #endif /* CONFIG_DIO_CONSTANTS */ +static void dio_dev_release(struct device *dev) +{ + struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev); + kfree(ddev); +} + int __init dio_find(int deviceid) { /* Called to find a DIO device before the full bus scan has run.
@@ -224,6 +230,7 @@ static int __init dio_init(void) dev->bus = &dio_bus; dev->dev.parent = &dio_bus.dev; dev->dev.bus = &dio_bus_type; + dev->dev.release = dio_dev_release; dev->scode = scode; dev->resource.start = pa; dev->resource.end = pa + DIO_SIZE(scode, va); @@ -251,6 +258,7 @@ static int __init dio_init(void) if (error) { pr_err("DIO: Error registering device %s\n", dev->name); + put_device(&dev->dev); continue; } error = dio_create_sysfs_dev_files(dev); diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 798f86fcd50fa..dcbb023acc455 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -209,18 +209,6 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) return ERR_PTR(-EINVAL); } - /* check the name is unique */ - mutex_lock(&heap_list_lock); - list_for_each_entry(h, &heap_list, list) { - if (!strcmp(h->name, exp_info->name)) { - mutex_unlock(&heap_list_lock); - pr_err("dma_heap: Already registered heap named %s\n", - exp_info->name); - return ERR_PTR(-EINVAL); - } - } - mutex_unlock(&heap_list_lock); - heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); @@ -259,13 +247,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) err_ret = ERR_CAST(dev_ret); goto err2; } - /* Add heap to the list */ + mutex_lock(&heap_list_lock); + /* check the name is unique */ + list_for_each_entry(h, &heap_list, list) { + if (!strcmp(h->name, exp_info->name)) { + mutex_unlock(&heap_list_lock); + pr_err("dma_heap: Already registered heap named %s\n", + exp_info->name); + err_ret = ERR_PTR(-EINVAL); + goto err3; + } + } + + /* Add heap to the list */ list_add(&heap->list, &heap_list); mutex_unlock(&heap_list_lock); return heap; +err3: + device_destroy(dma_heap_class, heap->heap_devt); err2: cdev_del(&heap->heap_cdev); err1: diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index b624f3d8f0e64..e359c5c6c4df2 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -118,17 +118,20 @@ static int begin_cpu_udmabuf(struct dma_buf *buf, { struct udmabuf *ubuf = buf->priv; struct device *dev = ubuf->device->this_device; + int ret = 0; if (!ubuf->sg) { ubuf->sg = get_sg_table(dev, buf, direction); - if (IS_ERR(ubuf->sg)) - return PTR_ERR(ubuf->sg); + if (IS_ERR(ubuf->sg)) { + ret = PTR_ERR(ubuf->sg); + ubuf->sg = NULL; + } } else { dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction); } - return 0; + return ret; } static int end_cpu_udmabuf(struct dma_buf *buf, diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 7eaee5b705b1b..6a4f9697b5742 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -237,6 +237,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) ATC_SPIP_BOUNDARY(first->boundary)); channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) | ATC_DPIP_BOUNDARY(first->boundary)); + /* Don't allow CPU to reorder channel enable. 
*/ + wmb(); dma_writel(atdma, CHER, atchan->mask); vdbg_dump_regs(atchan); @@ -297,7 +299,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc; int ret; - u32 ctrla, dscr, trials; + u32 ctrla, dscr; + unsigned int i; /* * If the cookie doesn't match to the currently running transfer then @@ -367,7 +370,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) dscr = channel_readl(atchan, DSCR); rmb(); /* ensure DSCR is read before CTRLA */ ctrla = channel_readl(atchan, CTRLA); - for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { u32 new_dscr; rmb(); /* ensure DSCR is read after CTRLA */ @@ -393,7 +396,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) rmb(); /* ensure DSCR is read before CTRLA */ ctrla = channel_readl(atchan, CTRLA); } - if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + if (unlikely(i == ATC_MAX_DSCR_TRIALS)) return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ @@ -443,18 +446,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) if (!atc_chan_is_cyclic(atchan)) dma_cookie_complete(txd); - /* If the transfer was a memset, free our temporary buffer */ - if (desc->memset_buffer) { - dma_pool_free(atdma->memset_pool, desc->memset_vaddr, - desc->memset_paddr); - desc->memset_buffer = false; - } - - /* move children to free_list */ - list_splice_init(&desc->tx_list, &atchan->free_list); - /* move myself to free_list */ - list_move(&desc->desc_node, &atchan->free_list); - spin_unlock_irqrestore(&atchan->lock, flags); dma_descriptor_unmap(txd); @@ -464,42 +455,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) dmaengine_desc_get_callback_invoke(txd, NULL); dma_run_dependencies(txd); -} - -/** - * atc_complete_all - finish work for all transactions - * @atchan: channel to complete transactions for - * - * Eventually submit queued descriptors if any - * - * Assume channel is idle while calling this function - * Called with atchan->lock held and bh disabled - */ -static void atc_complete_all(struct at_dma_chan *atchan) -{ - struct at_desc *desc, *_desc; - LIST_HEAD(list); - unsigned long flags; - - dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); spin_lock_irqsave(&atchan->lock, flags); - - /* - * Submit queued descriptors ASAP, i.e. before we go through - * the completed ones. 
- */ - if (!list_empty(&atchan->queue)) - atc_dostart(atchan, atc_first_queued(atchan)); - /* empty active_list now it is completed */ - list_splice_init(&atchan->active_list, &list); - /* empty queue list by moving descriptors (if any) to active_list */ - list_splice_init(&atchan->queue, &atchan->active_list); - + /* move children to free_list */ + list_splice_init(&desc->tx_list, &atchan->free_list); + /* add myself to free_list */ + list_add(&desc->desc_node, &atchan->free_list); spin_unlock_irqrestore(&atchan->lock, flags); - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + /* If the transfer was a memset, free our temporary buffer */ + if (desc->memset_buffer) { + dma_pool_free(atdma->memset_pool, desc->memset_vaddr, + desc->memset_paddr); + desc->memset_buffer = false; + } } /** @@ -508,26 +477,28 @@ static void atc_complete_all(struct at_dma_chan *atchan) */ static void atc_advance_work(struct at_dma_chan *atchan) { + struct at_desc *desc; unsigned long flags; - int ret; dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); spin_lock_irqsave(&atchan->lock, flags); - ret = atc_chan_is_enabled(atchan); - spin_unlock_irqrestore(&atchan->lock, flags); - if (ret) - return; - - if (list_empty(&atchan->active_list) || - list_is_singular(&atchan->active_list)) - return atc_complete_all(atchan); + if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list)) + return spin_unlock_irqrestore(&atchan->lock, flags); - atc_chain_complete(atchan, atc_first_active(atchan)); + desc = atc_first_active(atchan); + /* Remove the transfer node from the active list. */ + list_del_init(&desc->desc_node); + spin_unlock_irqrestore(&atchan->lock, flags); + atc_chain_complete(atchan, desc); /* advance work */ spin_lock_irqsave(&atchan->lock, flags); - atc_dostart(atchan, atc_first_active(atchan)); + if (!list_empty(&atchan->active_list)) { + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + } spin_unlock_irqrestore(&atchan->lock, flags); } @@ -539,6 +510,7 @@ static void atc_advance_work(struct at_dma_chan *atchan) static void atc_handle_error(struct at_dma_chan *atchan) { struct at_desc *bad_desc; + struct at_desc *desc; struct at_desc *child; unsigned long flags; @@ -551,13 +523,12 @@ static void atc_handle_error(struct at_dma_chan *atchan) bad_desc = atc_first_active(atchan); list_del_init(&bad_desc->desc_node); - /* As we are stopped, take advantage to push queued descriptors - * in active_list */ - list_splice_init(&atchan->queue, atchan->active_list.prev); - /* Try to restart the controller */ - if (!list_empty(&atchan->active_list)) - atc_dostart(atchan, atc_first_active(atchan)); + if (!list_empty(&atchan->active_list)) { + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + } /* * KERN_CRITICAL may seem harsh, but since this only happens @@ -672,19 +643,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&atchan->lock, flags); cookie = dma_cookie_assign(tx); - if (list_empty(&atchan->active_list)) { - dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", - desc->txd.cookie); - atc_dostart(atchan, desc); - list_add_tail(&desc->desc_node, &atchan->active_list); - } else { - dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", - desc->txd.cookie); - list_add_tail(&desc->desc_node, &atchan->queue); - } - + list_add_tail(&desc->desc_node, &atchan->queue); 
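The at_hdmac rework in this region restores the dmaengine contract: tx_submit() only assigns a cookie and queues the descriptor, and the hardware is kicked from issue_pending() or from completion handling. A hedged sketch of the client-side flow this relies on (the dmaengine calls are real APIs; chan, dst, src and len are placeholders):

	#include <linux/dmaengine.h>

	static dma_cookie_t submit_then_issue(struct dma_chan *chan,
					      dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *txd;
		dma_cookie_t cookie;

		txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
		if (!txd)
			return -ENOMEM;
		cookie = dmaengine_submit(txd);	/* assigns a cookie; nothing runs yet */
		dma_async_issue_pending(chan);	/* hardware is started here, not in submit */
		return cookie;
	}

Starting the channel only from issue_pending() is also what lets the rework keep all list manipulation under atchan->lock without calling back into clients while the lock is held.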
spin_unlock_irqrestore(&atchan->lock, flags); + dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", + desc->txd.cookie); return cookie; } @@ -1418,11 +1381,8 @@ static int atc_terminate_all(struct dma_chan *chan) struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); int chan_id = atchan->chan_common.chan_id; - struct at_desc *desc, *_desc; unsigned long flags; - LIST_HEAD(list); - dev_vdbg(chan2dev(chan), "%s\n", __func__); /* @@ -1441,19 +1401,15 @@ static int atc_terminate_all(struct dma_chan *chan) cpu_relax(); /* active_list entries will end up before queued entries */ - list_splice_init(&atchan->queue, &list); - list_splice_init(&atchan->active_list, &list); - - spin_unlock_irqrestore(&atchan->lock, flags); - - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + list_splice_tail_init(&atchan->queue, &atchan->free_list); + list_splice_tail_init(&atchan->active_list, &atchan->free_list); clear_bit(ATC_IS_PAUSED, &atchan->status); /* if channel dedicated to cyclic operations, free it */ clear_bit(ATC_IS_CYCLIC, &atchan->status); + spin_unlock_irqrestore(&atchan->lock, flags); + return 0; } @@ -1508,20 +1464,26 @@ atc_tx_status(struct dma_chan *chan, } /** - * atc_issue_pending - try to finish work + * atc_issue_pending - takes the first transaction descriptor in the pending + * queue and starts the transfer. * @chan: target DMA channel */ static void atc_issue_pending(struct dma_chan *chan) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_desc *desc; + unsigned long flags; dev_vdbg(chan2dev(chan), "issue_pending\n"); - /* Not needed for cyclic transfers */ - if (atc_chan_is_cyclic(atchan)) - return; + spin_lock_irqsave(&atchan->lock, flags); + if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue)) + return spin_unlock_irqrestore(&atchan->lock, flags); - atc_advance_work(atchan); + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + spin_unlock_irqrestore(&atchan->lock, flags); } /** @@ -1939,7 +1901,11 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? 
"slave " : "", plat_dat->nr_channels); - dma_async_device_register(&atdma->dma_common); + err = dma_async_device_register(&atdma->dma_common); + if (err) { + dev_err(&pdev->dev, "Unable to register: %d.\n", err); + goto err_dma_async_device_register; + } /* * Do not return an error if the dmac node is not present in order to @@ -1959,6 +1925,7 @@ static int __init at_dma_probe(struct platform_device *pdev) err_of_dma_controller_register: dma_async_device_unregister(&atdma->dma_common); +err_dma_async_device_register: dma_pool_destroy(atdma->memset_pool); err_memset_pool_create: dma_pool_destroy(atdma->dma_desc_pool); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 80fc2fe8c77ea..8dc82c7b1dcf0 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -164,13 +164,13 @@ /* LLI == Linked List Item; aka DMA buffer descriptor */ struct at_lli { /* values that are not changed by hardware */ - dma_addr_t saddr; - dma_addr_t daddr; + u32 saddr; + u32 daddr; /* value that may get written back: */ - u32 ctrla; + u32 ctrla; /* more values that are not changed by hardware */ - u32 ctrlb; - dma_addr_t dscr; /* chain to next lli */ + u32 ctrlb; + u32 dscr; /* chain to next lli */ }; /** diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c index 2fd87f83cf90b..e169f18da551f 100644 --- a/drivers/dma/bestcomm/ata.c +++ b/drivers/dma/bestcomm/ata.c @@ -133,7 +133,7 @@ void bcom_ata_reset_bd(struct bcom_task *tsk) struct bcom_ata_var *var; /* Reset all BD */ - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); tsk->index = 0; tsk->outdex = 0; diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index d91cbbe7a48fb..8c42e5ca00a99 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -95,7 +95,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); if (!tsk->bd) goto error; - memset(tsk->bd, 0x00, bd_count * bd_size); + memset_io(tsk->bd, 0x00, bd_count * bd_size); tsk->num_bd = bd_count; tsk->bd_size = bd_size; @@ -186,16 +186,16 @@ bcom_load_image(int task, u32 *task_image) inc = bcom_task_inc(task); /* Clear & copy */ - memset(var, 0x00, BCOM_VAR_SIZE); - memset(inc, 0x00, BCOM_INC_SIZE); + memset_io(var, 0x00, BCOM_VAR_SIZE); + memset_io(inc, 0x00, BCOM_INC_SIZE); desc_src = (u32 *)(hdr + 1); var_src = desc_src + hdr->desc_size; inc_src = var_src + hdr->var_size; - memcpy(desc, desc_src, hdr->desc_size * sizeof(u32)); - memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); - memcpy(inc, inc_src, hdr->inc_size * sizeof(u32)); + memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32)); + memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); + memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32)); return 0; } @@ -302,13 +302,13 @@ static int bcom_engine_init(void) return -ENOMEM; } - memset(bcom_eng->tdt, 0x00, tdt_size); - memset(bcom_eng->ctx, 0x00, ctx_size); - memset(bcom_eng->var, 0x00, var_size); - memset(bcom_eng->fdt, 0x00, fdt_size); + memset_io(bcom_eng->tdt, 0x00, tdt_size); + memset_io(bcom_eng->ctx, 0x00, ctx_size); + memset_io(bcom_eng->var, 0x00, var_size); + memset_io(bcom_eng->fdt, 0x00, fdt_size); /* Copy the FDT for the EU#3 */ - memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); + memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); /* Initialize Task base structure */ for (task=0; taskindex = 0; 
tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA); @@ -241,7 +241,7 @@ bcom_fec_tx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA); diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c index 906ddba6a6f5d..8a24a5cbc2633 100644 --- a/drivers/dma/bestcomm/gen_bd.c +++ b/drivers/dma/bestcomm/gen_bd.c @@ -142,7 +142,7 @@ bcom_gen_bd_rx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA); @@ -226,7 +226,7 @@ bcom_gen_bd_tx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index af3ee288bc117..4ec7bb58c195f 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan) /* The channel is already in use, update client count */ if (chan->client_count) { __module_get(owner); - goto out; + chan->client_count++; + return 0; } if (!try_module_get(owner)) @@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan) goto err_out; } + chan->client_count++; + if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) balance_ref_count(chan); -out: - chan->client_count++; return 0; err_out: diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 14c1ac26f8664..b8b5d91b7c1a2 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -551,6 +551,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) /* The bad descriptor currently is in the head of vc list */ vd = vchan_next_desc(&chan->vc); + if (!vd) { + dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n", + axi_chan_name(chan)); + goto out; + } /* Remove the completed descriptor from issued list */ list_del(&vd->node); @@ -565,6 +570,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) /* Try to restart the controller */ axi_chan_start_first_queued(chan); +out: spin_unlock_irqrestore(&chan->vc.lock, flags); } diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c index 3e83769615d1c..8f16513673100 100644 --- a/drivers/dma/hisi_dma.c +++ b/drivers/dma/hisi_dma.c @@ -185,7 +185,8 @@ static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0); } -static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) +static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan, + bool disable) { struct hisi_dma_dev *hdma_dev = chan->hdma_dev; u32 index = chan->qp_num, tmp; @@ -206,8 +207,11 @@ static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) hisi_dma_do_reset(hdma_dev, index); hisi_dma_reset_qp_point(hdma_dev, index); 
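The bestcomm conversions earlier in this run swap memset()/memcpy() for memset_io()/memcpy_toio() because the task descriptors live in ioremap()ed SRAM: the plain string routines may use cache-line, vector or unaligned accesses that are not architecturally valid on device memory and can hang the bus. A minimal sketch of the convention, assuming bd comes from an ioremap-backed allocator such as bcom_sram_alloc():

	#include <linux/io.h>

	/* bd carries the __iomem cookie because it maps device SRAM, not RAM. */
	static void reset_bds_sketch(void __iomem *bd, size_t len)
	{
		memset_io(bd, 0x00, len);	/* byte-wise clear that is safe on device memory */
	}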
hisi_dma_pause_dma(hdma_dev, index, false); - hisi_dma_enable_dma(hdma_dev, index, true); - hisi_dma_unmask_irq(hdma_dev, index); + + if (!disable) { + hisi_dma_enable_dma(hdma_dev, index, true); + hisi_dma_unmask_irq(hdma_dev, index); + } ret = readl_relaxed_poll_timeout(hdma_dev->base + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, @@ -223,7 +227,7 @@ static void hisi_dma_free_chan_resources(struct dma_chan *c) struct hisi_dma_chan *chan = to_hisi_dma_chan(c); struct hisi_dma_dev *hdma_dev = chan->hdma_dev; - hisi_dma_reset_hw_chan(chan); + hisi_dma_reset_or_disable_hw_chan(chan, false); vchan_free_chan_resources(&chan->vc); memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); @@ -272,7 +276,6 @@ static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) vd = vchan_next_desc(&chan->vc); if (!vd) { - dev_err(&hdma_dev->pdev->dev, "no issued task!\n"); chan->desc = NULL; return; } @@ -304,7 +307,7 @@ static void hisi_dma_issue_pending(struct dma_chan *c) spin_lock_irqsave(&chan->vc.lock, flags); - if (vchan_issue_pending(&chan->vc)) + if (vchan_issue_pending(&chan->vc) && !chan->desc) hisi_dma_start_transfer(chan); spin_unlock_irqrestore(&chan->vc.lock, flags); @@ -399,7 +402,7 @@ static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) { - hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]); + hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true); } static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) @@ -438,18 +441,15 @@ static irqreturn_t hisi_dma_irq(int irq, void *data) desc = chan->desc; cqe = chan->cq + chan->cq_head; if (desc) { + chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth; + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, + chan->qp_num, chan->cq_head); if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { - chan->cq_head = (chan->cq_head + 1) % - hdma_dev->chan_depth; - hisi_dma_chan_write(hdma_dev->base, - HISI_DMA_CQ_HEAD_PTR, chan->qp_num, - chan->cq_head); vchan_cookie_complete(&desc->vd); + hisi_dma_start_transfer(chan); } else { dev_err(&hdma_dev->pdev->dev, "task error!\n"); } - - chan->desc = NULL; } spin_unlock_irqrestore(&chan->vc.lock, flags); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 2283dcd8bf91d..6514db824473c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1363,10 +1363,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, sdma_config_ownership(sdmac, false, true, false); if (sdma_load_context(sdmac)) - goto err_desc_out; + goto err_bd_out; return desc; +err_bd_out: + sdma_free_bd(desc); err_desc_out: kfree(desc); err_out: diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 37ff4ec7db76f..e2070df6cad28 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -656,7 +656,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) if (active - i == 0) { dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", __func__); - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } /* microsecond delay by sysfs variable per pending descriptor */ @@ -682,7 +682,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan) if (chanerr & (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) { - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); 
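The ioat changes above hinge on the difference between the two rearming primitives: mod_timer() (re)activates a timer unconditionally, while mod_timer_pending() is a no-op once the timer has been deactivated, so a racing completion path can no longer resurrect a watchdog that teardown already stopped with del_timer_sync(). Sketch, assuming a timer initialized elsewhere with timer_setup():

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	/* Rearm a watchdog from a completion path without resurrecting it. */
	static void kick_watchdog_sketch(struct timer_list *t)
	{
		/* A no-op once del_timer()/del_timer_sync() has deactivated t. */
		mod_timer_pending(t, jiffies + msecs_to_jiffies(100));
	}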
ioat_eh(ioat_chan); } } @@ -879,7 +879,7 @@ static void check_active(struct ioatdma_chan *ioat_chan) } if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan) diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 9b0d463f89bbd..4800c596433ad 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -899,6 +899,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev) tasklet_kill(&xor_dev->irq_tasklet); clk_disable_unprepare(xor_dev->clk); + clk_disable_unprepare(xor_dev->reg_clk); return 0; } diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 65f816b40c328..dc147cc2436e9 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -167,29 +167,11 @@ static struct mxs_dma_type mxs_dma_types[] = { } }; -static const struct platform_device_id mxs_dma_ids[] = { - { - .name = "imx23-dma-apbh", - .driver_data = (kernel_ulong_t) &mxs_dma_types[0], - }, { - .name = "imx23-dma-apbx", - .driver_data = (kernel_ulong_t) &mxs_dma_types[1], - }, { - .name = "imx28-dma-apbh", - .driver_data = (kernel_ulong_t) &mxs_dma_types[2], - }, { - .name = "imx28-dma-apbx", - .driver_data = (kernel_ulong_t) &mxs_dma_types[3], - }, { - /* end of list */ - } -}; - static const struct of_device_id mxs_dma_dt_ids[] = { - { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], }, - { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], }, - { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], }, - { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], }, + { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], }, + { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], }, + { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], }, + { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids); @@ -688,7 +670,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, return mxs_chan->status; } -static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) +static int mxs_dma_init(struct mxs_dma_engine *mxs_dma) { int ret; @@ -759,11 +741,9 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, ofdma->of_node); } -static int __init mxs_dma_probe(struct platform_device *pdev) +static int mxs_dma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct platform_device_id *id_entry; - const struct of_device_id *of_id; const struct mxs_dma_type *dma_type; struct mxs_dma_engine *mxs_dma; struct resource *iores; @@ -779,13 +759,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev) return ret; } - of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev); - if (of_id) - id_entry = of_id->data; - else - id_entry = platform_get_device_id(pdev); - - dma_type = (struct mxs_dma_type *)id_entry->driver_data; + dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev); mxs_dma->type = dma_type->type; mxs_dma->dev_id = dma_type->id; @@ -865,11 +839,7 @@ static struct platform_driver mxs_dma_driver = { .name = "mxs-dma", .of_match_table = mxs_dma_dt_ids, }, - .id_table = mxs_dma_ids, + .probe = mxs_dma_probe, }; -static int __init mxs_dma_module_init(void) -{ - return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); -} -subsys_initcall(mxs_dma_module_init); 
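With the platform id_table gone, mxs-dma matches only via devicetree, so the hand-rolled initcall glue above collapses into the builtin_platform_driver() line that follows, and the per-variant data comes straight from the OF match entry. A condensed sketch of that lookup (probe_sketch is illustrative; mxs_dma_type is left opaque here):

	#include <linux/errno.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct mxs_dma_type;	/* defined by the driver; opaque in this sketch */

	static int probe_sketch(struct platform_device *pdev)
	{
		const struct mxs_dma_type *dma_type =
			of_device_get_match_data(&pdev->dev);

		if (!dma_type)	/* defensive: no OF match data found */
			return -ENODEV;
		/* ...configure the engine from dma_type, as the real probe does... */
		return 0;
	}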
+builtin_platform_driver(mxs_dma_driver); diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index b4ef4f19f7dec..68d9d60c051d9 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1249,14 +1249,14 @@ static int pxad_init_phys(struct platform_device *op, return -ENOMEM; for (i = 0; i < nb_phy_chans; i++) - if (platform_get_irq(op, i) > 0) + if (platform_get_irq_optional(op, i) > 0) nr_irq++; for (i = 0; i < nb_phy_chans; i++) { phy = &pdev->phys[i]; phy->base = pdev->base; phy->idx = i; - irq = platform_get_irq(op, i); + irq = platform_get_irq_optional(op, i); if ((nr_irq > 1) && (irq > 0)) ret = devm_request_irq(&op->dev, irq, pxad_chan_handler, diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index c5fa2ef74abc7..d84010c2e4bf1 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -224,7 +224,7 @@ static int tegra_adma_init(struct tegra_adma *tdma) int ret; /* Clear any interrupts */ - tdma_write(tdma, tdma->cdata->global_int_clear, 0x1); + tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1); /* Assert soft reset */ tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1); diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c index 8563a392f30bf..dadab2feca080 100644 --- a/drivers/dma/ti/k3-udma-private.c +++ b/drivers/dma/ti/k3-udma-private.c @@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) } pdev = of_find_device_by_node(udma_node); + if (np != udma_node) + of_node_put(udma_node); + if (!pdev) { pr_debug("UDMA device not found\n"); return ERR_PTR(-EPROBE_DEFER); } - if (np != udma_node) - of_node_put(udma_node); - ud = platform_get_drvdata(pdev); if (!ud) { pr_debug("UDMA has not been probed\n"); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index cab4719e4cf9c..12ad4bb3c5f28 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -3020,9 +3020,10 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Request and map I/O memory */ xdev->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(xdev->regs)) - return PTR_ERR(xdev->regs); - + if (IS_ERR(xdev->regs)) { + err = PTR_ERR(xdev->regs); + goto disable_clks; + } /* Retrieve the DMA engine properties from the device tree */ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2; @@ -3050,7 +3051,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) if (err < 0) { dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); - return err; + goto disable_clks; } err = of_property_read_u32(node, "xlnx,flush-fsync", @@ -3070,7 +3071,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->ext_addr = false; /* Set the dma mask bits */ - dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); + err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); + if (err < 0) { + dev_err(xdev->dev, "DMA mask error %d\n", err); + goto disable_clks; + } /* Initialize the DMA engine */ xdev->common.dev = &pdev->dev; @@ -3114,8 +3119,10 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Initialize the channels */ for_each_child_of_node(node, child) { err = xilinx_dma_child_probe(xdev, child); - if (err < 0) - goto disable_clks; + if (err < 0) { + of_node_put(child); + goto error; + } } if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -3150,12 +3157,12 @@ static int xilinx_dma_probe(struct 
platform_device *pdev) return 0; -disable_clks: - xdma_disable_allclks(xdev); error: for (i = 0; i < xdev->dma_config->max_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); +disable_clks: + xdma_disable_allclks(xdev); return err; } diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 8c4d947fb8486..85c229985f905 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -34,6 +34,9 @@ static DEFINE_MUTEX(device_ctls_mutex); static LIST_HEAD(edac_device_list); +/* Default workqueue processing interval on this instance, in msecs */ +#define DEFAULT_POLL_INTERVAL 1000 + #ifdef CONFIG_EDAC_DEBUG static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) { @@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req) * whole one second to save timers firing all over the period * between integral seconds */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_queue_work(&edac_dev->work, edac_dev->delay); @@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, * timers firing on sub-second basis, while they are happy * to fire together on the 1 second exactly */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_queue_work(&edac_dev->work, edac_dev->delay); @@ -424,17 +427,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) * Then restart the workq on the new delay */ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, - unsigned long value) + unsigned long msec) { - unsigned long jiffs = msecs_to_jiffies(value); - - if (value == 1000) - jiffs = round_jiffies_relative(value); - - edac_dev->poll_msec = value; - edac_dev->delay = jiffs; + edac_dev->poll_msec = msec; + edac_dev->delay = msecs_to_jiffies(msec); - edac_mod_work(&edac_dev->work, jiffs); + /* See comment in edac_device_workq_setup() above */ + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) + edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); + else + edac_mod_work(&edac_dev->work, edac_dev->delay); } int edac_device_alloc_index(void) @@ -473,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev) /* This instance is NOW RUNNING */ edac_dev->op_state = OP_RUNNING_POLL; - /* - * enable workq processing on this instance, - * default = 1000 msec - */ - edac_device_workq_setup(edac_dev, 1000); + edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL); } else { edac_dev->op_state = OP_RUNNING_INTERRUPT; } diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index aa1f91688eb8e..841d238bc3f18 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -56,7 +56,7 @@ bool edac_stop_work(struct delayed_work *work); bool edac_mod_work(struct delayed_work *work, unsigned long delay); extern void edac_device_reset_delay_period(struct edac_device_ctl_info - *edac_dev, unsigned long value); + *edac_dev, unsigned long msec); extern void edac_mc_reset_delay_period(unsigned long value); extern void *edac_align_ptr(void **p, unsigned size, int n_elems); diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c index 61b76ec226af1..19fba258ae108 100644 --- a/drivers/edac/highbank_mc_edac.c +++ 
b/drivers/edac/highbank_mc_edac.c @@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev) drvdata = mci->pvt_info; platform_set_drvdata(pdev, mci); - if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) - return -ENOMEM; + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + res = -ENOMEM; + goto free; + } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -243,6 +245,7 @@ static int highbank_mc_probe(struct platform_device *pdev) edac_mc_del_mc(&pdev->dev); err: devres_release_group(&pdev->dev, NULL); +free: edac_mc_free(mci); return res; } diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 3a7362f968c9f..43dbea6b6e30a 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -53,11 +53,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus, if (unlikely(pci_enable_device(pdev) < 0)) { edac_dbg(2, "Failed to enable device %02x:%02x.%x\n", bus, dev, fun); + pci_dev_put(pdev); return NULL; } - pci_dev_get(pdev); - return pdev; } diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index 97a27e42dd610..c45519f59dc11 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -252,7 +252,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type) static int dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank) { - struct llcc_drv_data *drv = edev_ctl->pvt_info; + struct llcc_drv_data *drv = edev_ctl->dev->platform_data; int ret; ret = dump_syn_reg_values(drv, bank, err_type); @@ -289,7 +289,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl) { struct edac_device_ctl_info *edac_dev_ctl = edev_ctl; - struct llcc_drv_data *drv = edac_dev_ctl->pvt_info; + struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data; irqreturn_t irq_rc = IRQ_NONE; u32 drp_error, trp_error, i; int ret; @@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev) edev_ctl->dev_name = dev_name(dev); edev_ctl->ctl_name = "llcc"; edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE; - edev_ctl->pvt_info = llcc_driv_data; rc = edac_device_add_device(edev_ctl); if (rc) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index b0cc3f1e9bb00..16ea847ade5fd 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -818,8 +818,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg) r = container_of(resource, struct inbound_transaction_resource, resource); - if (is_fcp_request(r->request)) + if (is_fcp_request(r->request)) { + kfree(r->data); goto out; + } if (a->length != fw_get_response_length(r->request)) { ret = -EINVAL; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 745b7f9eb3351..8d24082848a83 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -711,8 +711,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) { int ret = scmi_chan_setup(info, dev, prot_id, true); - if (!ret) /* Rx is optional, hence no error check */ - scmi_chan_setup(info, dev, prot_id, false); + if (!ret) { + /* Rx is optional, report only memory errors */ + ret = scmi_chan_setup(info, dev, prot_id, false); + if (ret && ret != -ENOMEM) + ret = 0; + } return ret; } @@ -942,6 +946,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match); static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", + .suppress_bind_attrs = true, .of_match_table = scmi_of_match, .dev_groups = 
versions_groups, }, diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index a4e4aa9a35426..af74e521f89f3 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -106,9 +106,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd_data->domains = domains; scmi_pd_data->num_domains = num_domains; + dev_set_drvdata(dev, scmi_pd_data); + return of_genpd_add_provider_onecell(np, scmi_pd_data); } +static void scmi_pm_domain_remove(struct scmi_device *sdev) +{ + int i; + struct genpd_onecell_data *scmi_pd_data; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + + of_genpd_del_provider(np); + + scmi_pd_data = dev_get_drvdata(dev); + for (i = 0; i < scmi_pd_data->num_domains; i++) { + if (!scmi_pd_data->domains[i]) + continue; + pm_genpd_remove(scmi_pd_data->domains[i]); + } +} + static const struct scmi_device_id scmi_id_table[] = { { SCMI_PROTOCOL_POWER, "genpd" }, { }, @@ -118,6 +137,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table); static struct scmi_driver scmi_power_domain_driver = { .name = "scmi-power-domain", .probe = scmi_pm_domain_probe, + .remove = scmi_pm_domain_remove, .id_table = scmi_id_table, }; module_scmi_driver(scmi_power_domain_driver); diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 0e3eaea5d8526..56a1f61aa3ff2 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -58,10 +58,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer) { + size_t len = ioread32(&shmem->length); + xfer->hdr.status = ioread32(shmem->msg_payload); /* Skip the length of header and status in shmem area i.e 8 bytes */ - xfer->rx.len = min_t(size_t, xfer->rx.len, - ioread32(&shmem->length) - 8); + xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); /* Take a copy to the rx buffer.. */ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); @@ -70,8 +71,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, size_t max_len, struct scmi_xfer *xfer) { + size_t len = ioread32(&shmem->length); + /* Skip only the length of header in shmem area i.e 4 bytes */ - xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4); + xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); /* Take a copy to the rx buffer.. */ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 4dde8edd53b62..3e8d4b51a8140 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -242,29 +242,6 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff, return ret; } -/** - * efi_capsule_flush - called by file close or file flush - * @file: file pointer - * @id: not used - * - * If a capsule is being partially uploaded then calling this function - * will be treated as upload termination and will free those completed - * buffer pages and -ECANCELED will be returned. 
- **/ -static int efi_capsule_flush(struct file *file, fl_owner_t id) -{ - int ret = 0; - struct capsule_info *cap_info = file->private_data; - - if (cap_info->index > 0) { - pr_err("capsule upload not complete\n"); - efi_free_all_buff_pages(cap_info); - ret = -ECANCELED; - } - - return ret; -} - /** * efi_capsule_release - called by file close * @inode: not used @@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file) { struct capsule_info *cap_info = file->private_data; + if (cap_info->index > 0 && + (cap_info->header.headersize == 0 || + cap_info->count < cap_info->total_size)) { + pr_err("capsule upload not complete\n"); + efi_free_all_buff_pages(cap_info); + } + kfree(cap_info->pages); kfree(cap_info->phys); kfree(file->private_data); @@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = { .owner = THIS_MODULE, .open = efi_capsule_open, .write = efi_capsule_write, - .flush = efi_capsule_flush, .release = efi_capsule_release, .llseek = no_llseek, }; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index e3df82d5d37a8..332739f3eded5 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -385,8 +385,8 @@ static int __init efisubsys_init(void) efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { pr_err("efi: Firmware registration failed.\n"); - destroy_workqueue(efi_rts_wq); - return -ENOMEM; + error = -ENOMEM; + goto err_destroy_wq; } if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | @@ -429,7 +429,10 @@ static int __init efisubsys_init(void) generic_ops_unregister(); err_put: kobject_put(efi_kobj); - destroy_workqueue(efi_rts_wq); +err_destroy_wq: + if (efi_rts_wq) + destroy_workqueue(efi_rts_wq); + return error; } @@ -590,7 +593,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed)); if (seed != NULL) { - size = READ_ONCE(seed->size); + size = min_t(u32, seed->size, SZ_1K); // sanity check early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); @@ -599,8 +602,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed) + size); if (seed != NULL) { - pr_notice("seeding entropy pool\n"); add_bootloader_randomness(seed->bits, size); + memzero_explicit(seed->bits, size); early_memunmap(seed, sizeof(*seed) + size); } else { pr_err("Could not map UEFI random seed!\n"); @@ -947,6 +950,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) /* first try to find a slot in an existing linked list entry */ for (prsv = efi_memreserve_root->next; prsv; ) { rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); + if (!rsv) + return -ENOMEM; index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); if (index < rsv->size) { rsv->entry[index].base = addr; diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index a2ae9c3b95793..433e11dab4a87 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \ $(call cc-option,-fno-addrsig) \ -D__DISABLE_EXPORTS +# +# struct randomization only makes sense for Linux internal types, which the EFI +# stub code never touches, so let's turn off struct randomization for the stub +# altogether +# +KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS)) + # remove SCS flags from all 
objects in this directory KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 415a971e76947..7f4bafcd9d335 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -24,7 +24,7 @@ efi_status_t check_platform_features(void) return EFI_SUCCESS; tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; - if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { + if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 2d7abcd99de9b..c5e0c6e99d20c 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -767,6 +767,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed); +efi_status_t efi_random_get_seed(void); + efi_status_t check_platform_features(void); void *get_efi_config_table(efi_guid_t guid); diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 368cd60000eec..d48b0de05b62f 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -281,14 +281,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, goto fail; } - /* - * Now that we have done our final memory allocation (and free) - * we can get the memory map key needed for exit_boot_services(). - */ - status = efi_get_memory_map(&map); - if (status != EFI_SUCCESS) - goto fail_free_new_fdt; - status = update_fdt((void *)fdt_addr, fdt_size, (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, initrd_addr, initrd_size); diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 24aa375353724..f85d2c0668777 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -67,22 +67,43 @@ efi_status_t efi_random_get_seed(void) efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; + struct linux_efi_random_seed *prev_seed, *seed = NULL; + int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE; efi_rng_protocol_t *rng = NULL; - struct linux_efi_random_seed *seed = NULL; efi_status_t status; status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; - status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*seed) + EFI_RANDOM_SEED_SIZE, + /* + * Check whether a seed was provided by a prior boot stage. In that + * case, instead of overwriting it, let's create a new buffer that can + * hold both, and concatenate the existing and the new seeds. + * Note that we should read the seed size with caution, in case the + * table got corrupted in memory somehow. 
+ */ + prev_seed = get_efi_config_table(LINUX_EFI_RANDOM_SEED_TABLE_GUID); + if (prev_seed && prev_seed->size <= 512U) { + prev_seed_size = prev_seed->size; + seed_size += prev_seed_size; + } + + /* + * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the + * allocation will survive a kexec reboot (although we refresh the seed + * beforehand) + */ + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, + struct_size(seed, bits, seed_size), (void **)&seed); - if (status != EFI_SUCCESS) - return status; + if (status != EFI_SUCCESS) { + efi_warn("Failed to allocate memory for RNG seed.\n"); + goto err_warn; + } status = efi_call_proto(rng, get_rng, &rng_algo_raw, - EFI_RANDOM_SEED_SIZE, seed->bits); + EFI_RANDOM_SEED_SIZE, seed->bits); if (status == EFI_UNSUPPORTED) /* @@ -95,14 +116,28 @@ efi_status_t efi_random_get_seed(void) if (status != EFI_SUCCESS) goto err_freepool; - seed->size = EFI_RANDOM_SEED_SIZE; + seed->size = seed_size; + if (prev_seed_size) + memcpy(seed->bits + EFI_RANDOM_SEED_SIZE, prev_seed->bits, + prev_seed_size); + status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); if (status != EFI_SUCCESS) goto err_freepool; + if (prev_seed_size) { + /* wipe and free the old seed if we managed to install the new one */ + memzero_explicit(prev_seed->bits, prev_seed_size); + efi_bs_call(free_pool, prev_seed); + } return EFI_SUCCESS; err_freepool: + memzero_explicit(seed, struct_size(seed, bits, seed_size)); efi_bs_call(free_pool, seed); + efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL\n"); +err_warn: + if (prev_seed) + efi_warn("Retaining bootloader-supplied seed only"); return status; } diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 5efc524b14bef..a2be3a71bcf8e 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -19,7 +19,7 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode"; /* SHIM variables */ static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; -static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; +static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT"; /* * Determine whether we're in secure boot mode. @@ -53,8 +53,8 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* * See if a user has put the shim into insecure mode. If so, and if the - * variable doesn't have the runtime attribute set, we might as well - * honor that. + * variable doesn't have the non-volatile attribute set, we might as + * well honor that. */ size = sizeof(moksbstate); status = get_efi_var(shim_MokSBState_name, &shim_guid, @@ -63,7 +63,7 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* If it fails, we don't care why. Default to secure */ if (status != EFI_SUCCESS) goto secure_boot_enabled; - if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) + if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1) return efi_secureboot_mode_disabled; secure_boot_enabled: diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 3672539cb96eb..5d0f1b1966fc6 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; + /* + * Disregard any setup data that was provided by the bootloader: + * setup_data could be pointing anywhere, and we have no way of + * authenticating or validating the payload. 
+ */ + hdr->setup_data = 0; + efi_stub_entry(handle, sys_table_arg, boot_params); /* not reached */ diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index 0a9aba5f9ceff..f178b2984dfb2 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -33,7 +33,7 @@ int __init efi_memattr_init(void) return -ENOMEM; } - if (tbl->version > 1) { + if (tbl->version > 2) { pr_warn("Unexpected EFI Memory Attributes table version %d\n", tbl->version); goto unmap; } diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index f3e54f6616f02..60075e0e4943a 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work; \ if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \ pr_warn_once("EFI Runtime Services are disabled!\n"); \ + efi_rts_work.status = EFI_DEVICE_ERROR; \ goto exit; \ } \ \ diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 8f665678e9e39..e8d69bd548f3f 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -97,7 +97,7 @@ int __init efi_tpm_eventlog_init(void) goto out_calc; } - memblock_reserve((unsigned long)final_tbl, + memblock_reserve(efi.tpm_final_log, tbl_size + sizeof(*final_tbl)); efi_tpm_final_log_size = tbl_size; diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index 0205987a4fd4f..568074148f62b 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -152,12 +152,8 @@ static int coreboot_table_probe(struct platform_device *pdev) if (!ptr) return -ENOMEM; - ret = bus_register(&coreboot_bus_type); - if (!ret) { - ret = coreboot_table_populate(dev, ptr); - if (ret) - bus_unregister(&coreboot_bus_type); - } + ret = coreboot_table_populate(dev, ptr); + memunmap(ptr); return ret; @@ -172,7 +168,6 @@ static int __cb_dev_unregister(struct device *dev, void *dummy) static int coreboot_table_remove(struct platform_device *pdev) { bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister); - bus_unregister(&coreboot_bus_type); return 0; } @@ -202,6 +197,32 @@ static struct platform_driver coreboot_table_driver = { .of_match_table = of_match_ptr(coreboot_of_match), }, }; -module_platform_driver(coreboot_table_driver); + +static int __init coreboot_table_driver_init(void) +{ + int ret; + + ret = bus_register(&coreboot_bus_type); + if (ret) + return ret; + + ret = platform_driver_register(&coreboot_table_driver); + if (ret) { + bus_unregister(&coreboot_bus_type); + return ret; + } + + return 0; +} + +static void __exit coreboot_table_driver_exit(void) +{ + platform_driver_unregister(&coreboot_table_driver); + bus_unregister(&coreboot_bus_type); +} + +module_init(coreboot_table_driver_init); +module_exit(coreboot_table_driver_exit); + MODULE_AUTHOR("Google, Inc."); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 7d9367b220107..407cac71c77de 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -360,9 +360,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name, memcpy(data, gsmi_dev.data_buf->start, *data_size); /* All variables have the following attributes */ - *attr = EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS; + if (attr) + *attr = EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS; }
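The gsmi_get_variable() change above is the usual hardening for optional out-parameters: the efivars get_variable interface allows callers to pass a NULL attributes pointer, so the backend must check before writing through it. The shape of the fix, reduced to a sketch (read_var_sketch and its parameters are illustrative):

	#include <linux/types.h>

	/* value is required; attributes is optional and may be NULL. */
	static int read_var_sketch(u32 *value, u32 *attributes)
	{
		*value = 0;		/* always produced */
		if (attributes)		/* written only when the caller asked for it */
			*attributes = 0;
		return 0;
	}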
spin_unlock_irqrestore(&gsmi_dev.lock, flags); @@ -680,6 +681,15 @@ static struct notifier_block gsmi_die_notifier = { static int gsmi_panic_callback(struct notifier_block *nb, unsigned long reason, void *arg) { + + /* + * Panic callbacks are executed with all other CPUs stopped, + * so we must not attempt to spin waiting for gsmi_dev.lock + * to be released. + */ + if (spin_is_locked(&gsmi_dev.lock)) + return NOTIFY_DONE; + gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC); return NOTIFY_DONE; } diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 1d965c1252cac..9eef49da47e04 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -265,6 +265,7 @@ static int rpi_firmware_probe(struct platform_device *pdev) int ret = PTR_ERR(fw->chan); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get mbox channel: %d\n", ret); + kfree(fw); return ret; } diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index b450870b75ed8..eb8a6e329af9b 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -1857,7 +1857,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev, return -EINVAL; fds = memdup_user((void __user *)(arg + sizeof(hdr)), - hdr.count * sizeof(s32)); + array_size(hdr.count, sizeof(s32))); if (IS_ERR(fds)) return PTR_ERR(fds); diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index 9e34bbbce26e2..7a8269b52723d 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr, /* Allocate buffers from the service layer's pool. */ for (i = 0; i < NUM_SVC_BUFS; i++) { kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE); - if (!kbuf) { + if (IS_ERR(kbuf)) { s10_free_buffers(mgr); - ret = -ENOMEM; + ret = PTR_ERR(kbuf); goto init_done; } diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c index 59ddc9fd5bca4..92e6eebd1851e 100644 --- a/drivers/fsi/fsi-core.c +++ b/drivers/fsi/fsi-core.c @@ -1309,6 +1309,9 @@ int fsi_master_register(struct fsi_master *master) mutex_init(&master->scan_lock); master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL); + if (master->idx < 0) + return master->idx; + dev_set_name(&master->dev, "fsi%d", master->idx); master->dev.class = &fsi_master_class; diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c index 84cb965bfed5c..97045a8d94224 100644 --- a/drivers/fsi/fsi-sbefifo.c +++ b/drivers/fsi/fsi-sbefifo.c @@ -640,7 +640,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo) } ffdc_iov.iov_base = ffdc; ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE; - iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); + iov_iter_kvec(&ffdc_iter, READ, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); cmd[0] = cpu_to_be32(2); cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC); rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter); @@ -737,7 +737,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len, rbytes = (*resp_len) * sizeof(__be32); resp_iov.iov_base = response; resp_iov.iov_len = rbytes; - iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes); + iov_iter_kvec(&resp_iter, READ, &resp_iov, 1, rbytes); /* Perform the command */ mutex_lock(&sbefifo->lock); @@ -817,7 +817,7 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf, /* Prepare iov iterator */ resp_iov.iov_base = buf; resp_iov.iov_len = len; - iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len); + iov_iter_init(&resp_iter, READ, &resp_iov, 1, len); /* 
Perform the command */ mutex_lock(&sbefifo->lock); diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c index fdcebe59510dd..68d95051dd0e6 100644 --- a/drivers/gpio/gpio-amd8111.c +++ b/drivers/gpio/gpio-amd8111.c @@ -231,7 +231,10 @@ static int __init amd_gpio_init(void) ioport_unmap(gp.pm); goto out; } + return 0; + out: + pci_dev_put(pdev); return err; } @@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void) { gpiochip_remove(&gp.chip); ioport_unmap(gp.pm); + pci_dev_put(gp.pdev); } module_init(amd_gpio_init); diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 67ed4f238d437..876027fdefc95 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -375,6 +375,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev, } } +static void gpio_mockup_debugfs_cleanup(void *data) +{ + struct gpio_mockup_chip *chip = data; + + debugfs_remove_recursive(chip->dbg_dir); +} + static void gpio_mockup_dispose_mappings(void *data) { struct gpio_mockup_chip *chip = data; @@ -457,7 +464,7 @@ static int gpio_mockup_probe(struct platform_device *pdev) gpio_mockup_debugfs_setup(dev, chip); - return 0; + return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip); } static struct platform_driver gpio_mockup_driver = { @@ -597,9 +604,9 @@ static int __init gpio_mockup_init(void) static void __exit gpio_mockup_exit(void) { + gpio_mockup_unregister_pdevs(); debugfs_remove_recursive(gpio_mockup_dbg_dir); platform_driver_unregister(&gpio_mockup_driver); - gpio_mockup_unregister_pdevs(); } module_init(gpio_mockup_init); diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index d60d5520707dc..60c2533a39a5f 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_LEVEL_LOW: raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR, gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR) diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index ba6ed2a413f51..0d5a9fee3c70a 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -231,7 +231,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) writel(1 << gpio_idx, port->base + GPIO_ISR); - return 0; + return port->gc.direction_input(&port->gc, gpio_idx); } static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 4f28fa73450c1..a42ffb9f30574 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -195,6 +195,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) return -ENODEV; } parent = irq_find_host(irq_parent); + of_node_put(irq_parent); if (!parent) { dev_err(dev, "no IRQ parent domain\n"); return -ENODEV; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 2613881a66e66..40d0196d8bdcc 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -197,16 +197,18 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, void __user *ip = (void __user *)arg; struct gpiohandle_data ghd; DECLARE_BITMAP(vals, GPIOHANDLES_MAX); - int i; + unsigned int i; + int ret; - if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { - /* NOTE: It's ok to read values of output lines. 
*/ - int ret = gpiod_get_array_value_complex(false, - true, - lh->num_descs, - lh->descs, - NULL, - vals); + if (!lh->gdev->chip) + return -ENODEV; + + switch (cmd) { + case GPIOHANDLE_GET_LINE_VALUES_IOCTL: + /* NOTE: It's okay to read values of output lines */ + ret = gpiod_get_array_value_complex(false, true, + lh->num_descs, lh->descs, + NULL, vals); if (ret) return ret; @@ -218,7 +220,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, return -EFAULT; return 0; - } else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) { + case GPIOHANDLE_SET_LINE_VALUES_IOCTL: /* * All line descriptors were created at once with the same * flags so just check if the first one is really output. @@ -240,10 +242,11 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, lh->descs, NULL, vals); - } else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) { + case GPIOHANDLE_SET_CONFIG_IOCTL: return linehandle_set_config(lh, ip); + default: + return -EINVAL; } - return -EINVAL; } #ifdef CONFIG_COMPAT @@ -1165,14 +1168,19 @@ static long linereq_ioctl(struct file *file, unsigned int cmd, struct linereq *lr = file->private_data; void __user *ip = (void __user *)arg; - if (cmd == GPIO_V2_LINE_GET_VALUES_IOCTL) + if (!lr->gdev->chip) + return -ENODEV; + + switch (cmd) { + case GPIO_V2_LINE_GET_VALUES_IOCTL: return linereq_get_values(lr, ip); - else if (cmd == GPIO_V2_LINE_SET_VALUES_IOCTL) + case GPIO_V2_LINE_SET_VALUES_IOCTL: return linereq_set_values(lr, ip); - else if (cmd == GPIO_V2_LINE_SET_CONFIG_IOCTL) + case GPIO_V2_LINE_SET_CONFIG_IOCTL: return linereq_set_config(lr, ip); - - return -EINVAL; + default: + return -EINVAL; + } } #ifdef CONFIG_COMPAT @@ -1189,6 +1197,9 @@ static __poll_t linereq_poll(struct file *file, struct linereq *lr = file->private_data; __poll_t events = 0; + if (!lr->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &lr->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, @@ -1208,6 +1219,9 @@ static ssize_t linereq_read(struct file *file, ssize_t bytes_read = 0; int ret; + if (!lr->gdev->chip) + return -ENODEV; + if (count < sizeof(le)) return -EINVAL; @@ -1473,6 +1487,9 @@ static __poll_t lineevent_poll(struct file *file, struct lineevent_state *le = file->private_data; __poll_t events = 0; + if (!le->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &le->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) @@ -1508,6 +1525,9 @@ static ssize_t lineevent_read(struct file *file, ssize_t ge_size; int ret; + if (!le->gdev->chip) + return -ENODEV; + /* * When compatible system call is being used the struct gpioevent_data, * in case of at least ia32, has different size due to the alignment @@ -1586,6 +1606,9 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd, void __user *ip = (void __user *)arg; struct gpiohandle_data ghd; + if (!le->gdev->chip) + return -ENODEV; + /* * We can get the value for an event line but not set it, * because it is input by definition. @@ -1769,7 +1792,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) ret = -ENODEV; goto out_free_le; } - le->irq = irq; if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 
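/*
 * Illustrative sketch, not part of the patch: the gpiolib-cdev hunks above
 * add an early bail-out to each file operation so a chardev that outlives
 * its GPIO chip (hot-unplug after open()) fails cleanly instead of touching
 * freed hardware state. Names below are hypothetical; the guard shape is
 * what matters:
 */
#include <linux/fs.h>
#include <linux/errno.h>

struct demo_gpiodev {
	void *chip;		/* cleared when the chip is unregistered */
};

struct demo_linestate {
	struct demo_gpiodev *gdev;
};

static long demo_line_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct demo_linestate *ls = file->private_data;

	if (!ls->gdev->chip)	/* backing chip already gone */
		return -ENODEV;

	switch (cmd) {
	/* ... per-command handling, one case per ioctl ... */
	default:
		return -EINVAL;
	}
}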
@@ -1783,7 +1805,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) init_waitqueue_head(&le->wait); /* Request a thread to read the events */ - ret = request_threaded_irq(le->irq, + ret = request_threaded_irq(irq, lineevent_irq_handler, lineevent_irq_thread, irqflags, @@ -1792,6 +1814,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_le; + le->irq = irq; + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); if (fd < 0) { ret = fd; @@ -2094,28 +2118,30 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -ENODEV; /* Fill in the struct and pass to userspace */ - if (cmd == GPIO_GET_CHIPINFO_IOCTL) { + switch (cmd) { + case GPIO_GET_CHIPINFO_IOCTL: return chipinfo_get(cdev, ip); #ifdef CONFIG_GPIO_CDEV_V1 - } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) { + case GPIO_GET_LINEHANDLE_IOCTL: return linehandle_create(gdev, ip); - } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) { + case GPIO_GET_LINEEVENT_IOCTL: return lineevent_create(gdev, ip); - } else if (cmd == GPIO_GET_LINEINFO_IOCTL || - cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) { - return lineinfo_get_v1(cdev, ip, - cmd == GPIO_GET_LINEINFO_WATCH_IOCTL); + case GPIO_GET_LINEINFO_IOCTL: + return lineinfo_get_v1(cdev, ip, false); + case GPIO_GET_LINEINFO_WATCH_IOCTL: + return lineinfo_get_v1(cdev, ip, true); #endif /* CONFIG_GPIO_CDEV_V1 */ - } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL || - cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) { - return lineinfo_get(cdev, ip, - cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL); - } else if (cmd == GPIO_V2_GET_LINE_IOCTL) { + case GPIO_V2_GET_LINEINFO_IOCTL: + return lineinfo_get(cdev, ip, false); + case GPIO_V2_GET_LINEINFO_WATCH_IOCTL: + return lineinfo_get(cdev, ip, true); + case GPIO_V2_GET_LINE_IOCTL: return linereq_create(gdev, ip); - } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) { + case GPIO_GET_LINEINFO_UNWATCH_IOCTL: return lineinfo_unwatch(cdev, ip); + default: + return -EINVAL; } - return -EINVAL; } #ifdef CONFIG_COMPAT @@ -2163,6 +2189,9 @@ static __poll_t lineinfo_watch_poll(struct file *file, struct gpio_chardev_data *cdev = file->private_data; __poll_t events = 0; + if (!cdev->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &cdev->wait, pollt); if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events, @@ -2181,6 +2210,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, int ret; size_t event_size; + if (!cdev->gdev->chip) + return -ENODEV; + #ifndef CONFIG_GPIO_CDEV_V1 event_size = sizeof(struct gpio_v2_line_info_changed); if (count < event_size) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 59d8affad343a..3e01a3ac652d1 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -186,9 +186,8 @@ static int gpiochip_find_base(int ngpio) /* found a free space? 
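/*
 * Illustrative sketch, not part of the patch: the lineevent_create() change
 * above delays the le->irq assignment until request_threaded_irq() has
 * succeeded, so the shared error path (which frees the IRQ only when
 * le->irq is non-zero) can no longer free an IRQ that was never requested:
 */
#include <linux/interrupt.h>

struct demo_event_state {
	int irq;
};

static int demo_request_event_irq(struct demo_event_state *le, int irq,
				  irq_handler_t hard, irq_handler_t thread,
				  unsigned long flags)
{
	int ret;

	ret = request_threaded_irq(irq, hard, thread, flags, "demo", le);
	if (ret)
		return ret;	/* le->irq stays 0: cleanup won't free it */

	le->irq = irq;		/* publish only after the request succeeded */
	return 0;
}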
*/ if (gdev->base + gdev->ngpio <= base) break; - else - /* nope, check the space right before the chip */ - base = gdev->base - ngpio; + /* nope, check the space right before the chip */ + base = gdev->base - ngpio; } if (gpio_is_valid(base)) { @@ -2481,8 +2480,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) ret = gpiod_direction_input(desc); goto set_output_flag; } - } - else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { + } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE); if (!ret) goto set_output_value; @@ -2656,9 +2654,9 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { - if (gc->get_multiple) { + if (gc->get_multiple) return gc->get_multiple(gc, mask, bits); - } else if (gc->get) { + if (gc->get) { int i, value; for_each_set_bit(i, mask, gc->ngpio) { diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index ca868271f4c43..4e9b3a95fa7c7 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -30,6 +30,7 @@ menuconfig DRM config DRM_MIPI_DBI tristate depends on DRM + select DRM_KMS_HELPER config DRM_MIPI_DSI bool diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 6333cada1e096..4b568ee932435 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -313,6 +313,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) if (!found) return false; + pci_dev_put(pdev); adev->bios = kmalloc(size, GFP_KERNEL); if (!adev->bios) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index df1f9b88a53f9..aabfe5705bb8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -315,8 +315,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector) if (!amdgpu_connector->edid) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || - (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) + (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) { amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev); + drm_connector_update_edid_property(connector, amdgpu_connector->edid); + } } } @@ -1671,10 +1673,12 @@ amdgpu_connector_add(struct amdgpu_device *adev, adev->mode_info.dither_property, AMDGPU_FMT_DITHER_DISABLE); - if (amdgpu_audio != 0) + if (amdgpu_audio != 0) { drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; + } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; @@ -1796,6 +1800,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; } drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.dither_property, @@ -1849,6 +1854,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; } drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.dither_property, @@ -1899,6 +1905,7 @@ 
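/*
 * Illustrative sketch, not part of the patch: the amdgpu_connector_get_edid()
 * hunk above keeps the userspace-visible EDID property in step with the
 * kernel's copy when a hardcoded panel EDID is substituted.
 * drm_connector_update_edid_property() accepts NULL to clear the property,
 * so mirroring every update keeps both views consistent:
 */
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static void demo_sync_edid(struct drm_connector *connector,
			   const struct edid *edid)
{
	/* update the driver's cached copy first, then mirror it for users */
	drm_connector_update_edid_property(connector, edid);
}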
amdgpu_connector_add(struct amdgpu_device *adev, drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; } drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.dither_property, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f262c4e7a48a2..8bd887fb6e631 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2047,6 +2047,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); return r; } + + /* get pf2vf msg info at its earliest time */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + } } @@ -2176,6 +2181,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) /* need to do gmc hw init early so we can allocate gpu mem */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { + /* Try to reserve bad pages early */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_exchange_data(adev); + r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); @@ -4434,6 +4443,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) pm_runtime_enable(&(p->dev)); pm_runtime_resume(&(p->dev)); } + + pci_dev_put(p); } static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) @@ -4472,6 +4483,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) if (expires < ktime_get_mono_fast_ns()) { dev_warn(adev->dev, "failed to suspend display audio\n"); + pci_dev_put(p); /* TODO: abort the succeeding gpu reset? */ return -ETIMEDOUT; } @@ -4479,6 +4491,7 @@ pm_runtime_disable(&(p->dev)); + pci_dev_put(p); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 30659c1776e81..0d8c1c318ce89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1121,6 +1121,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, "See modparam exp_hw_support\n"); return -ENODEV; } + /* differentiate between P10 and P11 asics with the same DID */ + if (pdev->device == 0x67FF && + (pdev->revision == 0xE3 || + pdev->revision == 0xE7 || + pdev->revision == 0xF3 || + pdev->revision == 0xF7)) { + flags &= ~AMD_ASIC_MASK; + flags |= CHIP_POLARIS10; + } /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, * however, SME requires an indirect IOMMU mapping because the encryption @@ -1190,12 +1199,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ddev->pdev = pdev; pci_set_drvdata(pdev, ddev); - ret = amdgpu_driver_load_kms(adev, ent->driver_data); + ret = amdgpu_driver_load_kms(adev, flags); if (ret) goto err_pci; retry_init: - ret = drm_dev_register(ddev, ent->driver_data); + ret = drm_dev_register(ddev, flags); if (ret == -EAGAIN && ++retry <= 3) { DRM_INFO("retry init %d\n", retry); /* Don't request EX mode too frequently which is attacking */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e8c76bd8c501f..6aa9fd9cb83b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -341,11 +341,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object; - if 
(args->flags & AMDGPU_GEM_USERPTR_REGISTER) { - r = amdgpu_mn_register(bo, args->addr); - if (r) - goto release_object; - } + r = amdgpu_mn_register(bo, args->addr); + if (r) + goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 2f47f81a74a57..8a2abcfd5a889 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1921,7 +1921,7 @@ static int psp_load_smu_fw(struct psp_context *psp) static bool fw_load_skip_check(struct psp_context *psp, struct amdgpu_firmware_info *ucode) { - if (!ucode->fw) + if (!ucode->fw || !ucode->ucode_size) return true; if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && @@ -2146,6 +2146,9 @@ static int psp_hw_fini(void *handle) psp_rap_terminate(psp); psp_dtm_terminate(psp); psp_hdcp_terminate(psp); + + if (adev->gmc.xgmi.num_physical_nodes > 1) + psp_xgmi_terminate(psp); } psp_asd_unload(psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index eb22a190c2423..3638f0e12a2b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1979,15 +1979,12 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, return 0; } -static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev) +static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { - if (adev->asic_type != CHIP_VEGA10 && - adev->asic_type != CHIP_VEGA20 && - adev->asic_type != CHIP_ARCTURUS && - adev->asic_type != CHIP_SIENNA_CICHLID) - return 1; - else - return 0; + return adev->asic_type == CHIP_VEGA10 || + adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS || + adev->asic_type == CHIP_SIENNA_CICHLID; } /* @@ -2006,7 +2003,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, *supported = 0; if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || - amdgpu_ras_check_asic_type(adev)) + !amdgpu_ras_asic_supported(adev)) return; if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e7678ba8fdcf8..d6f2951035959 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -580,16 +580,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { - uint64_t bp_block_offset = 0; - uint32_t bp_block_size = 0; - struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; - adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; if (adev->mman.fw_vram_usage_va != NULL) { - adev->virt.vf2pf_update_interval_ms = 2000; + /* go through this logic in ip_init and reset to init workqueue*/ + amdgpu_virt_exchange_data(adev); + + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); + } else if (adev->bios != NULL) { + /* go through this logic in early init stage to get necessary flags, e.g. 
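/*
 * Illustrative sketch, not part of the patch: the amdgpu_ras change above
 * turns an inverted, int-returning check (1 meant "unsupported") into a
 * positively-named bool helper, so the call site reads as plain English.
 * Reduced form:
 */
#include <linux/types.h>

enum demo_asic { DEMO_VEGA10, DEMO_VEGA20, DEMO_ARCTURUS, DEMO_OTHER };

static bool demo_ras_supported(enum demo_asic type)
{
	return type == DEMO_VEGA10 ||
	       type == DEMO_VEGA20 ||
	       type == DEMO_ARCTURUS;
}

static void demo_check_supported(enum demo_asic type, u32 *supported)
{
	*supported = 0;
	if (!demo_ras_supported(type))	/* reads naturally at the call site */
		return;
	*supported = 1;
}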
rlcg_acc related*/ + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } +} + + +void amdgpu_virt_exchange_data(struct amdgpu_device *adev) +{ + uint64_t bp_block_offset = 0; + uint32_t bp_block_size = 0; + struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; + + if (adev->mman.fw_vram_usage_va != NULL) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) @@ -616,13 +634,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } } - - if (adev->virt.vf2pf_update_interval_ms != 0) { - INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); - schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); - } } + void amdgpu_detect_virtualization(struct amdgpu_device *adev) { uint32_t reg; @@ -656,6 +670,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } + if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) + /* VF MMIO access (except mailbox range) from CPU + * will be blocked during sriov runtime + */ + adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; + /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { switch (adev->asic_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 8dd624c20f895..fbd92fff8b06c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -31,6 +31,7 @@ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */ /* all asic after AI use this offset */ #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 @@ -61,6 +62,8 @@ struct amdgpu_vf_error_buffer { uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; }; +enum idh_request; + /** * struct amdgpu_virt_ops - amdgpu device virt operations */ @@ -70,7 +73,8 @@ struct amdgpu_virt_ops { int (*req_init_data)(struct amdgpu_device *adev); int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); - void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); + void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, + u32 data1, u32 data2, u32 data3); }; /* @@ -241,6 +245,9 @@ struct amdgpu_virt { #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) +#define amdgpu_sriov_vf_mmio_access_protection(adev) \ +((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) + static inline bool is_virtual_machine(void) { #ifdef CONFIG_X86 @@ -271,6 +278,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); +void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_detect_virtualization(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 635601d8b1310..45b1f00c59680 100644 
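/*
 * Illustrative sketch, not part of the patch: AMDGPU_VF_MMIO_ACCESS_PROTECT
 * above follows the driver's existing caps idiom - one bit per capability
 * plus a macro predicate - and the amdgpu_vm.c hunk just below consults it
 * so page tables are updated via SDMA instead of CPU writes that SR-IOV
 * runtime would block. Reduced shape, with hypothetical names:
 */
#include <linux/types.h>

#define DEMO_CAP_MMIO_PROTECT	(1 << 5)	/* MMIO writes blocked in VF runtime */

struct demo_virt {
	u32 caps;
};

#define demo_mmio_protected(v)	((v)->caps & DEMO_CAP_MMIO_PROTECT)

/* returns true when CPU-based page-table updates are safe to use */
static bool demo_can_use_cpu_updates(const struct demo_virt *v,
				     bool vram_fully_visible)
{
	return vram_fully_visible && !demo_mmio_protected(v);
}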
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3200,7 +3200,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) */ #ifdef CONFIG_X86_64 if (amdgpu_vm_update_mode == -1) { - if (amdgpu_gmc_vram_full_visible(&adev->gmc)) + /* For asic with VF MMIO access protection + * avoid using CPU for VM table updates + */ + if (amdgpu_gmc_vram_full_visible(&adev->gmc) && + !amdgpu_sriov_vf_mmio_access_protection(adev)) adev->vm_manager.vm_update_mode = AMDGPU_VM_USE_CPU_FOR_COMPUTE; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 042c85fc528bb..def0b7092438f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -622,7 +622,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) amdgpu_put_xgmi_hive(hive); } - return psp_xgmi_terminate(&adev->psp); + return 0; } int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 405bb3efa2a96..38f4c7474487b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2570,7 +2570,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) gfx_v9_0_tiling_mode_table_init(adev); - gfx_v9_0_setup_rb(adev); + if (adev->gfx.num_gfx_rings) + gfx_v9_0_setup_rb(adev); gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index f84701c562bf2..97441f373531f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -178,6 +178,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); + tmp = mmVM_L2_CNTL3_DEFAULT; if (adev->gmc.translate_further) { tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index f3274eb6b3418..6c4cba09d23b6 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -5,6 +5,7 @@ menu "Display Engine Configuration" config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y + depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64 select SND_HDA_COMPONENT if SND_HDA_CORE select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help @@ -12,6 +13,12 @@ config DRM_AMD_DC support for AMDGPU. This adds required support for Vega and Raven ASICs. + calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64) + architectures built with Clang (all released versions), whereby the stack + frame gets blown up to well over 5k. This would cause an immediate kernel + panic on most architectures. We'll revert this when the following bug report + has been resolved: https://github.com/llvm/llvm-project/issues/41896. 
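/*
 * Illustrative sketch, not part of the patch: the Kconfig dependency above
 * works around Clang builds where calculate_bandwidth() needs a stack frame
 * of well over 5kB. The same class of problem is treated later in this diff
 * (display_mode_vba_30.c) by marking a large helper noinline_for_stack,
 * which keeps its locals out of the caller's frame:
 */
#include <linux/compiler.h>

static noinline_for_stack int demo_heavy_step(const int *in, int n)
{
	int scratch[256];	/* large locals live only in this frame */
	int i, acc = 0;

	for (i = 0; i < n && i < 256; i++) {
		scratch[i] = in[i] * 2;
		acc += scratch[i];
	}
	return acc;
}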
+ config DRM_AMD_DC_DCN def_bool n help diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 55ecc67592ebc..fbe15f4b75fd5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2348,13 +2348,12 @@ void amdgpu_dm_update_connector_after_detect( aconnector->edid = (struct edid *)sink->dc_edid.raw_edid; - drm_connector_update_edid_property(connector, - aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); } + drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); update_connector_ext_caps(aconnector); } else { @@ -4568,8 +4567,6 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; timing_out->aspect_ratio = get_aspect_ratio(mode_in); - stream->output_color_space = get_output_color_space(timing_out); - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { @@ -4580,6 +4577,8 @@ static void fill_stream_properties_from_drm_display_mode( adjust_colour_depth_from_display_info(timing_out, info); } } + + stream->output_color_space = get_output_color_space(timing_out); } static void fill_audio_info(struct audio_info *audio_info, @@ -8784,8 +8783,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - if (dm_old_con_state->abm_level != - dm_new_con_state->abm_level) + if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || + dm_old_con_state->scaling != dm_new_con_state->scaling) new_crtc_state->connectors_changed = true; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 29d64e7e304f4..930d2b7d34489 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -352,6 +352,7 @@ static enum bp_result get_gpio_i2c_info( uint32_t count = 0; unsigned int table_index = 0; bool find_valid = false; + struct atom_gpio_pin_assignment *pin; if (!info) return BP_RESULT_BADINPUT; @@ -379,20 +380,17 @@ static enum bp_result get_gpio_i2c_info( - sizeof(struct atom_common_table_header)) / sizeof(struct atom_gpio_pin_assignment); + pin = (struct atom_gpio_pin_assignment *) header->gpio_pin; + for (table_index = 0; table_index < count; table_index++) { - if (((record->i2c_id & I2C_HW_CAP) == ( - header->gpio_pin[table_index].gpio_id & - I2C_HW_CAP)) && - ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_ENGINE_ID_MASK)) && - ((record->i2c_id & I2C_HW_LANE_MUX) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_LANE_MUX))) { + if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) && + ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) && + ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) { /* still valid */ find_valid = true; break; } + pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment)); } /* If we don't find the entry that we are looking for then diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c index 6ca288fb5fb9e..2d46bc527b218 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c +++ 
b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c @@ -26,12 +26,12 @@ #include "bw_fixed.h" -#define MIN_I64 \ - (int64_t)(-(1LL << 63)) - #define MAX_I64 \ (int64_t)((1ULL << 63) - 1) +#define MIN_I64 \ + (-MAX_I64 - 1) + #define FRACTIONAL_PART_MASK \ ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 93f5229c303e7..99887bcfada04 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2202,11 +2202,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->abm_level) stream->abm_level = *update->abm_level; - if (update->periodic_interrupt0) - stream->periodic_interrupt0 = *update->periodic_interrupt0; - - if (update->periodic_interrupt1) - stream->periodic_interrupt1 = *update->periodic_interrupt1; + if (update->periodic_interrupt) + stream->periodic_interrupt = *update->periodic_interrupt; if (update->gamut_remap) stream->gamut_remap_matrix = *update->gamut_remap; @@ -2288,13 +2285,8 @@ static void commit_planes_do_stream_update(struct dc *dc, if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { - if (stream_update->periodic_interrupt0 && - dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0); - - if (stream_update->periodic_interrupt1 && - dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1); + if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 2a9080400bdde..86f3ea4edb319 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -92,8 +92,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = { { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3, 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }, { COLOR_SPACE_YCBCR2020_TYPE, - { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2, - 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} }, + { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2, + 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} }, { COLOR_SPACE_YCBCR709_BLACK_TYPE, { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} }, diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 205bedd1b1966..0487c1b8957c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -179,8 +179,7 @@ struct dc_stream_state { /* DMCU info */ unsigned int abm_level; - struct periodic_interrupt_config periodic_interrupt0; - struct periodic_interrupt_config periodic_interrupt1; + struct periodic_interrupt_config periodic_interrupt; /* from core_stream struct */ struct dc_context *ctx; @@ -244,8 +243,7 @@ struct dc_stream_update { struct dc_info_packet *hdr_static_metadata; unsigned int *abm_level; - struct periodic_interrupt_config *periodic_interrupt0; - struct periodic_interrupt_config *periodic_interrupt1; + struct periodic_interrupt_config *periodic_interrupt; struct dc_info_packet *vrr_infopacket; struct dc_info_packet *vsc_infopacket; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
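/*
 * Illustrative sketch, not part of the patch: the bw_fixed.c change above
 * derives MIN_I64 from MAX_I64 because evaluating -(1LL << 63) shifts a
 * one into the sign bit, which is undefined behaviour for signed types.
 * The portable spelling of the two's-complement extremes:
 */
#include <linux/types.h>

#define DEMO_MAX_I64	((s64)((1ULL << 63) - 1))	/* unsigned shift: well-defined */
#define DEMO_MIN_I64	(-DEMO_MAX_I64 - 1)		/* no overflow at any step */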
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 3ac6c7b65a45a..e33fe0207b9e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2047,7 +2047,8 @@ static void dce110_setup_audio_dto( continue; if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) continue; - if (pipe_ctx->stream_res.audio != NULL) { + if (pipe_ctx->stream_res.audio != NULL && + pipe_ctx->stream_res.audio->enabled == false) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); @@ -2075,7 +2076,8 @@ static void dce110_setup_audio_dto( if (!dc_is_dp_signal(pipe_ctx->stream->signal)) continue; - if (pipe_ctx->stream_res.audio != NULL) { + if (pipe_ctx->stream_res.audio != NULL && + pipe_ctx->stream_res.audio->enabled == false) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 8f362e8c17870..be6d43c9979c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -361,7 +361,8 @@ static const struct dce_audio_registers audio_regs[] = { audio_regs(2), audio_regs(3), audio_regs(4), - audio_regs(5) + audio_regs(5), + audio_regs(6), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c index 5a5a9cb77acbf..bcdd8a958fc01 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -1132,6 +1132,7 @@ struct resource_pool *dce60_create_resource_pool( if (dce60_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1329,6 +1330,7 @@ struct resource_pool *dce61_create_resource_pool( if (dce61_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1522,6 +1524,7 @@ struct resource_pool *dce64_create_resource_pool( if (dce64_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index a19be9de2df7d..2eefa07762ae1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -1141,6 +1141,7 @@ struct resource_pool *dce80_create_resource_pool( if (dce80_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1338,6 +1339,7 @@ struct resource_pool *dce81_create_resource_pool( if (dce81_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 31a13daf4289c..71a85c5306ed0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -3611,7 +3611,7 @@ void dcn10_calc_vupdate_position( { const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; int vline_int_offset_from_vupdate = - pipe_ctx->stream->periodic_interrupt0.lines_offset; + 
pipe_ctx->stream->periodic_interrupt.lines_offset; int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); int start_position; @@ -3636,18 +3636,10 @@ void dcn10_calc_vupdate_position( static void dcn10_cal_vline_position( struct dc *dc, struct pipe_ctx *pipe_ctx, - enum vline_select vline, uint32_t *start_line, uint32_t *end_line) { - enum vertical_interrupt_ref_point ref_point = INVALID_POINT; - - if (vline == VLINE0) - ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point; - else if (vline == VLINE1) - ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point; - - switch (ref_point) { + switch (pipe_ctx->stream->periodic_interrupt.ref_point) { case START_V_UPDATE: dcn10_calc_vupdate_position( dc, @@ -3656,7 +3648,9 @@ static void dcn10_cal_vline_position( end_line); break; case START_V_SYNC: - // Suppose to do nothing because vsync is 0; + // vsync is line 0 so start_line is just the requested line offset + *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset; + *end_line = *start_line + 2; break; default: ASSERT(0); @@ -3666,24 +3660,15 @@ static void dcn10_cal_vline_position( void dcn10_setup_periodic_interrupt( struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum vline_select vline) + struct pipe_ctx *pipe_ctx) { struct timing_generator *tg = pipe_ctx->stream_res.tg; + uint32_t start_line = 0; + uint32_t end_line = 0; - if (vline == VLINE0) { - uint32_t start_line = 0; - uint32_t end_line = 0; + dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line); - dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line); - - tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); - - } else if (vline == VLINE1) { - pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1( - tg, - pipe_ctx->stream->periodic_interrupt1.lines_offset); - } + tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); } void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index e5691e4990231..81b5057d5ff12 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -174,8 +174,7 @@ void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx); void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx); void dcn10_setup_periodic_interrupt( struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum vline_select vline); + struct pipe_ctx *pipe_ctx); enum dc_status dcn10_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 3d778760a3b55..c6c4888c66651 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1481,6 +1481,7 @@ static void dcn20_update_dchubp_dpp( /* Any updates are handled in dc interface, just need * to apply existing for plane enable / opp change */ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed + || pipe_ctx->update_flags.bits.plane_changed || pipe_ctx->stream->update_flags.bits.gamut_remap || pipe_ctx->stream->update_flags.bits.out_csc) { #if defined(CONFIG_DRM_AMD_DC_DCN3_0) @@ -1745,7 +1746,7 @@ void dcn20_post_unlock_program_front_end( for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 && hubp->funcs->hubp_is_flip_pending(hubp); j++) - mdelay(1); + udelay(1); } 
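/*
 * Illustrative sketch, not part of the patch: the dcn20 change above swaps
 * mdelay(1) for udelay(1) inside a loop bounded by
 * TIMEOUT_FOR_PIPE_ENABLE_MS*1000 iterations; with mdelay the worst case
 * waited roughly 1000x longer than the intended budget. Generic shape of a
 * bounded busy-wait, with hypothetical names:
 */
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int demo_wait_flip_done(bool (*pending)(void *ctx), void *ctx,
			       unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us && pending(ctx); i++)
		udelay(1);	/* step size matches the per-iteration budget */

	return pending(ctx) ? -ETIMEDOUT : 0;
}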
} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 2663f1b318420..e427f4ffa0807 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -6653,8 +6653,7 @@ static double CalculateUrgentLatency( return ret; } - -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxInterDCNTileRepeaters, int MaxPrefetchMode, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 64c1be818b0e8..3165a66c5362f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,11 +32,6 @@ #include "inc/hw/link_encoder.h" #include "core_status.h" -enum vline_select { - VLINE0, - VLINE1 -}; - struct pipe_ctx; struct dc_state; struct dc_stream_status; @@ -112,8 +107,7 @@ struct hw_sequencer_funcs { int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); void (*setup_periodic_interrupt)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum vline_select vline); + struct pipe_ctx *pipe_ctx); void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, unsigned int vmin, unsigned int vmax, unsigned int vmid, unsigned int vmid_frame_number); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 09bc2c249e1af..3c4390d71a827 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1524,6 +1524,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, struct fixed31_32 lut2; struct fixed31_32 delta_lut; struct fixed31_32 delta_index; + const struct fixed31_32 one = dc_fixpt_from_int(1); i = 0; /* fixed_pt library has problems handling too small values */ @@ -1552,6 +1553,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num, } else hw_x = coordinates_x[i].x; + if (dc_fixpt_le(one, hw_x)) + hw_x = one; + norm_x = dc_fixpt_mul(norm_factor, hw_x); index = dc_fixpt_floor(norm_x); if (index < 0 || index > 255) diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 94132c70d7afd..5e8876ad1a1b0 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -282,7 +282,8 @@ struct amd_pm_funcs { int (*get_power_profile_mode)(void *handle, char *buf); int (*set_power_profile_mode)(void *handle, long *input, uint32_t size); int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size); - int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size); + int (*odn_edit_dpm_table)(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state); int (*smu_i2c_bus_access)(void *handle, bool acquire); /* export to DC */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index eab9768029c11..a98ea29b2fd52 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -924,7 +924,8 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, 
size); } -static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size) +static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 60cde0c528257..57a354a03e8ae 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -2962,7 +2962,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); - int32_t input_index, input_clk, input_vol, i; + int32_t input_clk, input_vol, i; + uint32_t input_index; int od8_id; int ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 1c526cb239e03..3a31058b029e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -379,16 +379,31 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) return 0; } -static int arcturus_check_powerplay_table(struct smu_context *smu) +static void arcturus_check_bxco_support(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + uint32_t val; if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) - smu_baco->platform_support = true; + powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) { + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + smu_baco->platform_support = + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : + false; + } +} + +static int arcturus_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = + table_context->power_play_table; + + arcturus_check_bxco_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -2131,13 +2146,11 @@ static void arcturus_get_unique_id(struct smu_context *smu) static bool arcturus_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - uint32_t val; if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev)) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? 
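/*
 * Illustrative sketch, not part of the patch: the arcturus refactor above
 * (and the navi10/sienna_cichlid ones below) moves the RCC_BIF_STRAP0
 * PX-capability read into the powerplay table check, so platform_support
 * is derived once from the strap value instead of being re-read on every
 * is_baco_supported() query. Reduced shape:
 */
#include <linux/types.h>
#include <linux/bits.h>

#define DEMO_STRAP_PX_CAPABLE	BIT(0)	/* stand-in for the real bitmask */

struct demo_baco {
	bool platform_support;
};

static void demo_check_baco_support(struct demo_baco *baco,
				    bool table_advertises_baco, u32 strap0)
{
	if (table_advertises_baco)
		baco->platform_support = !!(strap0 & DEMO_STRAP_PX_CAPABLE);
}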
true : false; + return true; } static int arcturus_set_df_cstate(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 2937784bc8249..a7773b6453d53 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -338,19 +338,34 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int navi10_check_powerplay_table(struct smu_context *smu) +static void navi10_check_bxco_support(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || + powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) { + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + smu_baco->platform_support = + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : + false; + } +} + +static int navi10_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = + table_context->power_play_table; if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; - if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) - smu_baco->platform_support = true; + navi10_check_bxco_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -1948,13 +1963,11 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, static bool navi10_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - uint32_t val; if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu))) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; + return true; } static int navi10_set_default_od_settings(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 49d7fa1d08427..45c8152622008 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -294,16 +294,47 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int sienna_cichlid_check_powerplay_table(struct smu_context *smu) +static void sienna_cichlid_check_bxco_support(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_7_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + uint32_t val; if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) - smu_baco->platform_support = true; + powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) { + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + smu_baco->platform_support = + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : + false; + + /* + * Disable BACO entry/exit completely on below SKUs to + * avoid hardware intermittent failures. 
+ */ + if (((adev->pdev->device == 0x73A1) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73BF) && + (adev->pdev->revision == 0xCF)) || + ((adev->pdev->device == 0x7422) && + (adev->pdev->revision == 0x00))) + smu_baco->platform_support = false; + + } +} + +static int sienna_cichlid_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_7_powerplay_table *powerplay_table = + table_context->power_play_table; + + if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + + sienna_cichlid_check_bxco_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -1736,13 +1767,11 @@ static int sienna_cichlid_run_btc(struct smu_context *smu) static bool sienna_cichlid_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - uint32_t val; if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu))) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; + return true; } static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) @@ -2806,6 +2835,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .run_btc = sienna_cichlid_run_btc, + .set_power_source = smu_v11_0_set_power_source, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .get_gpu_metrics = sienna_cichlid_get_gpu_metrics, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index e646f5931d795..89f20497c14f5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1476,6 +1476,10 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) if (!smu_baco->platform_support) return false; + /* return true if ASIC is in BACO state already */ + if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) + return true; + /* Arcturus does not support this bit mask */ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index a0f6ee15c2485..e95abeb64b934 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -386,10 +386,7 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); #else static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) { - unsigned int offset = adv7511->type == ADV7533 ? 
- ADV7533_REG_CEC_OFFSET : 0; - - regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, ADV7511_CEC_CTRL_POWER_DOWN); return 0; } @@ -397,7 +394,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) void adv7533_dsi_power_on(struct adv7511 *adv); void adv7533_dsi_power_off(struct adv7511 *adv); -void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); +enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, + const struct drm_display_mode *mode); int adv7533_patch_registers(struct adv7511 *adv); int adv7533_patch_cec_registers(struct adv7511 *adv); int adv7533_attach_dsi(struct adv7511 *adv); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index a20a45c0b353f..ddd1305b82b2c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -316,7 +316,7 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) goto err_cec_alloc; } - regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0); + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0); /* cec soft reset */ regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_SOFT_RESET + offset, 0x01); @@ -343,7 +343,7 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n", ret); err_cec_parse_dt: - regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, ADV7511_CEC_CTRL_POWER_DOWN); return ret == -EPROBE_DEFER ? ret : 0; } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 430c5e8f0388e..6ba860a16e96c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) } static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { if (mode->clock > 165000) return MODE_CLOCK_HIGH; @@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511, regmap_update_bits(adv7511->regmap, 0x17, 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); - if (adv7511->type == ADV7533 || adv7511->type == ADV7535) - adv7533_mode_set(adv7511, adj_mode); - drm_mode_copy(&adv7511->curr_mode, adj_mode); /* @@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge, adv7511_mode_set(adv, mode, adj_mode); } +static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct adv7511 *adv = bridge_to_adv7511(bridge); + + if (adv->type == ADV7533 || adv->type == ADV7535) + return adv7533_mode_valid(adv, mode); + else + return adv7511_mode_valid(adv, mode); +} + static int adv7511_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { @@ -963,6 +972,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = { .enable = adv7511_bridge_enable, .disable = adv7511_bridge_disable, .mode_set = adv7511_bridge_mode_set, + .mode_valid = adv7511_bridge_mode_valid, .attach = adv7511_bridge_attach, .detect = adv7511_bridge_detect, .get_edid = adv7511_bridge_get_edid, diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index 
aa19d5a40e319..f304a5ff8e596 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv) regmap_write(adv->regmap_cec, 0x27, 0x0b); } -void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode) +enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, + const struct drm_display_mode *mode) { + int lanes; struct mipi_dsi_device *dsi = adv->dsi; - int lanes, ret; - - if (adv->num_dsi_lanes != 4) - return; if (mode->clock > 80000) lanes = 4; else lanes = 3; - if (lanes != dsi->lanes) { - mipi_dsi_detach(dsi); - dsi->lanes = lanes; - ret = mipi_dsi_attach(dsi); - if (ret) - dev_err(&dsi->dev, "failed to change host lanes\n"); - } + /* + * TODO: add support for dynamic switching of lanes + * by using the bridge pre_enable() op. Till then, filter + * out the modes which would need a different number of lanes + * than what was configured in the device tree. + */ + if (lanes != dsi->lanes) + return MODE_BAD; + + return MODE_OK; } int adv7533_patch_registers(struct adv7511 *adv) diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index a7bcb429c02b5..e8baa07450b7d 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1865,12 +1865,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove); int analogix_dp_suspend(struct analogix_dp_device *dp) { clk_disable_unprepare(dp->clock); - - if (dp->plat_data->panel) { - if (drm_panel_unprepare(dp->plat_data->panel)) - DRM_ERROR("failed to turnoff the panel\n"); - } - return 0; } EXPORT_SYMBOL_GPL(analogix_dp_suspend); @@ -1885,13 +1879,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp) return ret; } - if (dp->plat_data->panel) { - if (drm_panel_prepare(dp->plat_data->panel)) { - DRM_ERROR("failed to setup the panel\n"); - return -EBUSY; - } - } - return 0; } EXPORT_SYMBOL_GPL(analogix_dp_resume); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 29b1ce2140abc..1dcc28a4d8537 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -816,13 +816,14 @@ static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt961 drm_connector_helper_add(&lt9611->connector, &lt9611_bridge_connector_helper_funcs); - drm_connector_attach_encoder(&lt9611->connector, bridge->encoder); if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found"); return -ENODEV; } + drm_connector_attach_encoder(&lt9611->connector, bridge->encoder); + return 0; } diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index cce98bf2a4e73..72248a565579e 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -296,7 +296,9 @@ static void ge_b850v3_lvds_remove(void) * This check is to avoid both the drivers * removing the bridge in their remove() function */ - if (!ge_b850v3_lvds_ptr) + if (!ge_b850v3_lvds_ptr || + !ge_b850v3_lvds_ptr->stdp2690_i2c || + !ge_b850v3_lvds_ptr->stdp4028_i2c) goto out; drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge); diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 7bd0affa057a5..9248510104005 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -333,8 +333,8 
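/*
 * Illustrative sketch, not part of the patch: the adv7533 change above
 * stops detaching/re-attaching the DSI host at mode_set time and instead
 * rejects, from mode_valid(), any mode whose required lane count differs
 * from the devicetree setup (pixel clocks above 80 MHz need 4 lanes,
 * slower modes use 3):
 */
#include <linux/types.h>

static int demo_dsi_lanes_for_mode(int clock_khz)
{
	return clock_khz > 80000 ? 4 : 3;
}

static bool demo_mode_acceptable(int clock_khz, int configured_lanes)
{
	/* dynamic lane switching would need a full detach/attach cycle */
	return demo_dsi_lanes_for_mode(clock_khz) == configured_lanes;
}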
@@ static int ps8640_probe(struct i2c_client *client) if (IS_ERR(ps_bridge->panel_bridge)) return PTR_ERR(ps_bridge->panel_bridge); - ps_bridge->supplies[0].supply = "vdd33"; - ps_bridge->supplies[1].supply = "vdd12"; + ps_bridge->supplies[0].supply = "vdd12"; + ps_bridge->supplies[1].supply = "vdd33"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index b10228b9e3a93..2c3c743df9505 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2609,6 +2609,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, * if supported. In any case the default RGB888 format is added */ + /* Default 8bit RGB fallback */ + output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; + if (max_bpc >= 16 && info->bpc == 16) { if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444) output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48; @@ -2642,9 +2645,6 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444) output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24; - /* Default 8bit RGB fallback */ - output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; - *num_output_fmts = i; return output_fmts; @@ -2984,6 +2984,7 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) { struct dw_hdmi *hdmi = dev_id; u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat; + enum drm_connector_status status = connector_status_unknown; intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0); @@ -3022,13 +3023,15 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) cec_notifier_phys_addr_invalidate(hdmi->cec_notifier); mutex_unlock(&hdmi->cec_notifier_mutex); } - } - if (intr_stat & HDMI_IH_PHY_STAT0_HPD) { - enum drm_connector_status status = phy_int_pol & HDMI_PHY_HPD - ? connector_status_connected - : connector_status_disconnected; + if (phy_stat & HDMI_PHY_HPD) + status = connector_status_connected; + + if (!(phy_stat & (HDMI_PHY_HPD | HDMI_PHY_RX_SENSE))) + status = connector_status_disconnected; + } + if (status != connector_status_unknown) { dev_dbg(hdmi->dev, "EVENT=%s\n", status == connector_status_connected ? 
"plugin" : "plugout"); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 1a58481037b3f..77a447a3fb1d1 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -621,9 +621,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata) &pdata->bridge.encoder->crtc->state->adjusted_mode; u8 hsync_polarity = 0, vsync_polarity = 0; - if (mode->flags & DRM_MODE_FLAG_PHSYNC) + if (mode->flags & DRM_MODE_FLAG_NHSYNC) hsync_polarity = CHA_HSYNC_POLARITY; - if (mode->flags & DRM_MODE_FLAG_PVSYNC) + if (mode->flags & DRM_MODE_FLAG_NVSYNC) vsync_polarity = CHA_VSYNC_POLARITY; ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG, diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 044acd07c1538..d799ec14fd7f5 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -753,8 +753,8 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge, struct drm_connector_state *conn_state, u32 out_bus_fmt) { + unsigned int i, num_in_bus_fmts = 0; struct drm_bridge_state *cur_state; - unsigned int num_in_bus_fmts, i; struct drm_bridge *prev_bridge; u32 *in_bus_fmts; int ret; @@ -875,7 +875,7 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_connector *conn = conn_state->connector; struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; - unsigned int i, num_out_bus_fmts; + unsigned int i, num_out_bus_fmts = 0; struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5163433ac561b..9c3bbe2c3e6f9 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -484,6 +484,9 @@ void drm_connector_cleanup(struct drm_connector *connector) mutex_destroy(&connector->mutex); memset(connector, 0, sizeof(*connector)); + + if (dev->registered) + drm_sysfs_hotplug_event(dev); } EXPORT_SYMBOL(drm_connector_cleanup); diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 1c9ea9f7fdafe..f2ff0bfdf54d7 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -62,23 +62,45 @@ ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, u8 offset, void *buffer, size_t size) { + u8 zero = 0; + char *tmpbuf = NULL; + /* + * As sub-addressing is not supported by all adaptors, + * always explicitly read from the start and discard + * any bytes that come before the requested offset. + * This way, no matter whether the adaptor supports it + * or not, we'll end up reading the proper data. + */ struct i2c_msg msgs[] = { { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = 0, .len = 1, - .buf = &offset, + .buf = &zero, }, { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = I2C_M_RD, - .len = size, + .len = size + offset, .buf = buffer, }, }; int ret; + if (offset) { + tmpbuf = kmalloc(size + offset, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + msgs[1].buf = tmpbuf; + } + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (tmpbuf) + memcpy(buffer, tmpbuf + offset, size); + + kfree(tmpbuf); + if (ret < 0) return ret; if (ret != ARRAY_SIZE(msgs)) @@ -205,18 +227,6 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) if (ret) return DRM_DP_DUAL_MODE_UNKNOWN; - /* - * Sigh. Some (maybe all?) 
type 1 adaptors are broken and ack - * the offset but ignore it, and instead they just always return - * data from the start of the HDMI ID buffer. So for a broken - * type 1 HDMI adaptor a single byte read will always give us - * 0x44, and for a type 1 DVI adaptor it should give 0x00 - * (assuming it implements any registers). Fortunately neither - * of those values will match the type 2 signature of the - * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with - * the type 2 adaptor detection safely even in the presence - * of broken type 1 adaptors. - */ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, &adaptor_id, sizeof(adaptor_id)); DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n", @@ -231,11 +241,10 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) return DRM_DP_DUAL_MODE_TYPE2_DVI; } /* - * If neither a proper type 1 ID nor a broken type 1 adaptor - * as described above, assume type 1, but let the user know - * that we may have misdetected the type. + * If not a proper type 1 ID, still assume type 1, but let + * the user know that we may have misdetected the type. */ - if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) + if (!is_type1_adaptor(adaptor_id)) DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n", adaptor_id); @@ -339,10 +348,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); * @enable: enable (as opposed to disable) the TMDS output buffers * * Set the state of the TMDS output buffers in the adaptor. For - * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As - * some type 1 adaptors have problems with registers (see comments - * in drm_dp_dual_mode_detect()) we avoid touching the register, - * making this function a no-op on type 1 adaptors. + * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. + * Type1 adaptors do not support any register writes. 
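The drm_dp_dual_mode_read() rework above captures a pattern worth noting: when register sub-addressing cannot be trusted, always read from offset 0 and discard the prefix. A minimal sketch of the same idea in isolation, assuming a hypothetical xfer() primitive that reads n bytes starting at register 0 (an illustration, not the helper's actual plumbing):

/*
 * Read `size` bytes at `offset` from a device with unreliable
 * sub-addressing: always start the read at register 0 and throw
 * away the first `offset` bytes. xfer() is a hypothetical
 * primitive standing in for the i2c_transfer() pair above.
 */
static int read_discard_prefix(u8 offset, void *buf, size_t size)
{
	u8 *tmp;
	int ret;

	if (!offset)
		return xfer(buf, size);	/* nothing to discard */

	tmp = kmalloc(size + offset, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = xfer(tmp, size + offset);
	if (!ret)
		memcpy(buf, tmp + offset, size);

	kfree(tmp);
	return ret;
}

As in the helper, the bounce buffer is only allocated when offset is non-zero, so the common offset-0 read stays allocation-free.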
* * Returns: * 0 on success, negative error code on failure diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 3c55753bab161..6ba16db775003 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -2172,17 +2172,8 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, struct drm_dp_phy_test_params *data, u8 dp_rev) { int err, i; - u8 link_config[2]; u8 test_pattern; - link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate); - link_config[1] = data->num_lanes; - if (data->enhanced_frame_cap) - link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2); - if (err < 0) - return err; - test_pattern = data->phy_pattern; if (dp_rev < 0x12) { test_pattern = (test_pattern << 2) & diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index ab423b0413ee5..0feeac52e4eb3 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -4856,14 +4856,14 @@ void drm_dp_mst_dump_topology(struct seq_file *m, seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); - if (ret) { + if (ret != 2) { seq_printf(m, "faux/mst read failed\n"); goto out; } seq_printf(m, "faux/mst: %*ph\n", 2, buf); ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); - if (ret) { + if (ret != 1) { seq_printf(m, "mst ctrl read failed\n"); goto out; } @@ -4871,7 +4871,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, /* dump the standard OUI branch header */ ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); - if (ret) { + if (ret != DP_BRANCH_OUI_HEADER_SIZE) { seq_printf(m, "branch oui read failed\n"); goto out; } @@ -5238,7 +5238,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm mst_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(mst_state)) - return -EINVAL; + return PTR_ERR(mst_state); list_for_each_entry(pos, &mst_state->vcpis, next) { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 006e3b896caea..4ca995ce19af6 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -610,7 +610,7 @@ static int drm_dev_init(struct drm_device *dev, mutex_init(&dev->clientlist_mutex); mutex_init(&dev->master_mutex); - ret = drmm_add_action(dev, drm_dev_init_release, NULL); + ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 722c7ebe4e889..92152c06b75b7 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -280,12 +280,15 @@ const struct drm_format_info *__drm_format_info(u32 format) .vsub = 2, .is_yuv = true }, { .format = DRM_FORMAT_Q410, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Q401, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, + .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true}, }; 
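The Q410/Q401 hsub/vsub change above is a correctness fix, not a cleanup: the subsampling factors are divisors in the format helpers, so a value of 0 means a division by zero for any code computing chroma plane dimensions. A hedged sketch of that arithmetic, in the spirit of the drm_format_info_plane_width()-style helpers (plane_width() here is illustrative, not the kernel's implementation):

/*
 * Per-plane width from the framebuffer width: non-luma planes are
 * divided by the horizontal subsampling factor. With the old
 * hsub = 0 entries for Q410/Q401 this would divide by zero; the
 * fix to hsub = vsub = 1 encodes "no subsampling" for these
 * 4:4:4 formats.
 */
static unsigned int plane_width(unsigned int fb_width,
				const struct drm_format_info *info,
				int plane)
{
	if (plane == 0)
		return fb_width;

	return fb_width / info->hsub;
}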
unsigned int i; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 5979af230eda0..8b30e8d83fbcf 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -166,21 +166,6 @@ void drm_gem_private_object_init(struct drm_device *dev, } EXPORT_SYMBOL(drm_gem_private_object_init); -static void -drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) -{ - /* - * Note: obj->dma_buf can't disappear as long as we still hold a - * handle reference in obj->handle_count. - */ - mutex_lock(&filp->prime.lock); - if (obj->dma_buf) { - drm_prime_remove_buf_handle_locked(&filp->prime, - obj->dma_buf); - } - mutex_unlock(&filp->prime.lock); -} - /** * drm_gem_object_handle_free - release resources bound to userspace handles * @obj: GEM object to clean up. @@ -254,7 +239,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) else if (dev->driver->gem_close_object) dev->driver->gem_close_object(obj, file_priv); - drm_gem_remove_prime_handles(obj, file_priv); + drm_prime_remove_buf_handle(&file_priv->prime, id); drm_vma_node_revoke(&obj->vma_node, file_priv); drm_gem_object_handle_put_unlocked(obj); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index cfacce0418a49..c56656a95cf99 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -563,12 +563,20 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - int ret; WARN_ON(shmem->base.import_attach); - ret = drm_gem_shmem_get_pages(shmem); - WARN_ON_ONCE(ret != 0); + mutex_lock(&shmem->pages_lock); + + /* + * We should have already pinned the pages when the buffer was first + * mmap'd, vm_open() just grabs an additional reference for the new + * mm the vma is getting copied into (ie. on fork()). 
+ */ + if (!WARN_ON_ONCE(!shmem->pages_use_count)) + shmem->pages_use_count++; + + mutex_unlock(&shmem->pages_lock); drm_gem_vm_open(vma); } @@ -616,10 +624,8 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) shmem = to_drm_gem_shmem_obj(obj); ret = drm_gem_shmem_get_pages(shmem); - if (ret) { - drm_gem_vm_close(vma); + if (ret) return ret; - } vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index b65865c630b0a..41efe40bc70f6 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -86,8 +86,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); -void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, - struct dma_buf *dma_buf); +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, + uint32_t handle); /* drm_drv.c */ struct drm_minor *drm_minor_acquire(unsigned int minor_id); @@ -116,7 +116,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank) static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank) { - kthread_destroy_worker(vblank->worker); + if (vblank->worker) + kthread_destroy_worker(vblank->worker); } int drm_vblank_worker_init(struct drm_vblank_crtc *vblank); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 4606cc938b36d..c160a45a4274f 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -473,7 +473,13 @@ EXPORT_SYMBOL(drm_invalid_op); */ static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) { - int len; + size_t len; + + /* don't attempt to copy a NULL pointer */ + if (WARN_ONCE(!value, "BUG: the value to copy was not set!")) { + *buf_len = 0; + return 0; + } /* don't overflow userbuf */ len = strlen(value); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 5dd475e829950..2c43d54766f34 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -300,6 +300,7 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + mipi_dsi_detach(dsi); mipi_dsi_device_unregister(dsi); return 0; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index f5ab891731d0b..ce739ba45c551 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -128,6 +128,18 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Acer Switch V 10 (SW5-017) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Anbernic Win600 */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"), + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), @@ -260,6 +272,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 
D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Ideapad D330-10IGL (HD) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Lenovo Yoga Book X90F / X91F / X91L */ .matches = { /* Non exact match to match all versions */ diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 9f955f2010c25..825499ea3ff59 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -187,29 +187,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri return -ENOENT; } -void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, - struct dma_buf *dma_buf) +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, + uint32_t handle) { struct rb_node *rb; - rb = prime_fpriv->dmabufs.rb_node; + mutex_lock(&prime_fpriv->lock); + + rb = prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; - member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); - if (member->dma_buf == dma_buf) { + member = rb_entry(rb, struct drm_prime_member, handle_rb); + if (member->handle == handle) { rb_erase(&member->handle_rb, &prime_fpriv->handles); rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs); - dma_buf_put(dma_buf); + dma_buf_put(member->dma_buf); kfree(member); - return; - } else if (member->dma_buf < dma_buf) { + break; + } else if (member->handle < handle) { rb = rb->rb_right; } else { rb = rb->rb_left; } } + + mutex_unlock(&prime_fpriv->lock); } void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 2520b7dad6ce7..f3281d56b1d82 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -408,6 +408,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) if (gpu->identity.model == chipModel_GC700) gpu->identity.features &= ~chipFeatures_FAST_CLEAR; + /* These models/revisions don't have the 2D pipe bit */ + if ((gpu->identity.model == chipModel_GC500 && + gpu->identity.revision <= 2) || + gpu->identity.model == chipModel_GC300) + gpu->identity.features |= chipFeatures_PIPE_2D; + if ((gpu->identity.model == chipModel_GC500 && gpu->identity.revision < 2) || (gpu->identity.model == chipModel_GC300 && @@ -441,8 +447,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); } - /* GC600 idle register reports zero bits where modules aren't present */ - if (gpu->identity.model == chipModel_GC600) + /* GC600/300 idle register reports zero bits where modules aren't present */ + if (gpu->identity.model == chipModel_GC600 || + gpu->identity.model == chipModel_GC300) gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | VIVS_HI_IDLE_STATE_RA | VIVS_HI_IDLE_STATE_SE | diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 9ba2fe48228f1..44fbc0a123bf3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -80,10 +80,10 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, return -EINVAL; for_each_sgtable_dma_sg(sgt, sg, i) { - u32 pa = sg_dma_address(sg) - sg->offset; + phys_addr_t pa = sg_dma_address(sg) - sg->offset; size_t bytes = sg_dma_len(sg) + sg->offset; - VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); + 
VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes); ret = etnaviv_context_map(context, da, pa, bytes, prot); if (ret) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 4d4a715b429d1..2c2b92324a2e9 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -60,8 +60,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) return drm_panel_get_modes(fsl_connector->panel, connector); } -static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { if (mode->hdisplay & 0xf) return MODE_ERROR; diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 3df6d6e850f52..70148ae16f146 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -529,15 +529,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc, WARN_ON(drm_crtc_vblank_get(crtc) != 0); gma_crtc->page_flip_event = event; + spin_unlock_irqrestore(&dev->event_lock, flags); /* Call this locked if we want an event at vblank interrupt. */ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); if (ret) { - gma_crtc->page_flip_event = NULL; - drm_crtc_vblank_put(crtc); + spin_lock_irqsave(&dev->event_lock, flags); + if (gma_crtc->page_flip_event) { + gma_crtc->page_flip_event = NULL; + drm_crtc_vblank_put(crtc); + } + spin_unlock_irqrestore(&dev->event_lock, flags); } - - spin_unlock_irqrestore(&dev->event_lock, flags); } else { ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); } diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 43943e9802036..4e41c144a2902 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" - depends on DRM && PCI && ARM64 + depends on DRM && PCI && (ARM64 || COMPILE_TEST) + depends on MMU select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index f2c8b56be9ead..261a5e97a0b4a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -163,6 +163,28 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) intel_dp_compute_rate(intel_dp, intel_dp->link_rate, &link_bw, &rate_select); + /* + * WaEdpLinkRateDataReload + * + * Parade PS8461E MUX (used on varius TGL+ laptops) needs + * to snoop the link rates reported by the sink when we + * use LINK_RATE_SET in order to operate in jitter cleaning + * mode (as opposed to redriver mode). Unfortunately it + * loses track of the snooped link rates when powered down, + * so we need to make it re-snoop often. Without this high + * link rates are not stable. 
+ */ + if (!link_bw) { + struct intel_connector *connector = intel_dp->attached_connector; + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n", + connector->base.base.id, connector->base.name); + + drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, + sink_rates, sizeof(sink_rates)); + } + if (link_bw) drm_dbg_kms(&i915->drm, "Using LINK_BW_SET value %02x\n", link_bw); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 8777d35ac7a73..117ea44fd8be7 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -133,9 +133,9 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, return ffs(intel_dsi->ports) - 1; if (seq_port) { - if (intel_dsi->ports & PORT_B) + if (intel_dsi->ports & BIT(PORT_B)) return PORT_B; - else if (intel_dsi->ports & PORT_C) + else if (intel_dsi->ports & BIT(PORT_C)) return PORT_C; } diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 4eaa4aa86ecdf..58f8fb7c8799c 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2760,13 +2760,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) if (!intel_sdvo_connector) return false; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; - } intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; @@ -2821,7 +2818,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - intel_sdvo->controlled_output |= type; intel_sdvo_connector->output_flag = type; if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { @@ -2862,13 +2858,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2898,13 +2891,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2937,16 +2927,39 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) return false; } +static u16 intel_sdvo_filter_output_flags(u16 flags) +{ + flags &= SDVO_OUTPUT_MASK; + + /* SDVO requires XXX1 function may 
not exist unless it has XXX0 function.*/ + if (!(flags & SDVO_OUTPUT_TMDS0)) + flags &= ~SDVO_OUTPUT_TMDS1; + + if (!(flags & SDVO_OUTPUT_RGB0)) + flags &= ~SDVO_OUTPUT_RGB1; + + if (!(flags & SDVO_OUTPUT_LVDS0)) + flags &= ~SDVO_OUTPUT_LVDS1; + + return flags; +} + static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) { - /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); + + flags = intel_sdvo_filter_output_flags(flags); + + intel_sdvo->controlled_output = flags; + + intel_sdvo_select_ddc_bus(i915, intel_sdvo); if (flags & SDVO_OUTPUT_TMDS0) if (!intel_sdvo_dvi_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) + if (flags & SDVO_OUTPUT_TMDS1) if (!intel_sdvo_dvi_init(intel_sdvo, 1)) return false; @@ -2967,7 +2980,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) + if (flags & SDVO_OUTPUT_RGB1) if (!intel_sdvo_analog_init(intel_sdvo, 1)) return false; @@ -2975,14 +2988,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_lvds_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) + if (flags & SDVO_OUTPUT_LVDS1) if (!intel_sdvo_lvds_init(intel_sdvo, 1)) return false; - if ((flags & SDVO_OUTPUT_MASK) == 0) { + if (flags == 0) { unsigned char bytes[2]; - intel_sdvo->controlled_output = 0; memcpy(bytes, &intel_sdvo->caps.output_flags, 2); DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(intel_sdvo), @@ -3394,8 +3406,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, */ intel_sdvo->base.cloneable = 0; - intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo); - /* Set the input timing to the screen. Assume always input 0. 
*/ if (!intel_sdvo_set_target_input(intel_sdvo)) goto err_output; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 8dd295dbe2416..dd35d3d7ad03d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -36,13 +36,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme goto err_unpin_pages; } - ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL); + ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL); if (ret) goto err_free; src = obj->mm.pages->sgl; dst = st->sgl; - for (i = 0; i < obj->mm.pages->nents; i++) { + for (i = 0; i < obj->mm.pages->orig_nents; i++) { sg_set_page(dst, sg_page(src), src->length, 0); dst = sg_next(dst); src = sg_next(src); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index ffcaee74a2493..545fa703d9757 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -296,10 +296,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, spin_unlock(&obj->vma.lock); obj->tiling_and_stride = tiling | stride; - i915_gem_object_unlock(obj); - - /* Force the fence to be reacquired for GTT access */ - i915_gem_object_release_mmap_gtt(obj); /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { @@ -312,6 +308,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, obj->bit_17 = NULL; } + i915_gem_object_unlock(obj); + + /* Force the fence to be reacquired for GTT access */ + i915_gem_object_release_mmap_gtt(obj); + return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index a33887f2464fa..5f86d9aacb8a3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -745,6 +745,10 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) if (!i915_mmio_reg_offset(rb.reg)) continue; + if (INTEL_GEN(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS || + engine->class == VIDEO_ENHANCEMENT_CLASS)) + rb.bit = _MASKED_BIT_ENABLE(rb.bit); + intel_uncore_write_fw(uncore, rb.reg, rb.bit); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 66fcbf9d0fdd3..cca285185dc44 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -200,7 +200,7 @@ out_active: spin_lock(&timelines->lock); if (flush_submission(gt, timeout)) /* Wait, there's more! */ active_count++; - return active_count ? timeout : 0; + return active_count ? 
timeout ?: -ETIME : 0; } int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index ac36b67fb46be..00b5912a88b82 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -289,6 +289,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) { struct intel_uncore *uncore = gt->uncore; + int loops = 2; int err; /* @@ -296,18 +297,39 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) * for fifo space for the write or forcewake the chip for * the read */ - intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); + do { + intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); - /* Wait for the device to ack the reset requests */ - err = __intel_wait_for_register_fw(uncore, - GEN6_GDRST, hw_domain_mask, 0, - 500, 0, - NULL); + /* + * Wait for the device to ack the reset requests. + * + * On some platforms, e.g. Jasperlake, we see that the + * engine register state is not cleared until shortly after + * GDRST reports completion, causing a failure as we try + * to immediately resume while the internal state is still + * in flux. If we immediately repeat the reset, the second + * reset appears to serialise with the first, and since + * it is a no-op, the registers should retain their reset + * value. However, there is still a concern that upon + * leaving the second reset, the internal engine state + * is still in flux and not ready for resuming. + */ + err = __intel_wait_for_register_fw(uncore, GEN6_GDRST, + hw_domain_mask, 0, + 2000, 0, + NULL); + } while (err == 0 && --loops); if (err) drm_dbg(&gt->i915->drm, "Wait for 0x%08x engines reset failed\n", hw_domain_mask); + /* + * As we have observed that the engine state is still volatile + * after GDRST is acked, impose a small delay to let everything settle.
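The retry loop above condenses to a reusable shape: issue the reset, poll for the ack, and on success repeat the now no-op reset once so it serialises with any state still settling; only a timeout exits early. Stripped of the i915 specifics (write_reg(), poll_zero() and GDRST are placeholder names here, not uncore APIs):

/*
 * Reset-and-retry: the loop repeats on *success*, so the second,
 * no-op reset serialises with hardware that acks while its
 * internal state is still in flux; a timeout exits immediately
 * and is reported to the caller.
 */
static int reset_with_retry(u32 mask)
{
	int loops = 2;
	int err;

	do {
		write_reg(GDRST, mask);			/* request reset */
		err = poll_zero(GDRST, mask, 2000);	/* wait for ack (us) */
	} while (err == 0 && --loops);

	udelay(50);	/* settle time after the final ack */
	return err;
}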
+ */ + udelay(50); + return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 4a3bde7c9f217..ae5cf2b55e159 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1212,6 +1212,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_L3_COH_PIPE); + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + PSDUNIT_CLKGATE_DIS); + + /* Wa_1406680159:icl,ehl */ + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); + /* Wa_1607087056:icl,ehl,jsl */ if (IS_ICELAKE(i915) || IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) { @@ -1816,22 +1832,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS, GEN11_ENABLE_32_PLANE_MODE); - /* - * Wa_1408615072:icl,ehl (vsunit) - * Wa_1407596294:icl,ehl (hsunit) - */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, - VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); - - /* Wa_1407352427:icl,ehl */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - PSDUNIT_CLKGATE_DIS); - - /* Wa_1406680159:icl,ehl */ - wa_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE, - GWUNIT_CLKGATE_DIS); - /* * Wa_1408767742:icl[a2..forever],ehl[all] * Wa_1605460711:icl[a0..c0] diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 9f1c209d92511..e08ed0e9f1653 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -175,8 +175,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) { - debugfs_remove_recursive(vgpu->debugfs); - vgpu->debugfs = NULL; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root && gvt->debugfs_root) { + debugfs_remove_recursive(vgpu->debugfs); + vgpu->debugfs = NULL; + } } /** @@ -199,6 +204,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt) */ void intel_gvt_debugfs_clean(struct intel_gvt *gvt) { - debugfs_remove_recursive(gvt->debugfs_root); - gvt->debugfs_root = NULL; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root) { + debugfs_remove_recursive(gvt->debugfs_root); + gvt->debugfs_root = NULL; + } } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index a3a4305eda01b..0201f9b5f87e7 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1192,10 +1192,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, for_each_shadow_entry(sub_spt, &sub_se, sub_index) { ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, start_gfn + sub_index, PAGE_SIZE, &dma_addr); - if (ret) { - ppgtt_invalidate_spt(spt); - return ret; - } + if (ret) + goto err; sub_se.val64 = se->val64; /* Copy the PAT field from PDE. */ @@ -1214,6 +1212,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, ops->set_pfn(se, sub_spt->shadow_page.mfn); ppgtt_set_shadow_entry(spt, se, index); return 0; +err: + /* Cancel the existing address mappings of DMA addr. */ + for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) { + gvt_vdbg_mm("invalidate 4K entry\n"); + ppgtt_invalidate_pte(sub_spt, &sub_se); + } + /* Release the newly allocated spt.
*/ + trace_spt_change(sub_spt->vgpu->id, "release", sub_spt, + sub_spt->guest_page.gfn, sub_spt->shadow_page.type); + ppgtt_free_spt(sub_spt); + return ret; } static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index aed2ef6466a2d..2bb6203298bcd 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -647,6 +647,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload) if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || !workload->shadow_mm->ppgtt_mm.shadowed) { + intel_vgpu_unpin_mm(workload->shadow_mm); gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index fb5e30de78c2a..63dc0fcfcc8eb 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -403,7 +403,8 @@ static const struct intel_device_info ilk_m_info = { .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ - .has_rc6p = 1, \ + /* snb does support rc6p, but enabling it causes various issues */ \ + .has_rc6p = 0, \ .has_rps = true, \ .dma_mask_size = 40, \ .ppgtt_type = INTEL_PPGTT_ALIASING, \ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2f2dc029668bc..1b5e8d3e45a98 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5145,10 +5145,14 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || modifier == I915_FORMAT_MOD_Yf_TILED || modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + modifier == I915_FORMAT_MOD_Yf_TILED_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + modifier == I915_FORMAT_MOD_Yf_TILED_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); wp->width = width; diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 2a8d2e32e7b42..9fe6a47331067 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -212,8 +212,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector) return ret; } -static int imx_tve_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +imx_tve_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct imx_tve *tve = con_to_tve(connector); unsigned long rate; diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index e34718cf5c2e9..5ec9770e401e4 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -1120,7 +1120,11 @@ static int ingenic_drm_init(void) return err; } - return platform_driver_register(&ingenic_drm_driver); + err = platform_driver_register(&ingenic_drm_driver); + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err) + platform_driver_unregister(ingenic_ipu_driver_ptr); + + return err; } module_init(ingenic_drm_init); diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index c1ae336df6833..aa3d472c79d77 
100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -367,9 +367,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) if (--dpi->refcount != 0) return; - if (dpi->pinctrl && dpi->pins_gpio) - pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); - mtk_dpi_disable(dpi); clk_disable_unprepare(dpi->pixel_clk); clk_disable_unprepare(dpi->engine_clk); @@ -394,9 +391,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) goto err_pixel; } - if (dpi->pinctrl && dpi->pins_dpi) - pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); - return 0; err_pixel: @@ -525,12 +519,18 @@ static void mtk_dpi_bridge_disable(struct drm_bridge *bridge) struct mtk_dpi *dpi = bridge_to_dpi(bridge); mtk_dpi_power_off(dpi); + + if (dpi->pinctrl && dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); } static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); + if (dpi->pinctrl && dpi->pins_dpi) + pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); + mtk_dpi_power_on(dpi); mtk_dpi_set_display_mode(dpi, &dpi->mode); mtk_dpi_enable(dpi); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 7d37d2a01e3cf..146c4d04f572d 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -668,6 +668,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) if (--dsi->refcount != 0) return; + /* + * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since + * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), + * which needs irq for vblank, and mtk_dsi_stop() will disable irq. + * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), + * after dsi is fully set. + */ + mtk_dsi_stop(dsi); + + mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); mtk_dsi_reset_engine(dsi); mtk_dsi_lane0_ulp_mode_enter(dsi); mtk_dsi_clk_ulp_mode_enter(dsi); @@ -718,17 +728,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) if (!dsi->enabled) return; - /* - * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since - * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), - * which needs irq for vblank, and mtk_dsi_stop() will disable irq. - * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), - * after dsi is fully set.
- */ - mtk_dsi_stop(dsi); - - mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); - dsi->enabled = false; } @@ -791,10 +790,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = { .attach = mtk_dsi_bridge_attach, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_disable = mtk_dsi_bridge_atomic_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_enable = mtk_dsi_bridge_atomic_enable, .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable, .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, .mode_set = mtk_dsi_bridge_mode_set, }; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2d022f3fb437e..b0bfe85f5f6a8 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -528,6 +528,13 @@ static int meson_drv_probe(struct platform_device *pdev) return 0; }; +static int meson_drv_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &meson_drv_master_ops); + + return 0; +} + static struct meson_drm_match_data meson_drm_gxbb_data = { .compat = VPU_COMPATIBLE_GXBB, }; @@ -565,6 +572,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = { static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, + .remove = meson_drv_remove, .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 35338ed182099..255c6b863f8d2 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -163,7 +163,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, /* Enable OSD and BLK0, set max global alpha */ priv->viu.osd1_ctrl_stat = OSD_ENABLE | - (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | + (0x100 << OSD_GLOBAL_ALPHA_SHIFT) | OSD_BLK0_ENABLE; priv->viu.osd1_ctrl_stat2 = readl(priv->io_base + diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index bb7e109534de1..cd399b0b71814 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv, priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12)); writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21)); - writel((m[11] & 0x1fff) << 16, + writel((m[11] & 0x1fff), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22)); writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff), @@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv) /* Initialize OSD1 fifo control register */ reg = VIU_OSD_DDR_PRIORITY_URGENT | - VIU_OSD_HOLD_FIFO_LINES(31) | VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */ VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */ VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) - reg |= VIU_OSD_BURST_LENGTH_32; + reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31)); else - reg |= VIU_OSD_BURST_LENGTH_64; + reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 340682cd0f320..2457ef9851bb3 100644 --- a/drivers/gpu/drm/msm/Makefile +++ 
b/drivers/gpu/drm/msm/Makefile @@ -19,7 +19,7 @@ msm-y := \ hdmi/hdmi.o \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ - hdmi/hdmi_connector.o \ + hdmi/hdmi_hpd.o \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index c3775f79525a7..4656e707fe27d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -28,11 +28,9 @@ enum { ADRENO_FW_MAX, }; -enum adreno_quirks { - ADRENO_QUIRK_TWO_PASS_USE_WFI = 1, - ADRENO_QUIRK_FAULT_DETECT_MASK = 2, - ADRENO_QUIRK_LMLOADKILL_DISABLE = 3, -}; +#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0) +#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1) +#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2) struct adreno_rev { uint8_t core; @@ -62,7 +60,7 @@ struct adreno_info { const char *name; const char *fw[ADRENO_FW_MAX]; uint32_t gmem; - enum adreno_quirks quirks; + u64 quirks; struct msm_gpu *(*init)(struct drm_device *dev); const char *zapfw; u32 inactive_period; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 7503f093f3b64..b7841f7fc10a8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -675,12 +675,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) _dpu_kms_mmu_destroy(dpu_kms); if (dpu_kms->catalog) { - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - - if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) { - dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]); - dpu_kms->hw_vbif[vbif_idx] = NULL; + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + if (dpu_kms->hw_vbif[i]) { + dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); + dpu_kms->hw_vbif[i] = NULL; } } } @@ -987,7 +985,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx, + dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx, dpu_kms->vbif[vbif_idx], dpu_kms->catalog); if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) { rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 5e8c3f3e66256..fc86d34aec805 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -11,6 +11,14 @@ #include "dpu_hw_vbif.h" #include "dpu_trace.h" +static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx) +{ + if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif)) + return dpu_kms->hw_vbif[vbif_idx]; + + return NULL; +} + /** * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt * @vbif: Pointer to hardware vbif driver @@ -148,20 +156,15 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif, void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, struct dpu_vbif_set_ot_params *params) { - struct dpu_hw_vbif *vbif = NULL; + struct dpu_hw_vbif *vbif; struct dpu_hw_mdp *mdp; bool forced_on = false; u32 ot_lim; - int ret, i; + int ret; mdp = dpu_kms->hw_mdp; - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - if (dpu_kms->hw_vbif[i] && - dpu_kms->hw_vbif[i]->idx == params->vbif_idx) - vbif = dpu_kms->hw_vbif[i]; - } - + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); if (!vbif || !mdp) { DPU_DEBUG("invalid arguments vbif %d mdp %d\n", vbif != NULL, mdp != NULL); @@ -204,7 +207,7 @@ void dpu_vbif_set_ot_limit(struct 
dpu_kms *dpu_kms, void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params) { - struct dpu_hw_vbif *vbif = NULL; + struct dpu_hw_vbif *vbif; struct dpu_hw_mdp *mdp; bool forced_on = false; const struct dpu_vbif_qos_tbl *qos_tbl; @@ -216,13 +219,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, } mdp = dpu_kms->hw_mdp; - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - if (dpu_kms->hw_vbif[i] && - dpu_kms->hw_vbif[i]->idx == params->vbif_idx) { - vbif = dpu_kms->hw_vbif[i]; - break; - } - } + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); if (!vbif || !vbif->cap) { DPU_ERROR("invalid vbif %d\n", params->vbif_idx); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c index 7288041dd86ad..7444b75c42157 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c @@ -56,8 +56,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) return ret; } -static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +mdp4_lvds_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct mdp4_lvds_connector *mdp4_lvds_connector = to_mdp4_lvds_connector(connector); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 19b35ae3e9272..a73a2b28a70a6 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -423,6 +423,10 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux) aux->isr = dp_catalog_aux_get_irq(aux->catalog); + /* no interrupts pending, return immediately */ + if (!aux->isr) + return; + if (!aux->cmd_busy) return; diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 2da6982efdbfc..613348b022fe8 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -416,7 +416,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, if (rate == link_rate_hbr3) pixel_div = 6; - else if (rate == 1620000 || rate == 270000) + else if (rate == 162000 || rate == 270000) pixel_div = 2; else if (rate == link_rate_hbr2) pixel_div = 4; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index a3de1d0523ea0..1c3dcbc6cce8c 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -848,7 +848,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display, dp = container_of(dp_display, struct dp_display_private, dp_display); - dp->panel->dp_mode.drm_mode = mode->drm_mode; + drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); dp->panel->dp_mode.bpp = mode->bpp; dp->panel->dp_mode.capabilities = mode->capabilities; dp_panel_init_panel_info(dp->panel); @@ -1201,7 +1201,7 @@ int dp_display_request_irq(struct msm_dp *dp_display) return -EINVAL; } - rc = devm_request_irq(&dp->pdev->dev, dp->irq, + rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq, dp_display_irq_handler, IRQF_TRIGGER_HIGH, "dp_display_isr", dp); if (rc < 0) { diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index f845333593daa..7377596a13f4b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -205,6 +205,12 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, return -EINVAL; priv = dev->dev_private; + + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + 
DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + msm_dsi->dev = dev; ret = msm_dsi_host_modeset_init(msm_dsi->host, dev); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 28b33b35a30ce..efb14043a6ec4 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -8,6 +8,8 @@ #include #include +#include + #include #include "hdmi.h" @@ -41,7 +43,7 @@ static irqreturn_t msm_hdmi_irq(int irq, void *dev_id) struct hdmi *hdmi = dev_id; /* Process HPD: */ - msm_hdmi_connector_irq(hdmi->connector); + msm_hdmi_hpd_irq(hdmi->bridge); /* Process DDC: */ msm_hdmi_i2c_irq(hdmi->i2c); @@ -245,6 +247,20 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) hdmi->pwr_clks[i] = clk; } + hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); + /* This will catch e.g. -EPROBE_DEFER */ + if (IS_ERR(hdmi->hpd_gpiod)) { + ret = PTR_ERR(hdmi->hpd_gpiod); + DRM_DEV_ERROR(&pdev->dev, "failed to get hpd gpio: (%d)\n", ret); + goto fail; + } + + if (!hdmi->hpd_gpiod) + DBG("failed to get HPD gpio"); + + if (hdmi->hpd_gpiod) + gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD"); + pm_runtime_enable(&pdev->dev); hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); @@ -293,6 +309,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, struct platform_device *pdev = hdmi->pdev; int ret; + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + hdmi->dev = dev; hdmi->encoder = encoder; @@ -306,7 +327,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - hdmi->connector = msm_hdmi_connector_init(hdmi); + hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder); if (IS_ERR(hdmi->connector)) { ret = PTR_ERR(hdmi->connector); DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret); @@ -314,6 +335,8 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } + drm_connector_attach_encoder(hdmi->connector, hdmi->encoder); + hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!hdmi->irq) { ret = -EINVAL; @@ -321,8 +344,8 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - ret = devm_request_irq(&pdev->dev, hdmi->irq, - msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + ret = devm_request_irq(dev->dev, hdmi->irq, + msm_hdmi_irq, IRQF_TRIGGER_HIGH, "hdmi_isr", hdmi); if (ret < 0) { DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n", @@ -330,7 +353,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - ret = msm_hdmi_hpd_enable(hdmi->connector); + drm_bridge_connector_enable_hpd(hdmi->connector); + + ret = msm_hdmi_hpd_enable(hdmi->bridge); if (ret < 0) { DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); goto fail; @@ -418,20 +443,6 @@ static struct hdmi_platform_config hdmi_tx_8996_config = { .hpd_freq = hpd_clk_freq_8x74, }; -static const struct { - const char *name; - const bool output; - const int value; - const char *label; -} msm_hdmi_gpio_pdata[] = { - { "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" }, - { "qcom,hdmi-tx-ddc-data", true, 1, "HDMI_DDC_DATA" }, - { "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" }, - { "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" }, - { "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" }, - { "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" }, -}; - /* * HDMI audio codec callbacks */ @@ -544,7 +555,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; 
struct device_node *of_node = dev->of_node; - int i, err; + int err; hdmi_cfg = (struct hdmi_platform_config *) of_device_get_match_data(dev); @@ -556,42 +567,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) hdmi_cfg->mmio_name = "core_physical"; hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; - for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { - const char *name = msm_hdmi_gpio_pdata[i].name; - struct gpio_desc *gpiod; - - /* - * We are fetching the GPIO lines "as is" since the connector - * code is enabling and disabling the lines. Until that point - * the power-on default value will be kept. - */ - gpiod = devm_gpiod_get_optional(dev, name, GPIOD_ASIS); - /* This will catch e.g. -PROBE_DEFER */ - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - if (!gpiod) { - /* Try a second time, stripping down the name */ - char name3[32]; - - /* - * Try again after stripping out the "qcom,hdmi-tx" - * prefix. This is mainly to match "hpd-gpios" used - * in the upstream bindings. - */ - if (sscanf(name, "qcom,hdmi-tx-%s", name3)) - gpiod = devm_gpiod_get_optional(dev, name3, GPIOD_ASIS); - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - if (!gpiod) - DBG("failed to get gpio: %s", name); - } - hdmi_cfg->gpios[i].gpiod = gpiod; - if (gpiod) - gpiod_set_consumer_name(gpiod, msm_hdmi_gpio_pdata[i].label); - hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output; - hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value; - } - dev->platform_data = hdmi_cfg; hdmi = msm_hdmi_init(to_platform_device(dev)); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index d0b84f0abee17..20f554312b17c 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -19,17 +19,9 @@ #include "msm_drv.h" #include "hdmi.xml.h" -#define HDMI_MAX_NUM_GPIO 6 - struct hdmi_phy; struct hdmi_platform_config; -struct hdmi_gpio_data { - struct gpio_desc *gpiod; - bool output; - int value; -}; - struct hdmi_audio { bool enabled; struct hdmi_audio_infoframe infoframe; @@ -61,6 +53,8 @@ struct hdmi { struct clk **hpd_clks; struct clk **pwr_clks; + struct gpio_desc *hpd_gpiod; + struct hdmi_phy *phy; struct device *phy_dev; @@ -109,10 +103,14 @@ struct hdmi_platform_config { /* clks that need to be on for screen pwr (ie pixel clk): */ const char **pwr_clk_names; int pwr_clk_cnt; +}; - /* gpio's: */ - struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO]; +struct hdmi_bridge { + struct drm_bridge base; + struct hdmi *hdmi; + struct work_struct hpd_work; }; +#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on); @@ -230,13 +228,11 @@ void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate); struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi); void msm_hdmi_bridge_destroy(struct drm_bridge *bridge); -/* - * hdmi connector: - */ - -void msm_hdmi_connector_irq(struct drm_connector *connector); -struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi); -int msm_hdmi_hpd_enable(struct drm_connector *connector); +void msm_hdmi_hpd_irq(struct drm_bridge *bridge); +enum drm_connector_status msm_hdmi_bridge_detect( + struct drm_bridge *bridge); +int msm_hdmi_hpd_enable(struct drm_bridge *bridge); +void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge); /* * i2c adapter for ddc: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 6e380db9287ba..efcfdd70a02e0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ 
b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -5,17 +5,16 @@ */ #include +#include +#include "msm_kms.h" #include "hdmi.h" -struct hdmi_bridge { - struct drm_bridge base; - struct hdmi *hdmi; -}; -#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) - void msm_hdmi_bridge_destroy(struct drm_bridge *bridge) { + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + + msm_hdmi_hpd_disable(hdmi_bridge); } static void msm_hdmi_power_on(struct drm_bridge *bridge) @@ -259,14 +258,76 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, msm_hdmi_audio_update(hdmi); } +static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + struct edid *edid; + uint32_t hdmi_ctrl; + + hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); + + edid = drm_get_edid(connector, hdmi->i2c); + + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); + + hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); + + return edid; +} + +static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + const struct hdmi_platform_config *config = hdmi->config; + struct msm_drm_private *priv = bridge->dev->dev_private; + struct msm_kms *kms = priv->kms; + long actual, requested; + + requested = 1000 * mode->clock; + actual = kms->funcs->round_pixclk(kms, + requested, hdmi_bridge->hdmi->encoder); + + /* for mdp5/apq8074, we manage our own pixel clk (as opposed to + * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder + * instead): + */ + if (config->pwr_clk_cnt > 0) + actual = clk_round_rate(hdmi->pwr_clks[0], actual); + + DBG("requested=%ld, actual=%ld", requested, actual); + + if (actual != requested) + return MODE_CLOCK_RANGE; + + return 0; +} + static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = { .pre_enable = msm_hdmi_bridge_pre_enable, .enable = msm_hdmi_bridge_enable, .disable = msm_hdmi_bridge_disable, .post_disable = msm_hdmi_bridge_post_disable, .mode_set = msm_hdmi_bridge_mode_set, + .mode_valid = msm_hdmi_bridge_mode_valid, + .get_edid = msm_hdmi_bridge_get_edid, + .detect = msm_hdmi_bridge_detect, }; +static void +msm_hdmi_hotplug_work(struct work_struct *work) +{ + struct hdmi_bridge *hdmi_bridge = + container_of(work, struct hdmi_bridge, hpd_work); + struct drm_bridge *bridge = &hdmi_bridge->base; + + drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge)); +} /* initialize bridge */ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi) @@ -283,11 +344,17 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi) } hdmi_bridge->hdmi = hdmi; + INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work); bridge = &hdmi_bridge->base; bridge->funcs = &msm_hdmi_bridge_funcs; + bridge->ddc = hdmi->i2c; + bridge->type = DRM_MODE_CONNECTOR_HDMIA; + bridge->ops = DRM_BRIDGE_OP_HPD | + DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_EDID; - ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0); + ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c similarity index 51% rename from drivers/gpu/drm/msm/hdmi/hdmi_connector.c rename to 
drivers/gpu/drm/msm/hdmi/hdmi_hpd.c index 58707a1f3878f..52ebe562ca9be 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c @@ -11,13 +11,6 @@ #include "msm_kms.h" #include "hdmi.h" -struct hdmi_connector { - struct drm_connector base; - struct hdmi *hdmi; - struct work_struct hpd_work; -}; -#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) - static void msm_hdmi_phy_reset(struct hdmi *hdmi) { unsigned int val; @@ -67,48 +60,6 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi) } } -static int gpio_config(struct hdmi *hdmi, bool on) -{ - const struct hdmi_platform_config *config = hdmi->config; - int i; - - if (on) { - for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { - struct hdmi_gpio_data gpio = config->gpios[i]; - - if (gpio.gpiod) { - if (gpio.output) { - gpiod_direction_output(gpio.gpiod, - gpio.value); - } else { - gpiod_direction_input(gpio.gpiod); - gpiod_set_value_cansleep(gpio.gpiod, - gpio.value); - } - } - } - - DBG("gpio on"); - } else { - for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { - struct hdmi_gpio_data gpio = config->gpios[i]; - - if (!gpio.gpiod) - continue; - - if (gpio.output) { - int value = gpio.value ? 0 : 1; - - gpiod_set_value_cansleep(gpio.gpiod, value); - } - } - - DBG("gpio off"); - } - - return 0; -} - static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) { const struct hdmi_platform_config *config = hdmi->config; @@ -139,10 +90,10 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) } } -int msm_hdmi_hpd_enable(struct drm_connector *connector) +int msm_hdmi_hpd_enable(struct drm_bridge *bridge) { - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; uint32_t hpd_ctrl; @@ -164,11 +115,8 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector) goto fail; } - ret = gpio_config(hdmi, true); - if (ret) { - DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret); - goto fail; - } + if (hdmi->hpd_gpiod) + gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1); pm_runtime_get_sync(dev); enable_hpd_clocks(hdmi, true); @@ -202,9 +150,9 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector) return ret; } -static void hdp_disable(struct hdmi_connector *hdmi_connector) +void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge) { - struct hdmi *hdmi = hdmi_connector->hdmi; + struct hdmi *hdmi = hdmi_bridge->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; int i, ret = 0; @@ -217,10 +165,6 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) enable_hpd_clocks(hdmi, false); pm_runtime_put_autosuspend(dev); - ret = gpio_config(hdmi, false); - if (ret) - dev_warn(dev, "failed to unconfigure GPIOs: %d\n", ret); - ret = pinctrl_pm_select_sleep_state(dev); if (ret) dev_warn(dev, "pinctrl state chg failed: %d\n", ret); @@ -233,19 +177,10 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) } } -static void -msm_hdmi_hotplug_work(struct work_struct *work) -{ - struct hdmi_connector *hdmi_connector = - container_of(work, struct hdmi_connector, hpd_work); - struct drm_connector *connector = &hdmi_connector->base; - drm_helper_hpd_irq_event(connector->dev); -} - -void msm_hdmi_connector_irq(struct drm_connector *connector) +void msm_hdmi_hpd_irq(struct drm_bridge *bridge) 
{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; uint32_t hpd_int_status, hpd_int_ctrl; /* Process HPD: */ @@ -268,7 +203,7 @@ void msm_hdmi_connector_irq(struct drm_connector *connector) hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); - queue_work(hdmi->workq, &hdmi_connector->hpd_work); + queue_work(hdmi->workq, &hdmi_bridge->hpd_work); } } @@ -291,21 +226,16 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi) #define HPD_GPIO_INDEX 2 static enum drm_connector_status detect_gpio(struct hdmi *hdmi) { - const struct hdmi_platform_config *config = hdmi->config; - struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; - - return gpiod_get_value(hpd_gpio.gpiod) ? + return gpiod_get_value(hdmi->hpd_gpiod) ? connector_status_connected : connector_status_disconnected; } -static enum drm_connector_status hdmi_connector_detect( - struct drm_connector *connector, bool force) +enum drm_connector_status msm_hdmi_bridge_detect( + struct drm_bridge *bridge) { - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; - const struct hdmi_platform_config *config = hdmi->config; - struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; enum drm_connector_status stat_gpio, stat_reg; int retry = 20; @@ -313,7 +243,7 @@ static enum drm_connector_status hdmi_connector_detect( * some platforms may not have hpd gpio. Rely only on the status * provided by REG_HDMI_HPD_INT_STATUS in this case. 
*/ - if (!hpd_gpio.gpiod) + if (!hdmi->hpd_gpiod) return detect_reg(hdmi); do { @@ -337,115 +267,3 @@ static enum drm_connector_status hdmi_connector_detect( return stat_gpio; } - -static void hdmi_connector_destroy(struct drm_connector *connector) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - - hdp_disable(hdmi_connector); - - drm_connector_cleanup(connector); - - kfree(hdmi_connector); -} - -static int msm_hdmi_connector_get_modes(struct drm_connector *connector) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; - struct edid *edid; - uint32_t hdmi_ctrl; - int ret = 0; - - hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); - hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); - - edid = drm_get_edid(connector, hdmi->i2c); - - hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); - - hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); - drm_connector_update_edid_property(connector, edid); - - if (edid) { - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - } - - return ret; -} - -static int msm_hdmi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; - const struct hdmi_platform_config *config = hdmi->config; - struct msm_drm_private *priv = connector->dev->dev_private; - struct msm_kms *kms = priv->kms; - long actual, requested; - - requested = 1000 * mode->clock; - actual = kms->funcs->round_pixclk(kms, - requested, hdmi_connector->hdmi->encoder); - - /* for mdp5/apq8074, we manage our own pixel clk (as opposed to - * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder - * instead): - */ - if (config->pwr_clk_cnt > 0) - actual = clk_round_rate(hdmi->pwr_clks[0], actual); - - DBG("requested=%ld, actual=%ld", requested, actual); - - if (actual != requested) - return MODE_CLOCK_RANGE; - - return 0; -} - -static const struct drm_connector_funcs hdmi_connector_funcs = { - .detect = hdmi_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = hdmi_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = { - .get_modes = msm_hdmi_connector_get_modes, - .mode_valid = msm_hdmi_connector_mode_valid, -}; - -/* initialize connector */ -struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) -{ - struct drm_connector *connector = NULL; - struct hdmi_connector *hdmi_connector; - - hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); - if (!hdmi_connector) - return ERR_PTR(-ENOMEM); - - hdmi_connector->hdmi = hdmi; - INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work); - - connector = &hdmi_connector->base; - - drm_connector_init_with_ddc(hdmi->dev, connector, - &hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA, - hdmi->i2c); - drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs); - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT; - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - drm_connector_attach_encoder(connector, hdmi->encoder); - - return connector; -} diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index fea30e7aa9e83..084b6ae2a4761 100644 --- 
a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -191,6 +191,9 @@ static int rd_open(struct inode *inode, struct file *file) file->private_data = rd; rd->open = true; + /* Reset fifo to clear any previously unread data: */ + rd->fifo.head = rd->fifo.tail = 0; + /* the parsing tools need to know gpu-id to know which * register database to load. */ diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index b4946b595d86e..7633f56bc0a4f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -279,8 +279,10 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, break; } - if (WARN_ON(pi < 0)) + if (WARN_ON(pi < 0)) { + kfree(nvbo); return ERR_PTR(-EINVAL); + } /* Disable compression if suitable settings couldn't be found. */ if (nvbo->comp && !vmm->page[pi].comp) { @@ -821,6 +823,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, if (ret == 0) { ret = nouveau_fence_new(chan, false, &fence); if (ret == 0) { + /* TODO: figure out a better solution here + * + * wait on the fence here explicitly as going through + * ttm_bo_move_accel_cleanup somehow doesn't seem to do it. + * + * Without this the operation can time out and we'll fall back to a + * software copy, which might take several minutes to finish. + */ + nouveau_fence_wait(fence, false, false); ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 4c992fd5bd68a..9542fc63e7968 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -500,7 +500,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, connector->interlace_allowed = nv_encoder->caps.dp_interlace; else - connector->interlace_allowed = true; + connector->interlace_allowed = + drm->client.device.info.family < NV_DEVICE_INFO_V0_VOLTA; connector->doublescan_allowed = true; } else if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS || diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index 5f5b87f995468..f08bda533bd94 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -89,7 +89,6 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART, sg, robj); if (ret) { - nouveau_bo_ref(NULL, &nvbo); obj = ERR_PTR(ret); goto unlock; } diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 6ccbc29c4ce4b..d5b3123ed081e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1173,6 +1173,7 @@ static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports) default: break; } + of_node_put(port); } } @@ -1205,11 +1206,13 @@ static int dss_init_ports(struct dss_device *dss) default: break; } + of_node_put(port); } return 0; error: + of_node_put(port); __dss_uninit_ports(dss, i); return r; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index bf2c845ef3a20..1a87cc445b5e1 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2201,7 +2201,7 @@ static const struct panel_desc innolux_g121i1_l01 = { .enable = 200, .disable = 20, }, - .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS, }; @@ -2655,6 +2655,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = { static const struct panel_desc logictechno_lt161010_2nh = { .timings = &logictechno_lt161010_2nh_timing, .num_timings = 1, + .bpc = 6, .size = { .width = 154, .height = 86, @@ -2684,6 +2685,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = { static const struct panel_desc logictechno_lt170410_2whc = { .timings = &logictechno_lt170410_2whc_timing, .num_timings = 1, + .bpc = 8, .size = { .width = 217, .height = 136, diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c index 4d2a149b202cb..cd9f01940b173 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c @@ -384,7 +384,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) st7701->dsi = dsi; st7701->desc = desc; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret) + goto err_attach; + + return 0; + +err_attach: + drm_panel_remove(&st7701->panel); + return ret; } static int st7701_dsi_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig index 86cdc0ce79e65..77f4d32e52045 100644 --- a/drivers/gpu/drm/panfrost/Kconfig +++ b/drivers/gpu/drm/panfrost/Kconfig @@ -3,7 +3,8 @@ config DRM_PANFROST tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)" depends on DRM - depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on ARM || ARM64 || COMPILE_TEST + depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE depends on MMU select DRM_SCHED select IOMMU_SUPPORT diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 1dfc457bbefc8..4af25c0b6570f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -81,6 +81,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; struct panfrost_gem_mapping *mapping; + int ret; if (!args->size || args->pad || (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) @@ -91,21 +92,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, !(args->flags & PANFROST_BO_NOEXEC)) return -EINVAL; - bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags, - &args->handle); + bo = panfrost_gem_create(dev, args->size, args->flags); if (IS_ERR(bo)) return PTR_ERR(bo); + ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); + if (ret) + goto out; + mapping = panfrost_gem_mapping_get(bo, priv); - if (!mapping) { - drm_gem_object_put(&bo->base.base); - return -EINVAL; + if (mapping) { + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); + } else { + /* This can only happen if the handle from + * drm_gem_handle_create() has already been guessed and freed + * by user space + */ + ret = -EINVAL; } - args->offset = mapping->mmnode.start << PAGE_SHIFT; - panfrost_gem_mapping_put(mapping); - - return 0; +out: + drm_gem_object_put(&bo->base.base); + return ret; } /** diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 1d917cea5ceb4..c843fbfdb878e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -232,12 +232,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct 
drm_device *dev, size_t } struct panfrost_gem_object * -panfrost_gem_create_with_handle(struct drm_file *file_priv, - struct drm_device *dev, size_t size, - u32 flags, - uint32_t *handle) +panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags) { - int ret; struct drm_gem_shmem_object *shmem; struct panfrost_gem_object *bo; @@ -253,16 +249,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv, bo->noexec = !!(flags & PANFROST_BO_NOEXEC); bo->is_heap = !!(flags & PANFROST_BO_HEAP); - /* - * Allocate an id of idr table where the obj is registered - * and handle has the id what user can see. - */ - ret = drm_gem_handle_create(file_priv, &shmem->base, handle); - /* drop reference from allocate - handle holds it now. */ - drm_gem_object_put(&shmem->base); - if (ret) - return ERR_PTR(ret); - return bo; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 8088d5fd8480e..ad2877eeeccdf 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt); struct panfrost_gem_object * -panfrost_gem_create_with_handle(struct drm_file *file_priv, - struct drm_device *dev, size_t size, - u32 flags, - uint32_t *handle); +panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags); int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); void panfrost_gem_close(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index bdd883f4f0da5..963a5d5e6987a 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -402,6 +402,7 @@ static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np, if (of_device_is_compatible(child, "arm,pl111")) { has_coretile_clcd = true; ct_clcd = child; + of_node_put(child); break; } if (of_device_is_compatible(child, "arm,hdlcd")) { diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index bb29cf02974d1..0c94147f76256 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -227,6 +227,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) if (!found) return false; + pci_dev_put(pdev); rdev->bios = kmalloc(size, GFP_KERNEL); if (!rdev->bios) { @@ -612,13 +613,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) acpi_size tbl_size; UEFI_ACPI_VFCT *vfct; unsigned offset; + bool r = false; if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) return false; tbl_size = hdr->length; if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); - return false; + goto out; } vfct = (UEFI_ACPI_VFCT *)hdr; @@ -631,13 +633,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) offset += sizeof(VFCT_IMAGE_HEADER); if (offset > tbl_size) { DRM_ERROR("ACPI VFCT image header truncated\n"); - return false; + goto out; } offset += vhdr->ImageLength; if (offset > tbl_size) { DRM_ERROR("ACPI VFCT image truncated\n"); - return false; + goto out; } if (vhdr->ImageLength && @@ -649,15 +651,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); + if (rdev->bios) + r = true; - if (!rdev->bios) - return false; - return true; + goto out; } } DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); - 
return false; + +out: + acpi_put_table(hdr); + return r; } #else static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 266e3cbbd09bd..8287410f471fb 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1623,6 +1623,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, if (r) { /* delay GPU reset to resume */ radeon_fence_driver_force_completion(rdev, i); + } else { + /* finish executing delayed work */ + flush_delayed_work(&rdev->fence_drv[i].lockup_work); } } diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index dec54c70e0082..adeaa0140f0f7 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -276,8 +276,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector) return ret; } -static int cdn_dp_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +cdn_dp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct cdn_dp_device *dp = connector_to_dp(connector); struct drm_display_info *display_info = &dp->connector.display_info; @@ -563,7 +564,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); - memcpy(&dp->mode, adjusted, sizeof(*mode)); + drm_mode_copy(&dp->mode, adjusted); } static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index b0fb3c3cba596..c51be1c9c2070 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -1286,5 +1286,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", + /* + * For dual-DSI display, one DSI pokes at the other DSI's + * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not + * safe for asynchronous probe. + */ + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index 7afdc54eb3ec1..78120da5e63aa 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -488,7 +488,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder, inno_hdmi_setup(hdmi, adj_mode); /* Store the display mode for plugin/DPMS poweron events */ - memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); + drm_mode_copy(&hdmi->previous_mode, adj_mode); } static void inno_hdmi_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index 1c546c3a89984..17e7c40a9e7b9 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -383,7 +383,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder, struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder); /* Store the display mode for plugin/DPMS poweron events. 
*/ - memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); + drm_mode_copy(&hdmi->previous_mode, adj_mode); } static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 7c20b4a24a7e2..e2487937c4e3d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -145,7 +145,7 @@ static int rk3288_lvds_poweron(struct rockchip_lvds *lvds) DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret); return ret; } - ret = pm_runtime_get_sync(lvds->dev); + ret = pm_runtime_resume_and_get(lvds->dev); if (ret < 0) { DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret); clk_disable(lvds->pclk); @@ -329,16 +329,20 @@ static int px30_lvds_poweron(struct rockchip_lvds *lvds) { int ret; - ret = pm_runtime_get_sync(lvds->dev); + ret = pm_runtime_resume_and_get(lvds->dev); if (ret < 0) { DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret); return ret; } /* Enable LVDS mode */ - return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1, + ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1, PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1), PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1)); + if (ret) + pm_runtime_put(lvds->dev); + + return ret; } static void px30_lvds_poweroff(struct rockchip_lvds *lvds) diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index ddb4184f07264..11225ac213e13 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -288,7 +288,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); - memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&dvo->mode, mode); /* According to the path used (main or aux), the dvo clocks should * have a different parent clock. 
*/ @@ -346,8 +346,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector) #define CLK_TOLERANCE_HZ 50 -static int sti_dvo_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_dvo_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 5c2b650b561d5..418dfccc2faf3 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -523,7 +523,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); - memcpy(&hda->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&hda->mode, mode); if (!hda_get_mode_idx(hda->mode, &mode_idx)) { DRM_ERROR("Undefined mode\n"); @@ -600,8 +600,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector) #define CLK_TOLERANCE_HZ 50 -static int sti_hda_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_hda_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 38a558768e531..1bcee73f51144 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -934,7 +934,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); /* Copy the drm display mode in the connector local structure */ - memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&hdmi->mode, mode); /* Update clock framerate according to the selected mode */ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000); @@ -997,8 +997,9 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector) #define CLK_TOLERANCE_HZ 50 -static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index ceb86338c0039..958d12da902d3 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -2564,8 +2564,10 @@ static int tegra_dc_probe(struct platform_device *pdev) usleep_range(2000, 4000); err = reset_control_assert(dc->rst); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(dc->clk); return err; + } usleep_range(2000, 4000); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 2c6ebc328b24f..318692ad9680f 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1042,6 +1042,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) struct host1x *host1x = dev_get_drvdata(dev->dev.parent); struct iommu_domain *domain; + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If the Tegra DRM clients are backed by an IOMMU, push buffers are * likely to be allocated beyond the 32-bit boundary if sufficient diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index edcfd8c120c44..209b5ceba3e00 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c 
+++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -400,9 +400,6 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, udl_handle_damage(fb, 0, 0, fb->width, fb->height); - if (!crtc_state->mode_changed) - return; - /* enable display */ udl_crtc_write_mode_to_hw(crtc); } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 52426bc8edb8b..888aec1bbeee6 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -404,7 +404,12 @@ static int __init vc4_drm_register(void) if (ret) return ret; - return platform_driver_register(&vc4_platform_driver); + ret = platform_driver_register(&vc4_platform_driver); + if (ret) + platform_unregister_drivers(component_drivers, + ARRAY_SIZE(component_drivers)); + + return ret; } static void __exit vc4_drm_unregister(void) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 08175c3dd374b..539ebf85fd7c0 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1491,7 +1491,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) return 0; vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, - vc4_hdmi, "vc4", + vc4_hdmi, + vc4_hdmi->variant->card_name, CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO, 1); ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap); diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c index bd5b8eb58b180..c6bd168a58983 100644 --- a/drivers/gpu/drm/vc4/vc4_vec.c +++ b/drivers/gpu/drm/vc4/vc4_vec.c @@ -257,7 +257,7 @@ static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec) static const struct drm_display_mode ntsc_mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500, 720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0, - 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0, + 480, 480 + 7, 480 + 7 + 6, 525, 0, DRM_MODE_FLAG_INTERLACE) }; @@ -279,7 +279,7 @@ static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec) static const struct drm_display_mode pal_mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500, 720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0, - 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0, + 576, 576 + 4, 576 + 4 + 6, 625, 0, DRM_MODE_FLAG_INTERLACE) }; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 33b8ebab178a1..36efa273155df 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -279,10 +279,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, drm_gem_object_release(obj); return ret; } - drm_gem_object_put(obj); rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */ rc->bo_handle = handle; + + /* + * The handle owns the reference now. But we must drop our + * remaining reference *after* we no longer need to dereference + * the obj. Otherwise userspace could guess the handle and + * race closing it from another thread. 
+ */ + drm_gem_object_put(obj); + return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 6a311cd934403..e6de627342695 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -213,14 +213,14 @@ static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, } static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_plane_state *state) { struct virtio_gpu_framebuffer *vgfb; - if (!plane->state->fb) + if (!state->fb) return; - vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + vgfb = to_virtio_gpu_framebuffer(state->fb); if (vgfb->fence) { dma_fence_put(&vgfb->fence->f); vgfb->fence = NULL; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 5e40fa0f5e8f2..e98a29d243c08 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -601,7 +601,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); - if (use_dma_api) + if (virtio_gpu_is_shmem(bo) && use_dma_api) dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, shmem->pages, DMA_TO_DEVICE); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e58112997c881..0e963fd7db17e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -182,7 +182,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, if (cmd->dma.guest.ptr.offset % PAGE_SIZE || box->x != 0 || box->y != 0 || box->z != 0 || box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || - box->d != 1 || box_count != 1) { + box->d != 1 || box_count != 1 || + box->w > 64 || box->h > 64) { /* TODO handle none page aligned offsets */ /* TODO handle more dst & src != 0 */ /* TODO handle more then one copy */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 4bf0f5ec4fc2d..2b6590344468d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -949,6 +949,10 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) struct drm_device *dev = dev_priv->dev; int i, ret; + /* Screen objects won't work if GMR's aren't available */ + if (!dev_priv->has_gmr) + return -ENOSYS; + if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) { DRM_INFO("Not using screen objects," " missing cap SCREEN_OBJECT_2\n"); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 8659558b518d6..9f674a8d5009d 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -198,6 +198,10 @@ static void host1x_setup_sid_table(struct host1x *host) static bool host1x_wants_iommu(struct host1x *host1x) { + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If we support addressing a maximum of 32 bits of physical memory * and if the host1x firewall is enabled, there's no need to enable diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c index 467d789f9bc2d..25ed7b9a917e4 100644 --- a/drivers/hid/hid-betopff.c +++ b/drivers/hid/hid-betopff.c @@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid) struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct input_dev *dev; - int field_count = 0; int error; int i, j; @@ -86,19 +85,21 @@ 
static int betopff_init(struct hid_device *hid) * ----------------------------------------- * Do init them with default value. */ + if (report->maxfield < 4) { + hid_err(hid, "not enough fields in the report: %d\n", + report->maxfield); + return -ENODEV; + } for (i = 0; i < report->maxfield; i++) { + if (report->field[i]->report_count < 1) { + hid_err(hid, "no values in the field\n"); + return -ENODEV; + } for (j = 0; j < report->field[i]->report_count; j++) { report->field[i]->value[j] = 0x00; - field_count++; } } - if (field_count < 4) { - hid_err(hid, "not enough fields in the report: %d\n", - field_count); - return -ENODEV; - } - betopff = kzalloc(sizeof(*betopff), GFP_KERNEL); if (!betopff) return -ENOMEM; diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index e8c5e3ac9fff1..e8b16665860d6 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid, } report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; + if (list_empty(report_list)) { + hid_err(hid, "no output report found\n"); + error = -ENODEV; + goto error_hw_stop; + } bigben->report = list_entry(report_list->next, struct hid_report, list); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 5550c943f9855..baadead947c8b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -988,8 +988,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid, * Validating on id 0 means we should examine the first * report in the list. */ - report = list_entry( - hid->report_enum[type].report_list.next, + report = list_first_entry_or_null( + &hid->report_enum[type].report_list, struct hid_report, list); } else { report = hid->report_enum[type].report_id_hash[id]; @@ -1310,6 +1310,9 @@ static s32 snto32(__u32 value, unsigned n) if (!value || !n) return 0; + if (n > 32) + n = 32; + switch (n) { case 8: return ((__s8)value); case 16: return ((__s16)value); diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 978ee2aab2d40..b7704dd6809dc 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -498,7 +498,7 @@ static int mousevsc_probe(struct hv_device *device, ret = hid_add_device(hid_dev); if (ret) - goto probe_err1; + goto probe_err2; ret = hid_parse(hid_dev); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index bb096dfb7b363..1d1306a6004e6 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -827,6 +827,7 @@ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 #define USB_DEVICE_ID_MADCATZ_RAT5 0x1705 #define USB_DEVICE_ID_MADCATZ_RAT9 0x1709 +#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713 #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 @@ -873,6 +874,7 @@ #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 +#define USB_DEVICE_ID_MS_MOUSE_0783 0x0783 #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -945,7 +947,10 @@ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003 #define USB_VENDOR_ID_PLANTRONICS 0x047f +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES 0xc055 #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058 #define USB_VENDOR_ID_PANASONIC 0x04da #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 @@ -1147,7 +1152,9 @@ #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 
0x2819 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 +#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5 +#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017 0x73f6 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 @@ -1301,6 +1308,7 @@ #define USB_VENDOR_ID_PRIMAX 0x0461 #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22 +#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A 0x4e2a #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 #define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72 #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index 742c052b0110a..b8cce9c196d8c 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -18,10 +18,21 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) { + /* For Acer Aspire Switch 10 SW5-012 keyboard-dock */ if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) { - hid_info(hdev, "Fixing up ITE keyboard report descriptor\n"); + hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n"); rdesc[163] = HID_MAIN_ITEM_RELATIVE; } + /* For Acer One S1002/S1003 keyboard-dock */ + if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) { + hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n"); + rdesc[186] = HID_MAIN_ITEM_RELATIVE; + } + /* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */ + if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) { + hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n"); + rdesc[185] = HID_MAIN_ITEM_RELATIVE; + } } return rdesc; @@ -103,7 +114,18 @@ static const struct hid_device_id ite_devices[] = { /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_SYNAPTICS, - USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) }, + USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002), + .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT }, + /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003), + .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT }, + /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. 
*/ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017), + .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index 5e6a0cef2a06d..e3fcf1353fb3b 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att return -ENOMEM; i = strlen(lbuf); + + if (i == 0) { + kfree(lbuf); + return -EINVAL; + } + if (lbuf[i-1] == '\n') { if (i == 1) { kfree(lbuf); diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index fc4c074597539..28158d2f23523 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -387,7 +387,7 @@ static int magicmouse_raw_event(struct hid_device *hdev, magicmouse_raw_event(hdev, report, data + 2, data[1]); magicmouse_raw_event(hdev, report, data + 2 + data[1], size - 2 - data[1]); - break; + return 0; default: return 0; } diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c index de52e9f7bb8cb..560eeec4035aa 100644 --- a/drivers/hid/hid-mcp2221.c +++ b/drivers/hid/hid-mcp2221.c @@ -840,12 +840,19 @@ static int mcp2221_probe(struct hid_device *hdev, return ret; } - ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); + /* + * This driver uses the .raw_event callback and therefore does not need any + * HID_CONNECT_xxx flags. + */ + ret = hid_hw_start(hdev, 0); if (ret) { hid_err(hdev, "can't start hardware\n"); return ret; } + hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8, + hdev->version & 0xff, hdev->name, hdev->phys); + ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "can't open device\n"); @@ -870,8 +877,7 @@ static int mcp2221_probe(struct hid_device *hdev, mcp->adapter.retries = 1; mcp->adapter.dev.parent = &hdev->dev; snprintf(mcp->adapter.name, sizeof(mcp->adapter.name), - "MCP2221 usb-i2c bridge on hidraw%d", - ((struct hidraw *)hdev->hidraw)->minor); + "MCP2221 usb-i2c bridge"); ret = i2c_add_adapter(&mcp->adapter); if (ret) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index d686917cc3b1f..ea8c52f0aa783 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1155,7 +1155,7 @@ static void mt_touch_report(struct hid_device *hid, int contact_count = -1; /* sticky fingers release in progress, abort */ - if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) + if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; scantime = *app->scantime; @@ -1236,7 +1236,7 @@ static void mt_touch_report(struct hid_device *hid, del_timer(&td->release_timer); } - clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); + clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } static int mt_touch_input_configured(struct hid_device *hdev, @@ -1671,11 +1671,11 @@ static void mt_expired_timeout(struct timer_list *t) * An input report came in just before we release the sticky fingers, * it will take care of the sticky fingers. 
*/ - if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) + if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) mt_release_contacts(hdev); - clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); + clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) @@ -1912,6 +1912,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x313a) }, + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_ELAN, 0x3148) }, + /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index e81b7cec2d124..3d414ae194acb 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -198,9 +198,18 @@ static int plantronics_probe(struct hid_device *hdev, } static const struct hid_device_id plantronics_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES), + .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES), .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES), + .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES), + .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { } }; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 2ab71d717bb03..9f1fcbea19eb7 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -122,6 +122,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS }, @@ -146,6 +147,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL }, @@ -609,6 +611,7 @@ static const 
struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) }, #endif #if IS_ENABLED(CONFIG_HID_SAMSUNG) { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index 26373b82fe812..6da80e442fdd1 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -257,6 +257,8 @@ int roccat_report_event(int minor, u8 const *data) if (!new_value) return -ENOMEM; + mutex_lock(&device->cbuf_lock); + report = &device->cbuf[device->cbuf_end]; /* passing NULL is safe */ @@ -276,6 +278,8 @@ int roccat_report_event(int minor, u8 const *data) reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; } + mutex_unlock(&device->cbuf_lock); + wake_up_interruptible(&device->wait); return 0; } diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index c7bf14c019605..b84e975977c42 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = { .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7), .driver_data = SAITEK_RELEASE_MODE_MMO7 }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7), + .driver_data = SAITEK_RELEASE_MODE_MMO7 }, { } }; diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index 4d25577a8573f..971600a6397a4 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -59,7 +59,7 @@ struct hid_sensor_sample { u32 raw_len; } __packed; -static struct attribute hid_custom_attrs[] = { +static struct attribute hid_custom_attrs[HID_CUSTOM_TOTAL_ATTRS] = { {.name = "name", .mode = S_IRUGO}, {.name = "units", .mode = S_IRUGO}, {.name = "unit-expo", .mode = S_IRUGO}, diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index 4edb241957040..e4811d37ca775 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -172,6 +172,7 @@ static int uclogic_probe(struct hid_device *hdev, * than the pen, so use QUIRK_MULTI_INPUT for all tablets. */ hdev->quirks |= HID_QUIRK_MULTI_INPUT; + hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE; /* Allocate and assign driver data */ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL); diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h index 5ffd0da3cf1fa..65af0ebef79f6 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.h +++ b/drivers/hid/intel-ish-hid/ishtp-hid.h @@ -110,7 +110,7 @@ struct report_list { * @multi_packet_cnt: Count of fragmented packet count * * This structure is used to store completion flags and per client data like - * like report description, number of HID devices etc. + * report description, number of HID devices etc. 
 */ struct ishtp_cl_data { /* completion flags */ diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c index 1cc157126fce7..c0d69303e3b09 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client.c +++ b/drivers/hid/intel-ish-hid/ishtp/client.c @@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb) } /** - * ipc_tx_callback() - IPC tx callback function + * ipc_tx_send() - IPC tx send function * @prm: Pointer to client device instance * - * Send message over IPC either first time or on callback on previous message - * completion + * Send message over IPC. Message will be split into fragments + * if message size is bigger than IPC FIFO size, and all + * fragments will be sent one by one. */ -static void ipc_tx_callback(void *prm) +static void ipc_tx_send(void *prm) { struct ishtp_cl *cl = prm; struct ishtp_cl_tx_ring *cl_msg; @@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm) list); rem = cl_msg->send_buf.size - cl->tx_offs; - ishtp_hdr.host_addr = cl->host_client_id; - ishtp_hdr.fw_addr = cl->fw_client_id; - ishtp_hdr.reserved = 0; - pmsg = cl_msg->send_buf.data + cl->tx_offs; + while (rem > 0) { + ishtp_hdr.host_addr = cl->host_client_id; + ishtp_hdr.fw_addr = cl->fw_client_id; + ishtp_hdr.reserved = 0; + pmsg = cl_msg->send_buf.data + cl->tx_offs; + + if (rem <= dev->mtu) { + /* Last fragment or only one packet */ + ishtp_hdr.length = rem; + ishtp_hdr.msg_complete = 1; + /* Submit to IPC queue with no callback */ + ishtp_write_message(dev, &ishtp_hdr, pmsg); + cl->tx_offs = 0; + cl->sending = 0; - if (rem <= dev->mtu) { - ishtp_hdr.length = rem; - ishtp_hdr.msg_complete = 1; - cl->sending = 0; - list_del_init(&cl_msg->list); /* Must be before write */ - spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); - /* Submit to IPC queue with no callback */ - ishtp_write_message(dev, &ishtp_hdr, pmsg); - spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); - list_add_tail(&cl_msg->list, &cl->tx_free_list.list); - ++cl->tx_ring_free_size; - spin_unlock_irqrestore(&cl->tx_free_list_spinlock, - tx_free_flags); - } else { - /* Send IPC fragment */ - spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); - cl->tx_offs += dev->mtu; - ishtp_hdr.length = dev->mtu; - ishtp_hdr.msg_complete = 0; - ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl); + break; + } else { + /* Send IPC fragment */ + ishtp_hdr.length = dev->mtu; + ishtp_hdr.msg_complete = 0; + /* All fragments submitted to IPC queue with no callback */ + ishtp_write_message(dev, &ishtp_hdr, pmsg); + cl->tx_offs += dev->mtu; + rem = cl_msg->send_buf.size - cl->tx_offs; + } } + + list_del_init(&cl_msg->list); + spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); + + spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); + list_add_tail(&cl_msg->list, &cl->tx_free_list.list); + ++cl->tx_ring_free_size; + spin_unlock_irqrestore(&cl->tx_free_list_spinlock, + tx_free_flags); } /** @@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev, return; cl->tx_offs = 0; - ipc_tx_callback(cl); + ipc_tx_send(cl); ++cl->send_msg_cnt_ipc; } diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c index 40554c8daca07..00046cbfd4ed0 100644 --- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c +++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c @@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev, int required_slots = (size / DMA_SLOT_SIZE) + 1 * (size % DMA_SLOT_SIZE != 0); + if
(!dev->ishtp_dma_tx_map) { + dev_err(dev->devc, "Failed to allocate Tx map\n"); + return NULL; + } + spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags); for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) { free = 1; @@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev, return; } + if (!dev->ishtp_dma_tx_map) { + dev_err(dev->devc, "Failed to allocate Tx map\n"); + return; + } + i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE; spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags); for (j = 0; j < acked_slots; j++) { diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 4dbf69078387f..b42785fdf7ed5 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -160,6 +160,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, { struct wacom *wacom = hid_get_drvdata(hdev); + if (wacom->wacom_wac.features.type == BOOTLOADER) + return 0; + if (size > WACOM_PKGLEN_MAX) return 1; @@ -2786,6 +2789,11 @@ static int wacom_probe(struct hid_device *hdev, return error; } + if (features->type == BOOTLOADER) { + hid_warn(hdev, "Using device in hidraw-only mode"); + return hid_hw_start(hdev, HID_CONNECT_HIDRAW); + } + error = wacom_parse_and_register(wacom, false); if (error) return error; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index d8d127fcc82a8..afb94b89fc4d4 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -4782,6 +4782,9 @@ static const struct wacom_features wacom_features_0x3c8 = static const struct wacom_features wacom_features_HID_ANY_ID = { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; +static const struct wacom_features wacom_features_0x94 = + { "Wacom Bootloader", .type = BOOTLOADER }; + #define USB_DEVICE_WACOM(prod) \ HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ .driver_data = (kernel_ulong_t)&wacom_features_##prod @@ -4855,6 +4858,7 @@ const struct hid_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x84) }, { USB_DEVICE_WACOM(0x90) }, { USB_DEVICE_WACOM(0x93) }, + { USB_DEVICE_WACOM(0x94) }, { USB_DEVICE_WACOM(0x97) }, { USB_DEVICE_WACOM(0x9A) }, { USB_DEVICE_WACOM(0x9F) }, diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 8dea7cb298e69..ca172efcf072f 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -242,6 +242,7 @@ enum { MTTPC, MTTPC_B, HID_GENERIC, + BOOTLOADER, MAX_TYPE }; diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index 44a3f5660c109..26f2c3c012978 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd) platform_set_drvdata(pd, ssi); err = ssi_add_controller(ssi, pd); - if (err < 0) + if (err < 0) { + hsi_put_controller(ssi); goto out1; + } pm_runtime_enable(&pd->dev); @@ -524,6 +526,7 @@ static int ssi_probe(struct platform_device *pd) if (!childpdev) { err = -ENODEV; dev_err(&pd->dev, "failed to create ssi controller port\n"); + of_node_put(child); goto out3; } } @@ -535,9 +538,9 @@ static int ssi_probe(struct platform_device *pd) device_for_each_child(&pd->dev, NULL, ssi_remove_ports); out2: ssi_remove_controller(ssi); + pm_runtime_disable(&pd->dev); out1: platform_set_drvdata(pd, NULL); - pm_runtime_disable(&pd->dev); return err; } @@ -628,7 +631,13 @@ static int __init ssi_init(void) { if (ret) return ret; - return platform_driver_register(&ssi_port_pdriver); + ret =
platform_driver_register(&ssi_port_pdriver); + if (ret) { + platform_driver_unregister(&ssi_pdriver); + return ret; + } + + return 0; } module_init(ssi_init); diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c index a0cb5be246e1c..b9495b720f1bd 100644 --- a/drivers/hsi/controllers/omap_ssi_port.c +++ b/drivers/hsi/controllers/omap_ssi_port.c @@ -230,10 +230,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) if (msg->ttype == HSI_MSG_READ) { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_FROM_DEVICE); - if (err < 0) { + if (!err) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); pm_runtime_put_autosuspend(omap_port->pdev); - return err; + return -EIO; } csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | @@ -247,10 +247,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) } else { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_TO_DEVICE); - if (err < 0) { + if (!err) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); pm_runtime_put_autosuspend(omap_port->pdev); - return err; + return -EIO; } csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 10188b1a6a089..5b902adb0d1bf 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -501,13 +501,17 @@ static void vmbus_add_channel_work(struct work_struct *work) * Add the new device to the bus. This will kick off device-driver * binding which eventually invokes the device driver's AddDevice() * method. + * + * If vmbus_device_register() fails, the 'device_obj' is freed in + * vmbus_device_release() as called by device_unregister() in the + * error path of vmbus_device_register(). In the outside error + * path, there's no need to free it. */ ret = vmbus_device_register(newchannel->device_obj); if (ret != 0) { pr_err("unable to add child device object (relid %d)\n", newchannel->offermsg.child_relid); - kfree(newchannel->device_obj); goto err_deq_chan; } diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 769851b6e74c5..7ed6fad3fa8ff 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -246,6 +246,19 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) mutex_unlock(&ring_info->ring_buffer_mutex); } +/* + * Check if the ring buffer spinlock is available to take or not; used in + * atomic contexts, like the panic path (see the Hyper-V framebuffer driver). + */ + +bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *rinfo = &channel->outbound; + + return spin_is_locked(&rinfo->ring_lock); +} +EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy); + /* Write to the ring buffer.
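The omap_ssi_port fix above hinges on the dma_map_sg() return contract: it returns the number of entries actually mapped, and 0 on failure, never a negative errno, so the old "if (err < 0)" test could never fire and mapping failures were silently ignored. A hedged sketch of the corrected pattern (map_for_tx() is a hypothetical helper):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int map_for_tx(struct device *dev, struct sg_table *sgt)
    {
            int nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);

            if (!nents)             /* zero mapped entries means failure */
                    return -EIO;    /* dma_map_sg() gives no errno; pick one */

            return nents;
    }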
*/ int hv_ringbuffer_write(struct vmbus_channel *channel, const struct kvec *kv_list, u32 kv_count) diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 5d820037e2918..e99400f3ae1d1 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2020,6 +2020,7 @@ int vmbus_device_register(struct hv_device *child_device_obj) ret = device_register(&child_device_obj->device); if (ret) { pr_err("Unable to register child device\n"); + put_device(&child_device_obj->device); return ret; } @@ -2251,7 +2252,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, bool fb_overlap_ok) { struct resource *iter, *shadow; - resource_size_t range_min, range_max, start; + resource_size_t range_min, range_max, start, end; const char *dev_n = dev_name(&device_obj->device); int retval; @@ -2286,6 +2287,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, range_max = iter->end; start = (range_min + align - 1) & ~(align - 1); for (; start + size - 1 <= range_max; start += align) { + end = start + size - 1; + + /* Skip the whole fb_mmio region if not fb_overlap_ok */ + if (!fb_overlap_ok && fb_mmio && + (((start >= fb_mmio->start) && (start <= fb_mmio->end)) || + ((end >= fb_mmio->start) && (end <= fb_mmio->end)))) + continue; + shadow = __request_region(iter, start, size, NULL, IORESOURCE_BUSY); if (!shadow) diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index f741c7492ee40..8a427467a8427 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -766,6 +766,7 @@ config SENSORS_IT87 config SENSORS_JC42 tristate "JEDEC JC42.4 compliant memory module temperature sensors" depends on I2C + select REGMAP_I2C help If you say yes here, you get support for JEDEC JC42.4 compliant temperature sensors, which are used on many DDR3 memory modules for diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index bb9211215a688..42b84ebff0579 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) -#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) - #ifdef CONFIG_SMP #define for_each_sibling(i, cpu) \ for_each_cpu(i, topology_sibling_cpumask(cpu)) @@ -91,6 +88,8 @@ struct temp_data { struct platform_data { struct device *hwmon_dev; u16 pkg_id; + u16 cpu_map[NUM_REAL_CORES]; + struct ida ida; struct cpumask cpumask; struct temp_data *core_data[MAX_CORE_DATA]; struct device_attribute name_attr; @@ -243,10 +242,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) */ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) { for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) { - if (host_bridge->device == tjmax_pci_table[i].device) + if (host_bridge->device == tjmax_pci_table[i].device) { + pci_dev_put(host_bridge); return tjmax_pci_table[i].tjmax; + } } } + pci_dev_put(host_bridge); for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) { if (strstr(c->x86_model_id, tjmax_table[i].id)) @@ -441,7 +443,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) MSR_IA32_THERM_STATUS; tdata->is_pkg_data = pkg_flag; tdata->cpu = cpu; - tdata->cpu_core_id = TO_CORE_ID(cpu); + tdata->cpu_core_id = topology_core_id(cpu); tdata->attr_size = MAX_CORE_ATTRS; mutex_init(&tdata->update_lock); return tdata; @@ -454,7 +456,7 @@ static int 
create_core_data(struct platform_device *pdev, unsigned int cpu, struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; - int err, attr_no; + int err, index, attr_no; /* * Find attr number for sysfs: @@ -462,14 +464,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, * The attr number is always core id + 2 * The Pkgtemp will always show up as temp1_*, if available */ - attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu); + if (pkg_flag) { + attr_no = PKG_SYSFS_ATTR_NO; + } else { + index = ida_alloc(&pdata->ida, GFP_KERNEL); + if (index < 0) + return index; + pdata->cpu_map[index] = topology_core_id(cpu); + attr_no = index + BASE_SYSFS_ATTR_NO; + } - if (attr_no > MAX_CORE_DATA - 1) - return -ERANGE; + if (attr_no > MAX_CORE_DATA - 1) { + err = -ERANGE; + goto ida_free; + } tdata = init_temp_data(cpu, pkg_flag); - if (!tdata) - return -ENOMEM; + if (!tdata) { + err = -ENOMEM; + goto ida_free; + } /* Test if we can access the status register */ err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx); @@ -505,6 +519,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, exit_free: pdata->core_data[attr_no] = NULL; kfree(tdata); +ida_free: + if (!pkg_flag) + ida_free(&pdata->ida, index); return err; } @@ -519,11 +536,18 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx) { struct temp_data *tdata = pdata->core_data[indx]; + /* if we errored on add then this is already gone */ + if (!tdata) + return; + /* Remove the sysfs attributes */ sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group); kfree(pdata->core_data[indx]); pdata->core_data[indx] = NULL; + + if (indx >= BASE_SYSFS_ATTR_NO) + ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO); } static int coretemp_probe(struct platform_device *pdev) @@ -537,6 +561,7 @@ static int coretemp_probe(struct platform_device *pdev) return -ENOMEM; pdata->pkg_id = pdev->id; + ida_init(&pdata->ida); platform_set_drvdata(pdev, pdata); pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME, @@ -553,6 +578,7 @@ static int coretemp_remove(struct platform_device *pdev) if (pdata->core_data[i]) coretemp_remove_core(pdata, i); + ida_destroy(&pdata->ida); return 0; } @@ -647,7 +673,7 @@ static int coretemp_cpu_offline(unsigned int cpu) struct platform_device *pdev = coretemp_get_pdev(cpu); struct platform_data *pd; struct temp_data *tdata; - int indx, target; + int i, indx = -1, target; /* * Don't execute this on suspend as the device remove locks @@ -660,12 +686,19 @@ static int coretemp_cpu_offline(unsigned int cpu) if (!pdev) return 0; - /* The core id is too big, just return */ - indx = TO_ATTR_NO(cpu); - if (indx > MAX_CORE_DATA - 1) + pd = platform_get_drvdata(pdev); + + for (i = 0; i < NUM_REAL_CORES; i++) { + if (pd->cpu_map[i] == topology_core_id(cpu)) { + indx = i + BASE_SYSFS_ATTR_NO; + break; + } + } + + /* Too many cores and this core is not populated, just return */ + if (indx < 0) return 0; - pd = platform_get_drvdata(pdev); tdata = pd->core_data[indx]; cpumask_clear_cpu(cpu, &pd->cpumask); diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index 1fe37418ff46c..f29ce49294daf 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -267,6 +267,7 @@ gsc_hwmon_get_devtree_pdata(struct device *dev) pdata->nchannels = nchannels; /* fan controller base address */ + of_node_get(dev->parent->of_node); fan = of_find_compatible_node(dev->parent->of_node, NULL, 
"gw,gsc-fan"); if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) { dev_err(dev, "fan node without base\n"); diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c index 360f5aee13947..d4be03f43fb45 100644 --- a/drivers/hwmon/i5500_temp.c +++ b/drivers/hwmon/i5500_temp.c @@ -108,7 +108,7 @@ static int i5500_temp_probe(struct pci_dev *pdev, u32 tstimer; s8 tsfsc; - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Failed to enable device\n"); return err; diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c index b2ab83c9fd9a8..fe90f0536d76c 100644 --- a/drivers/hwmon/ibmpex.c +++ b/drivers/hwmon/ibmpex.c @@ -502,6 +502,7 @@ static void ibmpex_register_bmc(int iface, struct device *dev) return; out_register: + list_del(&data->list); hwmon_device_unregister(data->hwmon_dev); out_user: ipmi_destroy_user(data->user); diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index ad11cbddc3a7b..d3c98115042b5 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -230,7 +230,7 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg, * Shunt Voltage Sum register has 14-bit value with 1-bit shift * Other Shunt Voltage registers have 12 bits with 3-bit shift */ - if (reg == INA3221_SHUNT_SUM) + if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM) *val = sign_extend32(regval >> 1, 14); else *val = sign_extend32(regval >> 3, 12); @@ -465,7 +465,7 @@ static int ina3221_write_curr(struct device *dev, u32 attr, * SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV * SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV */ - if (reg == INA3221_SHUNT_SUM) + if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM) regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe; else regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8; diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 4a03d010ec5a8..52f341d46029b 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -19,6 +19,7 @@ #include #include #include +#include /* Addresses to scan */ static const unsigned short normal_i2c[] = { @@ -189,31 +190,14 @@ static struct jc42_chips jc42_chips[] = { { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK }, }; -enum temp_index { - t_input = 0, - t_crit, - t_min, - t_max, - t_num_temp -}; - -static const u8 temp_regs[t_num_temp] = { - [t_input] = JC42_REG_TEMP, - [t_crit] = JC42_REG_TEMP_CRITICAL, - [t_min] = JC42_REG_TEMP_LOWER, - [t_max] = JC42_REG_TEMP_UPPER, -}; - /* Each client has this additional data */ struct jc42_data { - struct i2c_client *client; struct mutex update_lock; /* protect register access */ + struct regmap *regmap; bool extended; /* true if extended range supported */ bool valid; - unsigned long last_updated; /* In jiffies */ u16 orig_config; /* original configuration */ u16 config; /* current configuration */ - u16 temp[t_num_temp];/* Temperatures */ }; #define JC42_TEMP_MIN_EXTENDED (-40000) @@ -238,85 +222,102 @@ static int jc42_temp_from_reg(s16 reg) return reg * 125 / 2; } -static struct jc42_data *jc42_update_device(struct device *dev) -{ - struct jc42_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - struct jc42_data *ret = data; - int i, val; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { - for (i = 0; i < t_num_temp; i++) { - val = i2c_smbus_read_word_swapped(client, temp_regs[i]); - if (val < 0) { - ret = ERR_PTR(val); - goto abort; - } - data->temp[i] = val; - } - data->last_updated = 
jiffies; - data->valid = true; - } -abort: - mutex_unlock(&data->update_lock); - return ret; -} - static int jc42_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { - struct jc42_data *data = jc42_update_device(dev); - int temp, hyst; + struct jc42_data *data = dev_get_drvdata(dev); + unsigned int regval; + int ret, temp, hyst; - if (IS_ERR(data)) - return PTR_ERR(data); + mutex_lock(&data->update_lock); switch (attr) { case hwmon_temp_input: - *val = jc42_temp_from_reg(data->temp[t_input]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_min: - *val = jc42_temp_from_reg(data->temp[t_min]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP_LOWER, ®val); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_max: - *val = jc42_temp_from_reg(data->temp[t_max]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, ®val); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_crit: - *val = jc42_temp_from_reg(data->temp[t_crit]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, + ®val); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_max_hyst: - temp = jc42_temp_from_reg(data->temp[t_max]); + ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, ®val); + if (ret) + break; + + temp = jc42_temp_from_reg(regval); hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) >> JC42_CFG_HYST_SHIFT]; *val = temp - hyst; - return 0; + break; case hwmon_temp_crit_hyst: - temp = jc42_temp_from_reg(data->temp[t_crit]); + ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, + ®val); + if (ret) + break; + + temp = jc42_temp_from_reg(regval); hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) >> JC42_CFG_HYST_SHIFT]; *val = temp - hyst; - return 0; + break; case hwmon_temp_min_alarm: - *val = (data->temp[t_input] >> JC42_ALARM_MIN_BIT) & 1; - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); + if (ret) + break; + + *val = (regval >> JC42_ALARM_MIN_BIT) & 1; + break; case hwmon_temp_max_alarm: - *val = (data->temp[t_input] >> JC42_ALARM_MAX_BIT) & 1; - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); + if (ret) + break; + + *val = (regval >> JC42_ALARM_MAX_BIT) & 1; + break; case hwmon_temp_crit_alarm: - *val = (data->temp[t_input] >> JC42_ALARM_CRIT_BIT) & 1; - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); + if (ret) + break; + + *val = (regval >> JC42_ALARM_CRIT_BIT) & 1; + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + + mutex_unlock(&data->update_lock); + + return ret; } static int jc42_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { struct jc42_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; + unsigned int regval; int diff, hyst; int ret; @@ -324,21 +325,23 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, switch (attr) { case hwmon_temp_min: - data->temp[t_min] = jc42_temp_to_reg(val, data->extended); - ret = i2c_smbus_write_word_swapped(client, temp_regs[t_min], - data->temp[t_min]); + ret = regmap_write(data->regmap, JC42_REG_TEMP_LOWER, + jc42_temp_to_reg(val, data->extended)); break; case hwmon_temp_max: - data->temp[t_max] = jc42_temp_to_reg(val, data->extended); - ret = i2c_smbus_write_word_swapped(client, temp_regs[t_max], - 
data->temp[t_max]); + ret = regmap_write(data->regmap, JC42_REG_TEMP_UPPER, + jc42_temp_to_reg(val, data->extended)); break; case hwmon_temp_crit: - data->temp[t_crit] = jc42_temp_to_reg(val, data->extended); - ret = i2c_smbus_write_word_swapped(client, temp_regs[t_crit], - data->temp[t_crit]); + ret = regmap_write(data->regmap, JC42_REG_TEMP_CRITICAL, + jc42_temp_to_reg(val, data->extended)); break; case hwmon_temp_crit_hyst: + ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, + ®val); + if (ret) + break; + /* * JC42.4 compliant chips only support four hysteresis values. * Pick best choice and go from there. @@ -346,7 +349,7 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED : JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX); - diff = jc42_temp_from_reg(data->temp[t_crit]) - val; + diff = jc42_temp_from_reg(regval) - val; hyst = 0; if (diff > 0) { if (diff < 2250) @@ -358,9 +361,8 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, } data->config = (data->config & ~JC42_CFG_HYST_MASK) | (hyst << JC42_CFG_HYST_SHIFT); - ret = i2c_smbus_write_word_swapped(data->client, - JC42_REG_CONFIG, - data->config); + ret = regmap_write(data->regmap, JC42_REG_CONFIG, + data->config); break; default: ret = -EOPNOTSUPP; @@ -458,51 +460,80 @@ static const struct hwmon_chip_info jc42_chip_info = { .info = jc42_info, }; +static bool jc42_readable_reg(struct device *dev, unsigned int reg) +{ + return (reg >= JC42_REG_CAP && reg <= JC42_REG_DEVICEID) || + reg == JC42_REG_SMBUS; +} + +static bool jc42_writable_reg(struct device *dev, unsigned int reg) +{ + return (reg >= JC42_REG_CONFIG && reg <= JC42_REG_TEMP_CRITICAL) || + reg == JC42_REG_SMBUS; +} + +static bool jc42_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == JC42_REG_CONFIG || reg == JC42_REG_TEMP; +} + +static const struct regmap_config jc42_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .val_format_endian = REGMAP_ENDIAN_BIG, + .max_register = JC42_REG_SMBUS, + .writeable_reg = jc42_writable_reg, + .readable_reg = jc42_readable_reg, + .volatile_reg = jc42_volatile_reg, + .cache_type = REGCACHE_RBTREE, +}; + static int jc42_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device *hwmon_dev; + unsigned int config, cap; struct jc42_data *data; - int config, cap; + int ret; data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL); if (!data) return -ENOMEM; - data->client = client; + data->regmap = devm_regmap_init_i2c(client, &jc42_regmap_config); + if (IS_ERR(data->regmap)) + return PTR_ERR(data->regmap); + i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - cap = i2c_smbus_read_word_swapped(client, JC42_REG_CAP); - if (cap < 0) - return cap; + ret = regmap_read(data->regmap, JC42_REG_CAP, &cap); + if (ret) + return ret; data->extended = !!(cap & JC42_CAP_RANGE); if (device_property_read_bool(dev, "smbus-timeout-disable")) { - int smbus; - /* * Not all chips support this register, but from a * quick read of various datasheets no chip appears * incompatible with the below attempt to disable * the timeout. And the whole thing is opt-in... 
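With the jc42 conversion to regmap, regmap_set_bits(), used just below, folds the old two-step SMBus sequence into one read-modify-write done under the map's own lock (and served from the cache where the register is cached). For contrast, a sketch of what the removed sequence amounted to (set_bits_by_hand() is hypothetical):

    /* Hypothetical open-coded equivalent of regmap_set_bits(). */
    static int set_bits_by_hand(struct i2c_client *client, u8 reg, u16 bits)
    {
            int val = i2c_smbus_read_word_swapped(client, reg);

            if (val < 0)
                    return val;

            return i2c_smbus_write_word_swapped(client, reg, val | bits);
    }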
*/ - smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS); - if (smbus < 0) - return smbus; - i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS, - smbus | SMBUS_STMOUT); + ret = regmap_set_bits(data->regmap, JC42_REG_SMBUS, + SMBUS_STMOUT); + if (ret) + return ret; } - config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG); - if (config < 0) - return config; + ret = regmap_read(data->regmap, JC42_REG_CONFIG, &config); + if (ret) + return ret; data->orig_config = config; if (config & JC42_CFG_SHUTDOWN) { config &= ~JC42_CFG_SHUTDOWN; - i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config); + regmap_write(data->regmap, JC42_REG_CONFIG, config); } data->config = config; @@ -523,7 +554,7 @@ static int jc42_remove(struct i2c_client *client) config = (data->orig_config & ~JC42_CFG_HYST_MASK) | (data->config & JC42_CFG_HYST_MASK); - i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config); + regmap_write(data->regmap, JC42_REG_CONFIG, config); } return 0; } @@ -535,8 +566,11 @@ static int jc42_suspend(struct device *dev) struct jc42_data *data = dev_get_drvdata(dev); data->config |= JC42_CFG_SHUTDOWN; - i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG, - data->config); + regmap_write(data->regmap, JC42_REG_CONFIG, data->config); + + regcache_cache_only(data->regmap, true); + regcache_mark_dirty(data->regmap); + return 0; } @@ -544,10 +578,13 @@ static int jc42_resume(struct device *dev) { struct jc42_data *data = dev_get_drvdata(dev); + regcache_cache_only(data->regmap, false); + data->config &= ~JC42_CFG_SHUTDOWN; - i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG, - data->config); - return 0; + regmap_write(data->regmap, JC42_REG_CONFIG, data->config); + + /* Restore cached register values to hardware */ + return regcache_sync(data->regmap); } static const struct dev_pm_ops jc42_dev_pm_ops = { diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c index 5423466de697a..e918490f3ff75 100644 --- a/drivers/hwmon/ltc2947-core.c +++ b/drivers/hwmon/ltc2947-core.c @@ -396,7 +396,7 @@ static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val, return ret; /* in milidegrees celcius, temp is given by: */ - *val = (__val * 204) + 550; + *val = (__val * 204) + 5500; return 0; } diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c index 046523d47c29b..41e3d3b54baff 100644 --- a/drivers/hwmon/mr75203.c +++ b/drivers/hwmon/mr75203.c @@ -68,8 +68,9 @@ /* VM Individual Macro Register */ #define VM_COM_REG_SIZE 0x200 -#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n)) -#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n)) +#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm)) +#define VM_SDIF_DATA(vm, ch) \ + (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch)) /* SDA Slave Register */ #define IP_CTRL 0x00 @@ -115,6 +116,7 @@ struct pvt_device { u32 t_num; u32 p_num; u32 v_num; + u32 c_num; u32 ip_freq; u8 *vm_idx; }; @@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val) { struct pvt_device *pvt = dev_get_drvdata(dev); struct regmap *v_map = pvt->v_map; + u8 vm_idx, ch_idx; u32 n, stat; - u8 vm_idx; int ret; - if (channel >= pvt->v_num) + if (channel >= pvt->v_num * pvt->c_num) return -EINVAL; - vm_idx = pvt->vm_idx[channel]; + vm_idx = pvt->vm_idx[channel / pvt->c_num]; + ch_idx = channel % pvt->c_num; switch (attr) { case hwmon_in_input: @@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val) if (ret) 
return ret; - ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n); + ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n); if(ret < 0) return ret; n &= SAMPLE_DATA_MSK; - /* Convert the N bitstream count into voltage */ - *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS; + /* + * Convert the N bitstream count into voltage. + * To support negative voltage calculation on 64-bit machines + * n must be cast to long, since n and *val differ both in + * signedness and in size. + * Division is used instead of right shift, because for signed + * numbers, the sign bit is used to fill the vacated bit + * positions, and if the number is negative, 1 is used. + * BIT(x) may not be used instead of (1 << x) because it's + * unsigned. + */ + *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS); return 0; default: @@ -385,6 +398,19 @@ static int pvt_init(struct pvt_device *pvt) if (ret) return ret; + val = (BIT(pvt->c_num) - 1) | VM_CH_INIT | + IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG; + ret = regmap_write(v_map, SDIF_W, val); + if (ret < 0) + return ret; + + ret = regmap_read_poll_timeout(v_map, SDIF_STAT, + val, !(val & SDIF_BUSY), + PVT_POLL_DELAY_US, + PVT_POLL_TIMEOUT_US); + if (ret) + return ret; + val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT | CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG; @@ -499,8 +525,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt static int mr75203_probe(struct platform_device *pdev) { + u32 ts_num, vm_num, pd_num, ch_num, val, index, i; const struct hwmon_channel_info **pvt_info; - u32 ts_num, vm_num, pd_num, val, index, i; struct device *dev = &pdev->dev; u32 *temp_config, *in_config; struct device *hwmon_dev; @@ -541,9 +567,11 @@ static int mr75203_probe(struct platform_device *pdev) ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT; pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT; vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT; + ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT; pvt->t_num = ts_num; pvt->p_num = pd_num; pvt->v_num = vm_num; + pvt->c_num = ch_num; val = 0; if (ts_num) val++; @@ -580,7 +608,7 @@ static int mr75203_probe(struct platform_device *pdev) } if (vm_num) { - u32 num = vm_num; + u32 total_ch; ret = pvt_get_regmap(pdev, "vm", pvt); if (ret) @@ -594,30 +622,30 @@ static int mr75203_probe(struct platform_device *pdev) ret = device_property_read_u8_array(dev, "intel,vm-map", pvt->vm_idx, vm_num); if (ret) { - num = 0; + /* + * In case the intel,vm-map property is not defined, we + * assume incremental channel numbers. + */ + for (i = 0; i < vm_num; i++) + pvt->vm_idx[i] = i; } else { for (i = 0; i < vm_num; i++) if (pvt->vm_idx[i] >= vm_num || pvt->vm_idx[i] == 0xff) { - num = i; + pvt->v_num = i; + vm_num = i; break; } } - /* - * Incase intel,vm-map property is not defined, we assume - * incremental channel numbers.
- */ - for (i = num; i < vm_num; i++) - pvt->vm_idx[i] = i; - - in_config = devm_kcalloc(dev, num + 1, + total_ch = ch_num * vm_num; + in_config = devm_kcalloc(dev, total_ch + 1, sizeof(*in_config), GFP_KERNEL); if (!in_config) return -ENOMEM; - memset32(in_config, HWMON_I_INPUT, num); - in_config[num] = 0; + memset32(in_config, HWMON_I_INPUT, total_ch); + in_config[total_ch] = 0; pvt_in.config = in_config; pvt_info[index++] = &pvt_in; diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c index 3647109666658..e499146648639 100644 --- a/drivers/hwspinlock/qcom_hwspinlock.c +++ b/drivers/hwspinlock/qcom_hwspinlock.c @@ -105,7 +105,7 @@ static const struct regmap_config tcsr_mutex_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x40000, + .max_register = 0x20000, .fast_io = true, }; diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index 0276700c246d5..90270696206c9 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -90,11 +90,9 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata) static int cti_enable_hw(struct cti_drvdata *drvdata) { struct cti_config *config = &drvdata->config; - struct device *dev = &drvdata->csdev->dev; unsigned long flags; int rc = 0; - pm_runtime_get_sync(dev->parent); spin_lock_irqsave(&drvdata->spinlock, flags); /* no need to do anything if enabled or unpowered*/ @@ -119,7 +117,6 @@ static int cti_enable_hw(struct cti_drvdata *drvdata) /* cannot enable due to error */ cti_err_not_enabled: spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(dev->parent); return rc; } @@ -153,7 +150,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata) static int cti_disable_hw(struct cti_drvdata *drvdata) { struct cti_config *config = &drvdata->config; - struct device *dev = &drvdata->csdev->dev; spin_lock(&drvdata->spinlock); @@ -174,7 +170,6 @@ static int cti_disable_hw(struct cti_drvdata *drvdata) coresight_disclaim_device_unlocked(drvdata->base); CS_LOCK(drvdata->base); spin_unlock(&drvdata->spinlock); - pm_runtime_put(dev->parent); return 0; /* not disabled this call */ diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 9468c6c89b3f5..682fffaab2b40 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "i2c-designware-core.h" @@ -347,7 +348,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) * * If your hardware is free from tHD;STA issue, try this one. */ - return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) - + 8 + offset; else /* * Conditional expression: @@ -363,8 +365,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) * The reason why we need to take into account "tf" here, * is the same as described in i2c_dw_scl_lcnt(). */ - return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000 - - 3 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - + 3 + offset; } u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) @@ -380,7 +382,8 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety. 
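The designware change replaces the open-coded "(x + 500000) / 1000000" with DIV_ROUND_CLOSEST_ULL(x, MICRO): adding half the divisor before dividing rounds to nearest, and widening the ic_clk * t product to 64 bits first guards against 32-bit overflow at high clock rates. A small standalone check of the identity, with made-up inputs:

    #include <stdio.h>
    #include <stdint.h>

    #define MICRO 1000000ULL

    /* Round-to-nearest division for unsigned operands, as
     * DIV_ROUND_CLOSEST_ULL() computes it. */
    static uint64_t div_round_closest(uint64_t n, uint64_t d)
    {
            return (n + d / 2) / d;
    }

    int main(void)
    {
            uint64_t ic_clk = 133000;       /* kHz, hypothetical input clock */
            uint64_t tsymbol = 4700;        /* ns, hypothetical tHIGH */

            uint64_t old = (ic_clk * tsymbol + 500000) / 1000000;
            uint64_t new = div_round_closest(ic_clk * tsymbol, MICRO);

            printf("%llu == %llu\n", (unsigned long long)old,
                   (unsigned long long)new);
            return 0;
    }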
*/ - return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - + 1 + offset; } int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index ad91c7c0faa54..4747541517252 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -32,12 +32,13 @@ #include #include #include +#include #include "i2c-designware-core.h" static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) { - return clk_get_rate(dev->clk)/1000; + return clk_get_rate(dev->clk) / KILO; } #ifdef CONFIG_ACPI @@ -284,7 +285,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (!dev->sda_hold_time && t->sda_hold_ns) dev->sda_hold_time = - div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); + DIV_S64_ROUND_CLOSEST(clk_khz * t->sda_hold_ns, MICRO); } adap = &dev->adapter; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 5618c1ff34dc3..45682d30d7056 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1275,6 +1275,7 @@ static const struct { */ { "Latitude 5480", 0x29 }, { "Vostro V131", 0x1d }, + { "Vostro 5568", 0x29 }, }; static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv) diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index d3719df1c40dc..b4fb4336b4e8b 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -843,7 +843,8 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, int i, result; unsigned int temp; int block_data = msgs->flags & I2C_M_RECV_LEN; - int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data; + int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE && + msgs->len >= DMA_THRESHOLD && !block_data; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", @@ -1011,7 +1012,8 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter, result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic); } else { if (!atomic && - i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD) + i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD && + msgs[i].flags & I2C_M_DMA_SAFE) result = i2c_imx_dma_write(i2c_imx, &msgs[i]); else result = i2c_imx_write(i2c_imx, &msgs[i], atomic); @@ -1289,7 +1291,7 @@ static int i2c_imx_remove(struct platform_device *pdev) if (i2c_imx->dma) i2c_imx_dma_free(i2c_imx); - if (ret == 0) { + if (ret >= 0) { /* setup chip registers to defaults */ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 3d2d92640651e..cec2b2ae7684f 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -507,6 +507,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, if (read_write == I2C_SMBUS_WRITE) { /* Block Write */ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n"); + if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + dma_size = data->block[0] + 1; dma_direction = DMA_TO_DEVICE; desc->wr_len_cmd = dma_size; diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index ab261d762dea3..90c488a606934 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -63,13 +64,14 @@ */ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 
1000) /* Reference clock for Bluefield - 156 MHz. */ -#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000) +#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL /* Constant used to determine the PLL frequency. */ -#define MLNXBF_I2C_COREPLL_CONST 16384 +#define MLNXBF_I2C_COREPLL_CONST 16384ULL + +#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL /* PLL registers. */ -#define MLXBF_I2C_CORE_PLL_REG0 0x0 #define MLXBF_I2C_CORE_PLL_REG1 0x4 #define MLXBF_I2C_CORE_PLL_REG2 0x8 @@ -187,22 +189,15 @@ enum { #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ /* Core PLL TYU configuration. */ -#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0) -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0) - -#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3 -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16 -#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20 +#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3) +#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16) +#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20) /* Core PLL YU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0) #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0) +#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26) -#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0 -#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1 -#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26 /* Core PLL frequency. */ static u64 mlxbf_i2c_corepll_frequency; @@ -317,6 +312,7 @@ static u64 mlxbf_i2c_corepll_frequency; * exact. */ #define MLXBF_I2C_SMBUS_TIMEOUT (300 * 1000) /* 300ms */ +#define MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT (300 * 1000) /* 300ms */ /* Encapsulates timing parameters. */ struct mlxbf_i2c_timings { @@ -485,8 +481,6 @@ static struct mutex mlxbf_i2c_bus_lock; #define MLXBF_I2C_MASK_8 GENMASK(7, 0) #define MLXBF_I2C_MASK_16 GENMASK(15, 0) -#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000 - /* * Function to poll a set of bits at a specific address; it checks whether * the bits are equal to zero when eq_zero is set to 'true', and not equal @@ -527,6 +521,25 @@ static bool mlxbf_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv) return false; } +/* + * wait for the lock to be released before acquiring it. + */ +static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv) +{ + if (mlxbf_smbus_poll(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW, + MLXBF_I2C_MASTER_LOCK_BIT, true, + MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT)) + return true; + + return false; +} + +static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv) +{ + /* Clear the gw to clear the lock */ + writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW); +} + static bool mlxbf_i2c_smbus_transaction_success(u32 master_status, u32 cause_status) { @@ -675,7 +688,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, /* Clear status bits. */ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS); /* Set the cause data. */ - writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR); + writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); /* Zero PEC byte. */ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC); /* Zero byte count. */ @@ -718,10 +731,19 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, slave = request->slave & GENMASK(6, 0); addr = slave << 1; - /* First of all, check whether the HW is idle. 
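The point of the new mlxbf lock helpers is that the hardware sets MLXBF_I2C_MASTER_LOCK_BIT as a side effect of reading the gateway register, so the first read that observes the bit clear has already acquired the lock, and writing 0 to the register releases it. A sketch of that read-to-lock idiom using the generic iopoll helper (gw_trylock() and gw_unlock() are hypothetical):

    #include <linux/io.h>
    #include <linux/iopoll.h>

    static bool gw_trylock(void __iomem *gw, u32 lock_bit, u32 timeout_us)
    {
            u32 val;

            /* Every poll is a read of the gateway register; the read that
             * returns lock_bit clear has atomically taken the lock. */
            return !readl_poll_timeout(gw, val, !(val & lock_bit),
                                       100, timeout_us);
    }

    static void gw_unlock(void __iomem *gw)
    {
            writel(0, gw);  /* clearing the gateway drops the hardware lock */
    }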
*/ - if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv))) + /* + * Try to acquire the smbus gw lock before any reads of the GW register since + * a read sets the lock. + */ + if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv))) return -EBUSY; + /* Check whether the HW is idle */ + if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv))) { + ret = -EBUSY; + goto out_unlock; + } + /* Set first byte. */ data_desc[data_idx++] = addr; @@ -744,6 +766,11 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, if (flags & MLXBF_I2C_F_WRITE) { write_en = 1; write_len += operation->length; + if (data_idx + operation->length > + MLXBF_I2C_MASTER_DATA_DESC_SIZE) { + ret = -ENOBUFS; + goto out_unlock; + } memcpy(data_desc + data_idx, operation->buffer, operation->length); data_idx += operation->length; @@ -775,7 +802,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en, pec_en, 0); if (ret) - return ret; + goto out_unlock; } if (read_en) { @@ -802,6 +829,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM); } +out_unlock: + mlxbf_i2c_smbus_master_unlock(priv); + return ret; } @@ -1413,24 +1443,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev, return 0; } -static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) { - u64 core_frequency, pad_frequency; + u64 core_frequency; u8 core_od, core_r; u32 corepll_val; u16 core_f; - pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); /* Get Core PLL configuration bits. */ - core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_TYU_MASK; - core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK; - core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_TYU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val); /* * Compute PLL output frequency as follow: @@ -1442,31 +1467,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. 
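Plugging the TYU field layout into that formula gives PLL_OUT = PLL_IN * (CORE_F + 1) / ((CORE_R + 1) * (CORE_OD + 1)), with the fields now pulled straight from their masks the way FIELD_GET() does. A standalone recomputation with made-up register contents:

    #include <stdio.h>
    #include <stdint.h>

    /* Same bit layout as the TYU masks above. */
    #define F_MASK  0x0000fff8u             /* bits 15:3 */
    #define OD_MASK 0x000f0000u             /* bits 19:16 */
    #define R_MASK  0x03f00000u             /* bits 25:20 */

    /* Userspace stand-in for the kernel's FIELD_GET(). */
    static uint32_t field_get(uint32_t mask, uint32_t reg)
    {
            return (reg & mask) / (mask & -mask);
    }

    int main(void)
    {
            const uint64_t pll_in = 156250000ULL;   /* pad clock, Hz */
            uint32_t reg = (63u << 3) | (1u << 16) | (24u << 20); /* sample */
            uint64_t f = field_get(F_MASK, reg);
            uint64_t od = field_get(OD_MASK, reg);
            uint64_t r = field_get(R_MASK, reg);

            /* 156.25 MHz * 64 / (25 * 2) = 200 MHz */
            printf("%llu Hz\n", (unsigned long long)
                   (pll_in * (f + 1) / ((r + 1) * (od + 1))));
            return 0;
    }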
*/ - core_frequency = pad_frequency * (++core_f); + core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); core_frequency /= (++core_r) * (++core_od); return core_frequency; } -static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) { u32 corepll_reg1_val, corepll_reg2_val; - u64 corepll_frequency, pad_frequency; + u64 corepll_frequency; u8 core_od, core_r; u32 core_f; - pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2); /* Get Core PLL configuration bits */ - core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_YU_MASK; - core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_YU_MASK; - core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_YU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val); /* * Compute PLL output frequency as follow: @@ -1478,7 +1498,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. */ - corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST; + corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; corepll_frequency /= (++core_r) * (++core_od); return corepll_frequency; @@ -2186,14 +2206,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = { [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1], [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1] }, - .calculate_freq = mlxbf_calculate_freq_from_tyu + .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu }, [MLXBF_I2C_CHIP_TYPE_2] = { .type = MLXBF_I2C_CHIP_TYPE_2, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2] }, - .calculate_freq = mlxbf_calculate_freq_from_yu + .calculate_freq = mlxbf_i2c_calculate_freq_from_yu } }; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index c4b08a9244614..abad24808e855 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -842,8 +842,8 @@ static int mxs_i2c_probe(struct platform_device *pdev) /* Setup the DMA */ i2c->dmach = dma_request_chan(dev, "rx-tx"); if (IS_ERR(i2c->dmach)) { - dev_err(dev, "Failed to request dma\n"); - return PTR_ERR(i2c->dmach); + return dev_err_probe(dev, PTR_ERR(i2c->dmach), + "Failed to request dma\n"); } platform_set_drvdata(pdev, i2c); diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 31e3d2c9d6bc5..c1b6797372409 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2362,8 +2362,17 @@ static struct platform_driver npcm_i2c_bus_driver = { static int __init npcm_i2c_init(void) { + int ret; + npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL); - return platform_driver_register(&npcm_i2c_bus_driver); + + ret = platform_driver_register(&npcm_i2c_bus_driver); + if (ret) { + debugfs_remove_recursive(npcm_i2c_debugfs_dir); + return ret; + } + + return 0; } module_init(npcm_i2c_init); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 8c1b31ed0c429..aa1d3657ab4e6 100644 --- 
a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -961,6 +961,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) "", &piix4_main_adapters[0]); if (retval < 0) return retval; + piix4_adapter_count = 1; } /* Check for auxiliary SMBus on some AMD chipsets */ diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c index f614cade432bb..30e38bc8b6db8 100644 --- a/drivers/i2c/busses/i2c-pxa-pci.c +++ b/drivers/i2c/busses/i2c-pxa-pci.c @@ -105,7 +105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev, int i; struct ce4100_devices *sds; - ret = pci_enable_device_mem(dev); + ret = pcim_enable_device(dev); if (ret) return ret; @@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev, return -EINVAL; } sds = kzalloc(sizeof(*sds), GFP_KERNEL); - if (!sds) { - ret = -ENOMEM; - goto err_mem; - } + if (!sds) + return -ENOMEM; for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { sds->pdev[i] = add_i2c_device(dev, i); @@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev, err_dev_add: kfree(sds); -err_mem: - pci_disable_device(dev); return ret; } diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 09e599069a81d..06c87c79bae76 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -638,6 +638,11 @@ static int cci_probe(struct platform_device *pdev) if (ret < 0) goto error; + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + for (i = 0; i < cci->data->num_masters; i++) { if (!cci->master[i].cci) continue; @@ -649,14 +654,12 @@ static int cci_probe(struct platform_device *pdev) } } - pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); - pm_runtime_use_autosuspend(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - return 0; error_i2c: + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + for (--i ; i >= 0; i--) { if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 02ddb237f69af..13c14eb175e94 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -80,7 +80,7 @@ enum { #define DEFAULT_SCL_RATE (100 * 1000) /* Hz */ /** - * struct i2c_spec_values: + * struct i2c_spec_values - I2C specification values for various modes * @min_hold_start_ns: min hold time (repeated) START condition * @min_low_ns: min LOW period of the SCL clock * @min_high_ns: min HIGH period of the SCL cloc @@ -136,7 +136,7 @@ static const struct i2c_spec_values fast_mode_plus_spec = { }; /** - * struct rk3x_i2c_calced_timings: + * struct rk3x_i2c_calced_timings - calculated V1 timings * @div_low: Divider output for low * @div_high: Divider output for high * @tuning: Used to adjust setup/hold data time, @@ -159,7 +159,7 @@ enum rk3x_i2c_state { }; /** - * struct rk3x_i2c_soc_data: + * struct rk3x_i2c_soc_data - SOC-specific data * @grf_offset: offset inside the grf regmap for setting the i2c type * @calc_timings: Callback function for i2c timing information calculated */ @@ -239,7 +239,8 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c) } /** - * Generate a START condition, which triggers a REG_INT_START interrupt. + * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt. 
+ * @i2c: target controller data */ static void rk3x_i2c_start(struct rk3x_i2c *i2c) { @@ -258,8 +259,8 @@ static void rk3x_i2c_start(struct rk3x_i2c *i2c) } /** - * Generate a STOP condition, which triggers a REG_INT_STOP interrupt. - * + * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt. + * @i2c: target controller data * @error: Error code to return in rk3x_i2c_xfer */ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error) @@ -298,7 +299,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error) } /** - * Setup a read according to i2c->msg + * rk3x_i2c_prepare_read - Setup a read according to i2c->msg + * @i2c: target controller data */ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c) { @@ -329,7 +331,8 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c) } /** - * Fill the transmit buffer with data from i2c->msg + * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg + * @i2c: target controller data */ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c) { @@ -532,11 +535,10 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id) } /** - * Get timing values of I2C specification - * + * rk3x_i2c_get_spec - Get timing values of I2C specification * @speed: Desired SCL frequency * - * Returns: Matched i2c spec values. + * Return: Matched i2c_spec_values. */ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed) { @@ -549,13 +551,12 @@ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed) } /** - * Calculate divider values for desired SCL frequency - * + * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency * @clk_rate: I2C input clock rate * @t: Known I2C timing information * @t_calc: Caculated rk3x private timings that would be written into regs * - * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case + * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case * a best-effort divider value is returned in divs. If the target rate is * too high, we silently use the highest possible rate. */ @@ -710,13 +711,12 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate, } /** - * Calculate timing values for desired SCL frequency - * + * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency * @clk_rate: I2C input clock rate * @t: Known I2C timing information * @t_calc: Caculated rk3x private timings that would be written into regs * - * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case + * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case * a best-effort divider value is returned in divs. If the target rate is * too high, we silently use the highest possible rate. * The following formulas are v1's method to calculate timings. @@ -960,14 +960,14 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long } /** - * Setup I2C registers for an I2C operation specified by msgs, num. - * - * Must be called with i2c->lock held. - * + * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num. + * @i2c: target controller data * @msgs: I2C msgs to process * @num: Number of msgs * - * returns: Number of I2C msgs processed or negative in case of error + * Must be called with i2c->lock held. 
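The rk3x hunks are pure kernel-doc repairs: each comment gains the "name - summary" line, @-prefixed parameter descriptions, and a Return: section, which is the shape scripts/kernel-doc needs in order to parse them. The canonical form, shown on a hypothetical function:

    /**
     * foo_frob - Apply the requested state to a widget
     * @w:     target widget (hypothetical type)
     * @state: state to apply
     *
     * Must be called with w->lock held.
     *
     * Return: %0 on success or a negative errno on failure.
     */
    static int foo_frob(struct widget *w, int state);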
+ * + * Return: Number of I2C msgs processed or negative in case of error */ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num) { diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 8b113ae32dc71..42f1db60ad6f8 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -283,6 +283,7 @@ struct tegra_i2c_dev { struct dma_chan *tx_dma_chan; struct dma_chan *rx_dma_chan; unsigned int dma_buf_size; + struct device *dma_dev; dma_addr_t dma_phys; void *dma_buf; @@ -419,7 +420,7 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len) static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev) { if (i2c_dev->dma_buf) { - dma_free_coherent(i2c_dev->dev, i2c_dev->dma_buf_size, + dma_free_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size, i2c_dev->dma_buf, i2c_dev->dma_phys); i2c_dev->dma_buf = NULL; } @@ -466,10 +467,13 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) i2c_dev->tx_dma_chan = chan; + WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device); + i2c_dev->dma_dev = chan->device->dev; + i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len + I2C_PACKET_HEADER_SIZE; - dma_buf = dma_alloc_coherent(i2c_dev->dev, i2c_dev->dma_buf_size, + dma_buf = dma_alloc_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size, &dma_phys, GFP_KERNEL | __GFP_NOWARN); if (!dma_buf) { dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n"); @@ -1255,7 +1259,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (i2c_dev->dma_mode) { if (i2c_dev->msg_read) { - dma_sync_single_for_device(i2c_dev->dev, + dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); @@ -1263,7 +1267,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (err) return err; } else { - dma_sync_single_for_cpu(i2c_dev->dev, + dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); } @@ -1276,7 +1280,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE, msg->buf, msg->len); - dma_sync_single_for_device(i2c_dev->dev, + dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); @@ -1327,7 +1331,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, } if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) { - dma_sync_single_for_cpu(i2c_dev->dev, + dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 8dabb6ffb1a4f..3b564e68130b5 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -935,6 +935,7 @@ static struct platform_driver xiic_i2c_driver = { module_platform_driver(xiic_i2c_driver); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("info@mocean-labs.com"); MODULE_DESCRIPTION("Xilinx I2C bus driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 0e0679f65cf77..30a6de1694e07 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c @@ -183,13 +183,12 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) if (!mux->data.reg) { dev_info(&pdev->dev, "Register not set, using platform resource\n"); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mux->data.reg_size = resource_size(res); - mux->data.reg = devm_ioremap_resource(&pdev->dev, res); + mux->data.reg = 
devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(mux->data.reg)) { ret = PTR_ERR(mux->data.reg); goto err_put_parent; } + mux->data.reg_size = resource_size(res); } if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index 84bbdfd2f2ba3..b4ae4f86da3e1 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -304,3 +304,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16201"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c index 4a841aec6268b..e6e465f397d9d 100644 --- a/drivers/iio/accel/adis16209.c +++ b/drivers/iio/accel/adis16209.c @@ -314,3 +314,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16209"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index f05840d17fb71..8d929a4f9110f 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -277,6 +277,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev, hid_sensor_convert_timestamp( &accel_state->common_attributes, *(int64_t *)raw_data); + ret = 0; break; default: break; diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c index 8c1e866f72e85..96eeda433ad66 100644 --- a/drivers/iio/adc/ad7923.c +++ b/drivers/iio/adc/ad7923.c @@ -93,6 +93,7 @@ enum ad7923_id { .sign = 'u', \ .realbits = (bits), \ .storagebits = 16, \ + .shift = 12 - (bits), \ .endianness = IIO_BE, \ }, \ } @@ -274,7 +275,8 @@ static int ad7923_read_raw(struct iio_dev *indio_dev, return ret; if (chan->address == EXTRACT(ret, 12, 4)) - *val = EXTRACT(ret, 0, 12); + *val = EXTRACT(ret, chan->scan_type.shift, + chan->scan_type.realbits); else return -EIO; diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index 3a6f239d4acca..496cb2b26bfda 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -280,10 +280,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, unsigned int data_reg; int ret = 0; - if (iio_buffer_enabled(indio_dev)) - return -EBUSY; + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; - mutex_lock(&indio_dev->mlock); ad_sigma_delta_set_channel(sigma_delta, chan->address); spi_bus_lock(sigma_delta->spi->master); @@ -322,7 +322,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); sigma_delta->bus_locked = false; spi_bus_unlock(sigma_delta->spi->master); - mutex_unlock(&indio_dev->mlock); + iio_device_release_direct_mode(indio_dev); if (ret) return ret; diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 4ede7e766765e..250b78ee16251 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -74,7 +74,7 @@ #define AT91_SAMA5D2_MR_ANACH BIT(23) /* Tracking Time */ #define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24) -#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff +#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xf /* Transfer Time */ #define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28) #define 
AT91_SAMA5D2_MR_TRANSFER_MAX 0x3 @@ -1353,10 +1353,12 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev, ret = at91_adc_read_position(st, chan->channel, &tmp_val); *val = tmp_val; + if (ret > 0) + ret = at91_adc_adjust_val_osr(st, val); mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); - return at91_adc_adjust_val_osr(st, val); + return ret; } if (chan->type == IIO_PRESSURE) { ret = iio_device_claim_direct_mode(indio_dev); @@ -1367,10 +1369,12 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev, ret = at91_adc_read_pressure(st, chan->channel, &tmp_val); *val = tmp_val; + if (ret > 0) + ret = at91_adc_adjust_val_osr(st, val); mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); - return at91_adc_adjust_val_osr(st, val); + return ret; } /* in this case we have a voltage channel */ @@ -1461,16 +1465,20 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev, /* if no change, optimize out */ if (val == st->oversampling_ratio) return 0; + mutex_lock(&st->lock); st->oversampling_ratio = val; /* update ratio */ at91_adc_config_emr(st); + mutex_unlock(&st->lock); return 0; case IIO_CHAN_INFO_SAMP_FREQ: if (val < st->soc_info.min_sample_rate || val > st->soc_info.max_sample_rate) return -EINVAL; + mutex_lock(&st->lock); at91_adc_setup_samp_freq(indio_dev, val); + mutex_unlock(&st->lock); return 0; default: return -EINVAL; @@ -1899,6 +1907,9 @@ static __maybe_unused int at91_adc_suspend(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); struct at91_adc_state *st = iio_priv(indio_dev); + if (iio_buffer_enabled(indio_dev)) + at91_adc_buffer_postdisable(indio_dev); + /* * Do a sofware reset of the ADC before we go to suspend. * this will ensure that all pins are free from being muxed by the ADC @@ -1942,14 +1953,11 @@ static __maybe_unused int at91_adc_resume(struct device *dev) if (!iio_buffer_enabled(indio_dev)) return 0; - /* check if we are enabling triggered buffer or the touchscreen */ - if (at91_adc_current_chan_is_touch(indio_dev)) - return at91_adc_configure_touch(st, true); - else - return at91_adc_configure_trigger(st->trig, true); + ret = at91_adc_buffer_prepare(indio_dev); + if (ret) + goto vref_disable_resume; - /* not needed but more explicit */ - return 0; + return at91_adc_configure_trigger(st->trig, true); vref_disable_resume: regulator_disable(st->vref); diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 0a793e7cd53ee..38d4a910bc525 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -616,8 +616,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev, trig->ops = &at91_adc_trigger_ops; ret = iio_trigger_register(trig); - if (ret) + if (ret) { + iio_trigger_free(trig); return NULL; + } return trig; } diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c index 8b04b95b7b7ae..fa2c87946e16f 100644 --- a/drivers/iio/adc/berlin2-adc.c +++ b/drivers/iio/adc/berlin2-adc.c @@ -289,8 +289,10 @@ static int berlin2_adc_probe(struct platform_device *pdev) int ret; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv)); - if (!indio_dev) + if (!indio_dev) { + of_node_put(parent_np); return -ENOMEM; + } priv = iio_priv(indio_dev); platform_set_drvdata(pdev, indio_dev); diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c index 1adddf5a88a94..61f373fab9a11 100644 --- a/drivers/iio/adc/ltc2497.c +++ b/drivers/iio/adc/ltc2497.c @@ -41,6 +41,19 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata 
*ddata, } *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17); + + /* + * The part started a new conversion at the end of the above i2c + * transfer, so if the address didn't change since the last call + * everything is fine and we can return early. + * If not (which should only happen when some sort of bulk + * conversion is implemented) we have to program the new + * address. Note that this probably fails as the conversion that + * was triggered above is likely not complete yet and the two + * operations have to be done in a single transfer. + */ + if (ddata->addr_prev == address) + return 0; } ret = i2c_smbus_write_byte(st->client, diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c index 331a9a7282170..acd9420c04162 100644 --- a/drivers/iio/adc/mp2629_adc.c +++ b/drivers/iio/adc/mp2629_adc.c @@ -56,7 +56,8 @@ static struct iio_map mp2629_adc_maps[] = { MP2629_MAP(SYSTEM_VOLT, "system-volt"), MP2629_MAP(INPUT_VOLT, "input-volt"), MP2629_MAP(BATT_CURRENT, "batt-current"), - MP2629_MAP(INPUT_CURRENT, "input-current") + MP2629_MAP(INPUT_CURRENT, "input-current"), + { } }; static int mp2629_read_raw(struct iio_dev *indio_dev, @@ -73,7 +74,7 @@ static int mp2629_read_raw(struct iio_dev *indio_dev, if (ret) return ret; - if (chan->address == MP2629_INPUT_VOLT) + if (chan->channel == MP2629_INPUT_VOLT) rval &= GENMASK(6, 0); *val = rval; return IIO_VAL_INT; diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 9234f14167b7a..171d73efb2f88 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -1521,6 +1521,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = { }, {} }; +MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match); static int stm32_dfsdm_adc_probe(struct platform_device *pdev) { diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c index 83c1ae07b3e9a..8618ae7bc0671 100644 --- a/drivers/iio/adc/ti-adc128s052.c +++ b/drivers/iio/adc/ti-adc128s052.c @@ -193,13 +193,13 @@ static int adc128_remove(struct spi_device *spi) } static const struct of_device_id adc128_of_match[] = { - { .compatible = "ti,adc128s052", }, - { .compatible = "ti,adc122s021", }, - { .compatible = "ti,adc122s051", }, - { .compatible = "ti,adc122s101", }, - { .compatible = "ti,adc124s021", }, - { .compatible = "ti,adc124s051", }, - { .compatible = "ti,adc124s101", }, + { .compatible = "ti,adc128s052", .data = (void*)0L, }, + { .compatible = "ti,adc122s021", .data = (void*)1L, }, + { .compatible = "ti,adc122s051", .data = (void*)1L, }, + { .compatible = "ti,adc122s101", .data = (void*)1L, }, + { .compatible = "ti,adc124s021", .data = (void*)2L, }, + { .compatible = "ti,adc124s051", .data = (void*)2L, }, + { .compatible = "ti,adc124s101", .data = (void*)2L, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, adc128_of_match); diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index 256177b15c511..024bdc1ef77e6 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -57,6 +57,18 @@ #define TWL6030_GPADCS BIT(1) #define TWL6030_GPADCR BIT(0) +#define USB_VBUS_CTRL_SET 0x04 +#define USB_ID_CTRL_SET 0x06 + +#define TWL6030_MISC1 0xE4 +#define VBUS_MEAS 0x01 +#define ID_MEAS 0x01 + +#define VAC_MEAS 0x04 +#define VBAT_MEAS 0x02 +#define BB_MEAS 0x01 + + /** * struct twl6030_chnl_calib - channel calibration * @gain: slope coefficient for ideal curve @@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev) return ret; } + ret =
twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET); + if (ret < 0) { + dev_err(dev, "failed to wire up inputs\n"); + return ret; + } + + ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET); + if (ret < 0) { + dev_err(dev, "failed to wire up inputs\n"); + return ret; + } + + ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, + VBAT_MEAS | BB_MEAS | VAC_MEAS, + TWL6030_MISC1); + if (ret < 0) { + dev_err(dev, "failed to wire up inputs\n"); + return ret; + } + indio_dev->name = DRIVER_NAME; indio_dev->info = &twl6030_gpadc_iio_info; indio_dev->modes = INDIO_DIRECT_MODE; diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c index 5b4df36fdc2ad..4cc855c781218 100644 --- a/drivers/iio/dac/ad5593r.c +++ b/drivers/iio/dac/ad5593r.c @@ -13,6 +13,8 @@ #include #include +#include + #define AD5593R_MODE_CONF (0 << 4) #define AD5593R_MODE_DAC_WRITE (1 << 4) #define AD5593R_MODE_ADC_READBACK (4 << 4) @@ -20,6 +22,24 @@ #define AD5593R_MODE_GPIO_READBACK (6 << 4) #define AD5593R_MODE_REG_READBACK (7 << 4) +static int ad5593r_read_word(struct i2c_client *i2c, u8 reg, u16 *value) +{ + int ret; + u8 buf[2]; + + ret = i2c_smbus_write_byte(i2c, reg); + if (ret < 0) + return ret; + + ret = i2c_master_recv(i2c, buf, sizeof(buf)); + if (ret < 0) + return ret; + + *value = get_unaligned_be16(buf); + + return 0; +} + static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value) { struct i2c_client *i2c = to_i2c_client(st->dev); @@ -38,13 +58,7 @@ static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value) if (val < 0) return (int) val; - val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK); - if (val < 0) - return (int) val; - - *value = (u16) val; - - return 0; + return ad5593r_read_word(i2c, AD5593R_MODE_ADC_READBACK, value); } static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value) @@ -58,25 +72,19 @@ static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value) static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value) { struct i2c_client *i2c = to_i2c_client(st->dev); - s32 val; - - val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg); - if (val < 0) - return (int) val; - *value = (u16) val; - - return 0; + return ad5593r_read_word(i2c, AD5593R_MODE_REG_READBACK | reg, value); } static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value) { struct i2c_client *i2c = to_i2c_client(st->dev); - s32 val; + u16 val; + int ret; - val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK); - if (val < 0) - return (int) val; + ret = ad5593r_read_word(i2c, AD5593R_MODE_GPIO_READBACK, &val); + if (ret) + return ret; *value = (u8) val; diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c index a11ae9db0d11b..74db8edb42834 100644 --- a/drivers/iio/gyro/adis16136.c +++ b/drivers/iio/gyro/adis16136.c @@ -599,3 +599,4 @@ module_spi_driver(adis16136_driver); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("Analog Devices ADIS16133/ADIS16135/ADIS16136 gyroscope driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c index e7c9a3e31c45a..1e45d93de5b7c 100644 --- a/drivers/iio/gyro/adis16260.c +++ b/drivers/iio/gyro/adis16260.c @@ -438,3 +438,4 @@ module_spi_driver(adis16260_driver); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor"); MODULE_LICENSE("GPL v2"); 
+MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 38734e4ce3605..82d01ac36128b 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -245,14 +245,14 @@ static int afe4403_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4403_data *afe = iio_priv(indio_dev); - unsigned int reg = afe4403_channel_values[chan->address]; - unsigned int field = afe4403_channel_leds[chan->address]; + unsigned int reg, field; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: + reg = afe4403_channel_values[chan->address]; ret = afe4403_read(afe, reg, val); if (ret) return ret; @@ -262,6 +262,7 @@ static int afe4403_read_raw(struct iio_dev *indio_dev, case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + field = afe4403_channel_leds[chan->address]; ret = regmap_field_read(afe->fields[field], val); if (ret) return ret; diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 61fe4932d81d0..0eaa34da59a87 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -250,20 +250,20 @@ static int afe4404_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - unsigned int value_reg = afe4404_channel_values[chan->address]; - unsigned int led_field = afe4404_channel_leds[chan->address]; - unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; + unsigned int value_reg, led_field, offdac_field; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: + value_reg = afe4404_channel_values[chan->address]; ret = regmap_read(afe->regmap, value_reg, val); if (ret) return ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: + offdac_field = afe4404_channel_offdacs[chan->address]; ret = regmap_field_read(afe->fields[offdac_field], val); if (ret) return ret; @@ -273,6 +273,7 @@ static int afe4404_read_raw(struct iio_dev *indio_dev, case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + led_field = afe4404_channel_leds[chan->address]; ret = regmap_field_read(afe->fields[led_field], val); if (ret) return ret; @@ -295,19 +296,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - unsigned int led_field = afe4404_channel_leds[chan->address]; - unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; + unsigned int led_field, offdac_field; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_OFFSET: + offdac_field = afe4404_channel_offdacs[chan->address]; return regmap_field_write(afe->fields[offdac_field], val); } break; case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + led_field = afe4404_channel_leds[chan->address]; return regmap_field_write(afe->fields[led_field], val); } break; diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index 715eef81bc248..e9821814afec1 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -34,8 +34,8 @@ * @value: The value to write to device (up to 4 bytes) * @size: The size of the @value (in bytes) */ -int __adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int value, unsigned int size) +int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value, + unsigned int size) { unsigned int page = reg / ADIS_PAGE_SIZE; int ret, i; @@ -118,14 +118,14 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, ret = 
spi_sync(adis->spi, &msg); if (ret) { dev_err(&adis->spi->dev, "Failed to write register 0x%02X: %d\n", - reg, ret); + reg, ret); } else { adis->current_page = page; } return ret; } -EXPORT_SYMBOL_GPL(__adis_write_reg); +EXPORT_SYMBOL_NS_GPL(__adis_write_reg, IIO_ADISLIB); /** * __adis_read_reg() - read N bytes from register (unlocked version) @@ -134,8 +134,8 @@ EXPORT_SYMBOL_GPL(__adis_write_reg); * @val: The value read back from the device * @size: The size of the @val buffer */ -int __adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size) +int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, + unsigned int size) { unsigned int page = reg / ADIS_PAGE_SIZE; struct spi_message msg; @@ -205,12 +205,12 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, ret = spi_sync(adis->spi, &msg); if (ret) { dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n", - reg, ret); + reg, ret); return ret; - } else { - adis->current_page = page; } + adis->current_page = page; + switch (size) { case 4: *val = get_unaligned_be32(adis->rx); @@ -222,7 +222,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, return ret; } -EXPORT_SYMBOL_GPL(__adis_read_reg); +EXPORT_SYMBOL_NS_GPL(__adis_read_reg, IIO_ADISLIB); /** * __adis_update_bits_base() - ADIS Update bits function - Unlocked version * @adis: The adis device @@ -247,17 +247,17 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, return __adis_write_reg(adis, reg, __val, size); } -EXPORT_SYMBOL_GPL(__adis_update_bits_base); +EXPORT_SYMBOL_NS_GPL(__adis_update_bits_base, IIO_ADISLIB); #ifdef CONFIG_DEBUG_FS -int adis_debugfs_reg_access(struct iio_dev *indio_dev, - unsigned int reg, unsigned int writeval, unsigned int *readval) +int adis_debugfs_reg_access(struct iio_dev *indio_dev, unsigned int reg, + unsigned int writeval, unsigned int *readval) { struct adis *adis = iio_device_get_drvdata(indio_dev); if (readval) { - uint16_t val16; + u16 val16; int ret; ret = adis_read_reg_16(adis, reg, &val16); @@ -265,36 +265,41 @@ int adis_debugfs_reg_access(struct iio_dev *indio_dev, *readval = val16; return ret; - } else { - return adis_write_reg_16(adis, reg, writeval); } + + return adis_write_reg_16(adis, reg, writeval); } -EXPORT_SYMBOL(adis_debugfs_reg_access); +EXPORT_SYMBOL_NS(adis_debugfs_reg_access, IIO_ADISLIB); #endif /** - * adis_enable_irq() - Enable or disable data ready IRQ + * __adis_enable_irq() - Enable or disable data ready IRQ (unlocked) * @adis: The adis device * @enable: Whether to enable the IRQ * * Returns 0 on success, negative error code otherwise */ -int adis_enable_irq(struct adis *adis, bool enable) +int __adis_enable_irq(struct adis *adis, bool enable) { - int ret = 0; - uint16_t msc; + int ret; + u16 msc; - mutex_lock(&adis->state_lock); + if (adis->data->enable_irq) + return adis->data->enable_irq(adis, enable); - if (adis->data->enable_irq) { - ret = adis->data->enable_irq(adis, enable); - goto out_unlock; + if (adis->data->unmasked_drdy) { + if (enable) + enable_irq(adis->spi->irq); + else + disable_irq(adis->spi->irq); + + return 0; } ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc); if (ret) - goto out_unlock; + return ret; msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH; msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2; @@ -303,13 +308,9 @@ int adis_enable_irq(struct adis *adis, bool enable) else msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN; - ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); - -out_unlock: - 
mutex_unlock(&adis->state_lock); - return ret; + return __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); } -EXPORT_SYMBOL(adis_enable_irq); +EXPORT_SYMBOL_NS(__adis_enable_irq, IIO_ADISLIB); /** * __adis_check_status() - Check the device for error conditions (unlocked) @@ -319,7 +320,7 @@ EXPORT_SYMBOL(adis_enable_irq); */ int __adis_check_status(struct adis *adis) { - uint16_t status; + u16 status; int ret; int i; @@ -341,7 +342,7 @@ int __adis_check_status(struct adis *adis) return -EIO; } -EXPORT_SYMBOL_GPL(__adis_check_status); +EXPORT_SYMBOL_NS_GPL(__adis_check_status, IIO_ADISLIB); /** * __adis_reset() - Reset the device (unlocked version) @@ -355,7 +356,7 @@ EXPORT_SYMBOL_GPL(__adis_check_status); int __adis_reset(struct adis *adis) { const struct adis_timeout *timeouts = adis->data->timeouts; ret = __adis_write_reg_8(adis, adis->data->glob_cmd_reg, - ADIS_GLOB_CMD_SW_RESET); + ADIS_GLOB_CMD_SW_RESET); if (ret) { dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret); return ret; @@ -365,7 +366,7 @@ int __adis_reset(struct adis *adis) return 0; } -EXPORT_SYMBOL_GPL(__adis_reset); +EXPORT_SYMBOL_NS_GPL(__adis_reset, IIO_ADISLIB); static int adis_self_test(struct adis *adis) { @@ -411,7 +412,7 @@ int __adis_initial_startup(struct adis *adis) { const struct adis_timeout *timeouts = adis->data->timeouts; struct gpio_desc *gpio; - uint16_t prod_id; + u16 prod_id; int ret; /* check if the device has rst pin low */ @@ -420,7 +421,7 @@ int __adis_initial_startup(struct adis *adis) return PTR_ERR(gpio); if (gpio) { - msleep(10); + usleep_range(10, 12); /* bring device out of reset */ gpiod_set_value_cansleep(gpio, 0); msleep(timeouts->reset_ms); @@ -434,7 +435,13 @@ int __adis_initial_startup(struct adis *adis) if (ret) return ret; - adis_enable_irq(adis, false); + /* + * don't bother calling this if we can't unmask the IRQ as in this case + * the IRQ is most likely not yet requested and we will request it + * with 'IRQF_NO_AUTOEN' anyway. + */ + if (!adis->data->unmasked_drdy) + __adis_enable_irq(adis, false); if (!adis->data->prod_id_reg) return 0; @@ -450,7 +457,7 @@ int __adis_initial_startup(struct adis *adis) return 0; } -EXPORT_SYMBOL_GPL(__adis_initial_startup); +EXPORT_SYMBOL_NS_GPL(__adis_initial_startup, IIO_ADISLIB); /** * adis_single_conversion() - Performs a single sample conversion @@ -468,7 +475,8 @@ EXPORT_SYMBOL_GPL(__adis_initial_startup); * a error bit in the channels raw value set error_mask to 0. */ int adis_single_conversion(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, unsigned int error_mask, int *val) + const struct iio_chan_spec *chan, + unsigned int error_mask, int *val) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int uval; @@ -477,7 +485,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, mutex_lock(&adis->state_lock); ret = __adis_read_reg(adis, chan->address, &uval, - chan->scan_type.storagebits / 8); + chan->scan_type.storagebits / 8); if (ret) goto err_unlock; @@ -497,7 +505,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, mutex_unlock(&adis->state_lock); return ret; } -EXPORT_SYMBOL_GPL(adis_single_conversion); +EXPORT_SYMBOL_NS_GPL(adis_single_conversion, IIO_ADISLIB); /** * adis_init() - Initialize adis device structure @@ -512,7 +520,7 @@ EXPORT_SYMBOL_GPL(adis_single_conversion); * called.
*/ int adis_init(struct adis *adis, struct iio_dev *indio_dev, - struct spi_device *spi, const struct adis_data *data) + struct spi_device *spi, const struct adis_data *data) { if (!data || !data->timeouts) { dev_err(&spi->dev, "No config data or timeouts not defined!\n"); @@ -534,7 +542,7 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev, return 0; } -EXPORT_SYMBOL_GPL(adis_init); +EXPORT_SYMBOL_NS_GPL(adis_init, IIO_ADISLIB); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen "); diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index 4aff16466da02..c5255116954a7 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -1252,3 +1252,4 @@ module_spi_driver(adis16400_driver); MODULE_AUTHOR("Manuel Stahl "); MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c index 74a161e397332..a28143a19d3a4 100644 --- a/drivers/iio/imu/adis16460.c +++ b/drivers/iio/imu/adis16460.c @@ -403,12 +403,12 @@ static int adis16460_probe(struct spi_device *spi) if (ret) return ret; + /* We cannot mask the interrupt, so ensure it isn't auto enabled */ + st->adis.irq_flag |= IRQF_NO_AUTOEN; ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL); if (ret) return ret; - adis16460_enable_irq(&st->adis, 0); - ret = __adis_initial_startup(&st->adis); if (ret) return ret; @@ -447,3 +447,4 @@ module_spi_driver(adis16460_driver); MODULE_AUTHOR("Dragos Bogdan "); MODULE_DESCRIPTION("Analog Devices ADIS16460 IMU driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c index 3c4e4deb87608..aed1cf3bfa13c 100644 --- a/drivers/iio/imu/adis16475.c +++ b/drivers/iio/imu/adis16475.c @@ -1196,6 +1196,9 @@ static int adis16475_config_irq_pin(struct adis16475 *st) return -EINVAL; } + /* We cannot mask the interrupt so ensure it's not enabled at request */ + st->adis.irq_flag |= IRQF_NO_AUTOEN; + val = ADIS16475_MSG_CTRL_DR_POL(polarity); ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL, ADIS16475_MSG_CTRL_DR_POL_MASK, val); @@ -1300,8 +1303,6 @@ static int adis16475_probe(struct spi_device *spi) if (ret) return ret; - adis16475_enable_irq(&st->adis, false); - ret = devm_iio_device_register(&spi->dev, indio_dev); if (ret) return ret; @@ -1323,3 +1324,4 @@ module_spi_driver(adis16475_driver); MODULE_AUTHOR("Nuno Sa "); MODULE_DESCRIPTION("Analog Devices ADIS16475 IMU driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index dfe86c5893254..c6a3d9a04fce3 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -1340,3 +1340,4 @@ module_spi_driver(adis16480_driver); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("Analog Devices ADIS16480 IMU driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c index 175af154e4437..7cc1145910f68 100644 --- a/drivers/iio/imu/adis_buffer.c +++ b/drivers/iio/imu/adis_buffer.c @@ -20,7 +20,7 @@ #include static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, - const unsigned long *scan_mask) + const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int burst_length, burst_max_length; @@ -63,7 +63,7 @@ static int adis_update_scan_mode_burst(struct iio_dev 
*indio_dev, } int adis_update_scan_mode(struct iio_dev *indio_dev, - const unsigned long *scan_mask) + const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); const struct iio_chan_spec *chan; @@ -120,7 +120,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, return 0; } -EXPORT_SYMBOL_GPL(adis_update_scan_mode); +EXPORT_SYMBOL_NS_GPL(adis_update_scan_mode, IIO_ADISLIB); static irqreturn_t adis_trigger_handler(int irq, void *p) { @@ -149,7 +149,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, - pf->timestamp); + pf->timestamp); iio_trigger_notify_done(indio_dev->trig); @@ -202,5 +202,5 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, return devm_add_action_or_reset(&adis->spi->dev, adis_buffer_cleanup, adis); } -EXPORT_SYMBOL_GPL(devm_adis_setup_buffer_and_trigger); +EXPORT_SYMBOL_NS_GPL(devm_adis_setup_buffer_and_trigger, IIO_ADISLIB); diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index 64e0ba51cb18e..80adfa58e50cb 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c @@ -15,8 +15,7 @@ #include #include -static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, - bool state) +static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, bool state) { struct adis *adis = iio_trigger_get_drvdata(trig); @@ -36,18 +35,23 @@ static void adis_trigger_setup(struct adis *adis) static int adis_validate_irq_flag(struct adis *adis) { + unsigned long direction = adis->irq_flag & IRQF_TRIGGER_MASK; + + /* We cannot mask the interrupt so ensure it's not enabled at request */ + if (adis->data->unmasked_drdy) + adis->irq_flag |= IRQF_NO_AUTOEN; /* * Typically this devices have data ready either on the rising edge or * on the falling edge of the data ready pin. This checks enforces that * one of those is set in the drivers... It defaults to - * IRQF_TRIGGER_RISING for backward compatibility wiht devices that + * IRQF_TRIGGER_RISING for backward compatibility with devices that * don't support changing the pin polarity. 
*/ - if (!adis->irq_flag) { - adis->irq_flag = IRQF_TRIGGER_RISING; + if (direction == IRQF_TRIGGER_NONE) { + adis->irq_flag |= IRQF_TRIGGER_RISING; return 0; - } else if (adis->irq_flag != IRQF_TRIGGER_RISING && - adis->irq_flag != IRQF_TRIGGER_FALLING) { + } else if (direction != IRQF_TRIGGER_RISING && + direction != IRQF_TRIGGER_FALLING) { dev_err(&adis->spi->dev, "Invalid IRQ mask: %08lx\n", adis->irq_flag); return -EINVAL; @@ -88,5 +92,5 @@ int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) return devm_iio_trigger_register(&adis->spi->dev, adis->trig); } -EXPORT_SYMBOL_GPL(devm_adis_probe_trigger); +EXPORT_SYMBOL_NS_GPL(devm_adis_probe_trigger, IIO_ADISLIB); diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c index ab288186f36e4..04d3778fcc153 100644 --- a/drivers/iio/imu/fxos8700_core.c +++ b/drivers/iio/imu/fxos8700_core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -144,9 +145,8 @@ #define FXOS8700_NVM_DATA_BNK0 0xa7 /* Bit definitions for FXOS8700_CTRL_REG1 */ -#define FXOS8700_CTRL_ODR_MSK 0x38 #define FXOS8700_CTRL_ODR_MAX 0x00 -#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3) +#define FXOS8700_CTRL_ODR_MSK GENMASK(5, 3) /* Bit definitions for FXOS8700_M_CTRL_REG1 */ #define FXOS8700_HMS_MASK GENMASK(1, 0) @@ -320,7 +320,7 @@ static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type) switch (iio_type) { case IIO_ACCEL: return FXOS8700_ACCEL; - case IIO_ANGL_VEL: + case IIO_MAGN: return FXOS8700_MAGN; default: return -EINVAL; @@ -345,15 +345,35 @@ static int fxos8700_set_active_mode(struct fxos8700_data *data, static int fxos8700_set_scale(struct fxos8700_data *data, enum fxos8700_sensor t, int uscale) { - int i; + int i, ret, val; + bool active_mode; static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale); struct device *dev = regmap_get_device(data->regmap); if (t == FXOS8700_MAGN) { - dev_err(dev, "Magnetometer scale is locked at 1200uT\n"); + dev_err(dev, "Magnetometer scale is locked at 0.001Gs\n"); return -EINVAL; } + /* + * When the device is in active mode, setting an ACCEL + * full-scale range (2g/4g/8g) in FXOS8700_XYZ_DATA_CFG fails. + * This does not align with the datasheet, but it is fxos8700 + * chip behavior. Set the device to standby mode before setting + * an ACCEL full-scale range.
+ */ + ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val); + if (ret) + return ret; + + active_mode = val & FXOS8700_ACTIVE; + if (active_mode) { + ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, + val & ~FXOS8700_ACTIVE); + if (ret) + return ret; + } + for (i = 0; i < scale_num; i++) if (fxos8700_accel_scale[i].uscale == uscale) break; @@ -361,8 +381,12 @@ static int fxos8700_set_scale(struct fxos8700_data *data, if (i == scale_num) return -EINVAL; - return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, + ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, fxos8700_accel_scale[i].bits); + if (ret) + return ret; + return regmap_write(data->regmap, FXOS8700_CTRL_REG1, + active_mode); } static int fxos8700_get_scale(struct fxos8700_data *data, @@ -372,7 +396,7 @@ static int fxos8700_get_scale(struct fxos8700_data *data, static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale); if (t == FXOS8700_MAGN) { - *uscale = 1200; /* Magnetometer is locked at 1200uT */ + *uscale = 1000; /* Magnetometer is locked at 0.001Gs */ return 0; } @@ -394,22 +418,61 @@ static int fxos8700_get_data(struct fxos8700_data *data, int chan_type, int axis, int *val) { u8 base, reg; + s16 tmp; int ret; - enum fxos8700_sensor type = fxos8700_to_sensor(chan_type); - base = type ? FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB; + /* + * The register base address varies with the channel type. + * This bug went unnoticed before because the enum-based selection + * was hard to read. Use a switch statement instead. + */ + switch (chan_type) { + case IIO_ACCEL: + base = FXOS8700_OUT_X_MSB; + break; + case IIO_MAGN: + base = FXOS8700_M_OUT_X_MSB; + break; + default: + return -EINVAL; + } /* Block read 6 bytes of device output registers to avoid data loss */ ret = regmap_bulk_read(data->regmap, base, data->buf, - FXOS8700_DATA_BUF_SIZE); + sizeof(data->buf)); if (ret) return ret; /* Convert axis to buffer index */ reg = axis - IIO_MOD_X; + /* + * Convert to native endianness. The accel data and magn data + * are signed, so a forced type conversion is needed. + */ + tmp = be16_to_cpu(data->buf[reg]); + + /* + * ACCEL output data registers contain the X-axis, Y-axis, and Z-axis + * 14-bit left-justified sample data and MAGN output data registers + * contain the X-axis, Y-axis, and Z-axis 16-bit sample data. Apply + * a signed 2-bit right shift to the raw data read back from the ACCEL + * output data registers and keep the MAGN data as is. The value is + * then sign-extended to 32 bits.
+ */ + switch (chan_type) { + case IIO_ACCEL: + tmp = tmp >> 2; + break; + case IIO_MAGN: + /* Nothing to do */ + break; + default: + return -EINVAL; + } + /* Convert to native endianness */ - *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15); + *val = sign_extend32(tmp, 15); return 0; } @@ -445,10 +508,9 @@ static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t, if (i >= odr_num) return -EINVAL; - return regmap_update_bits(data->regmap, - FXOS8700_CTRL_REG1, - FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE, - fxos8700_odr[i].bits << 3 | active_mode); + val &= ~FXOS8700_CTRL_ODR_MSK; + val |= FIELD_PREP(FXOS8700_CTRL_ODR_MSK, fxos8700_odr[i].bits) | FXOS8700_ACTIVE; + return regmap_write(data->regmap, FXOS8700_CTRL_REG1, val); } static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t, @@ -461,7 +523,7 @@ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t, if (ret) return ret; - val &= FXOS8700_CTRL_ODR_MSK; + val = FIELD_GET(FXOS8700_CTRL_ODR_MSK, val); for (i = 0; i < odr_num; i++) if (val == fxos8700_odr[i].bits) @@ -526,7 +588,7 @@ static IIO_CONST_ATTR(in_accel_sampling_frequency_available, static IIO_CONST_ATTR(in_magn_sampling_frequency_available, "1.5625 6.25 12.5 50 100 200 400 800"); static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976"); -static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200"); +static IIO_CONST_ATTR(in_magn_scale_available, "0.001000"); static struct attribute *fxos8700_attrs[] = { &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr, @@ -592,14 +654,19 @@ static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi) if (ret) return ret; - /* Max ODR (800Hz individual or 400Hz hybrid), active mode */ - ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, - FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE); + /* + * Set max full-scale range (+/-8G) for ACCEL sensor in chip + * initialization then activate the device. 
+ */ + ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G); if (ret) return ret; - /* Set for max full-scale range (+/-8G) */ - return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G); + /* Max ODR (800Hz individual or 400Hz hybrid), active mode */ + return regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1, + FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE, + FIELD_PREP(FXOS8700_CTRL_ODR_MSK, FXOS8700_CTRL_ODR_MAX) | + FXOS8700_ACTIVE); } static void fxos8700_chip_uninit(void *data) diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c index 9ae793a70b8bf..a7714d32a6418 100644 --- a/drivers/iio/industrialio-sw-trigger.c +++ b/drivers/iio/industrialio-sw-trigger.c @@ -58,8 +58,12 @@ int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t) t->group = configfs_register_default_group(iio_triggers_group, t->name, &iio_trigger_type_group_type); - if (IS_ERR(t->group)) + if (IS_ERR(t->group)) { + mutex_lock(&iio_trigger_types_lock); + list_del(&t->list); + mutex_unlock(&iio_trigger_types_lock); ret = PTR_ERR(t->group); + } return ret; } diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 8c3faa7972842..c32b2577dd991 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -136,9 +136,10 @@ static int __of_iio_channel_get(struct iio_channel *channel, idev = bus_find_device(&iio_bus_type, NULL, iiospec.np, iio_dev_node_match); - of_node_put(iiospec.np); - if (idev == NULL) + if (idev == NULL) { + of_node_put(iiospec.np); return -EPROBE_DEFER; + } indio_dev = dev_to_iio_dev(idev); channel->indio_dev = indio_dev; @@ -146,6 +147,7 @@ static int __of_iio_channel_get(struct iio_channel *channel, index = indio_dev->info->of_xlate(indio_dev, &iiospec); else index = __of_iio_simple_xlate(indio_dev, &iiospec); + of_node_put(iiospec.np); if (index < 0) goto err_put; channel->channel = &indio_dev->channels[index]; diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 917f9becf9c75..dd52eff9ba2a3 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -294,6 +294,8 @@ config RPR0521 tristate "ROHM RPR0521 ALS and proximity sensor driver" depends on I2C select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say Y here if you want to build support for ROHM's RPR0521 ambient light and proximity sensor device. 
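For reference, the raw-data handling in the fxos8700 hunks above can be checked in isolation: accelerometer samples are 14-bit, left-justified in big-endian 16-bit output registers, so each word must be byte-swapped, treated as signed, and arithmetically shifted right by two, while the 16-bit magnetometer words are used as is. The following is a minimal host-side sketch of that arithmetic, not part of the patch; it assumes a little-endian host, and be16_to_host() plus the sample value are purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for be16_to_cpu() on a little-endian host. */
static int16_t be16_to_host(uint16_t v)
{
	return (int16_t)((uint16_t)(v >> 8) | (uint16_t)(v << 8));
}

int main(void)
{
	/*
	 * A 14-bit accel sample of -2 (0x3ffe) is left-justified to 0xfff8
	 * and sent MSB first, so a little-endian host reads back 0xf8ff.
	 */
	uint16_t raw = 0xf8ff;
	int16_t tmp = be16_to_host(raw);	/* 0xfff8, i.e. -8 as s16 */

	tmp >>= 2;	/* arithmetic shift restores the 14-bit value: -2 */
	printf("accel sample = %d\n", tmp);	/* prints -2 */
	return 0;
}

The magnetometer path skips the shift, which is exactly the accel/magn split that the new switch statements in fxos8700_get_data() encode.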
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index 9afb3fcc74e62..4a7ccf268ebf4 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -53,9 +53,6 @@ #define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2 #define APDS9960_REG_CONFIG_2 0x90 -#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60 -#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5 - #define APDS9960_REG_ID 0x92 #define APDS9960_REG_STATUS 0x93 @@ -76,6 +73,9 @@ #define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6 #define APDS9960_REG_GCONF_2 0xa3 +#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60 +#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5 + #define APDS9960_REG_GOFFSET_U 0xa4 #define APDS9960_REG_GOFFSET_D 0xa5 #define APDS9960_REG_GPULSE 0xa6 @@ -395,9 +395,9 @@ static int apds9960_set_pxs_gain(struct apds9960_data *data, int val) } ret = regmap_update_bits(data->regmap, - APDS9960_REG_CONFIG_2, - APDS9960_REG_CONFIG_2_GGAIN_MASK, - idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT); + APDS9960_REG_GCONF_2, + APDS9960_REG_GCONF_2_GGAIN_MASK, + idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT); if (!ret) data->pxs_gain = idx; mutex_unlock(&data->lock); diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c index 40b7dd266b314..e39d512145a67 100644 --- a/drivers/iio/light/tsl2583.c +++ b/drivers/iio/light/tsl2583.c @@ -856,7 +856,7 @@ static int tsl2583_probe(struct i2c_client *clientp, TSL2583_POWER_OFF_DELAY_MS); pm_runtime_use_autosuspend(&clientp->dev); - ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev); + ret = iio_device_register(indio_dev); if (ret) { dev_err(&clientp->dev, "%s: iio registration failed\n", __func__); diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c index 0730380ceb692..cf8b92fae1b3d 100644 --- a/drivers/iio/pressure/dps310.c +++ b/drivers/iio/pressure/dps310.c @@ -89,6 +89,7 @@ struct dps310_data { s32 c00, c10, c20, c30, c01, c11, c21; s32 pressure_raw; s32 temp_raw; + bool timeout_recovery_failed; }; static const struct iio_chan_spec dps310_channels[] = { @@ -159,6 +160,102 @@ static int dps310_get_coefs(struct dps310_data *data) return 0; } +/* + * Some versions of the chip will read temperatures in the ~60C range when + * it's actually ~20C. This is the manufacturer recommended workaround + * to correct the issue. The registers used below are undocumented. 
+ */ +static int dps310_temp_workaround(struct dps310_data *data) +{ + int rc; + int reg; + + rc = regmap_read(data->regmap, 0x32, ®); + if (rc) + return rc; + + /* + * If bit 1 is set then the device is okay, and the workaround does not + * need to be applied + */ + if (reg & BIT(1)) + return 0; + + rc = regmap_write(data->regmap, 0x0e, 0xA5); + if (rc) + return rc; + + rc = regmap_write(data->regmap, 0x0f, 0x96); + if (rc) + return rc; + + rc = regmap_write(data->regmap, 0x62, 0x02); + if (rc) + return rc; + + rc = regmap_write(data->regmap, 0x0e, 0x00); + if (rc) + return rc; + + return regmap_write(data->regmap, 0x0f, 0x00); +} + +static int dps310_startup(struct dps310_data *data) +{ + int rc; + int ready; + + /* + * Set up pressure sensor in single sample, one measurement per second + * mode + */ + rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0); + if (rc) + return rc; + + /* + * Set up external (MEMS) temperature sensor in single sample, one + * measurement per second mode + */ + rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT); + if (rc) + return rc; + + /* Temp and pressure shifts are disabled when PRC <= 8 */ + rc = regmap_write_bits(data->regmap, DPS310_CFG_REG, + DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0); + if (rc) + return rc; + + /* MEAS_CFG doesn't update correctly unless first written with 0 */ + rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, + DPS310_MEAS_CTRL_BITS, 0); + if (rc) + return rc; + + /* Turn on temperature and pressure measurement in the background */ + rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, + DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN | + DPS310_TEMP_EN | DPS310_BACKGROUND); + if (rc) + return rc; + + /* + * Calibration coefficients required for reporting temperature. + * They are available 40ms after the device has started + */ + rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, + ready & DPS310_COEF_RDY, 10000, 40000); + if (rc) + return rc; + + rc = dps310_get_coefs(data); + if (rc) + return rc; + + return dps310_temp_workaround(data); +} + static int dps310_get_pres_precision(struct dps310_data *data) { int rc; @@ -297,11 +394,69 @@ static int dps310_get_temp_k(struct dps310_data *data) return scale_factors[ilog2(rc)]; } +static int dps310_reset_wait(struct dps310_data *data) +{ + int rc; + + rc = regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC); + if (rc) + return rc; + + /* Wait for device chip access: 2.5ms in specification */ + usleep_range(2500, 12000); + return 0; +} + +static int dps310_reset_reinit(struct dps310_data *data) +{ + int rc; + + rc = dps310_reset_wait(data); + if (rc) + return rc; + + return dps310_startup(data); +} + +static int dps310_ready_status(struct dps310_data *data, int ready_bit, int timeout) +{ + int sleep = DPS310_POLL_SLEEP_US(timeout); + int ready; + + return regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, ready & ready_bit, + sleep, timeout); +} + +static int dps310_ready(struct dps310_data *data, int ready_bit, int timeout) +{ + int rc; + + rc = dps310_ready_status(data, ready_bit, timeout); + if (rc) { + if (rc == -ETIMEDOUT && !data->timeout_recovery_failed) { + /* Reset and reinitialize the chip. */ + if (dps310_reset_reinit(data)) { + data->timeout_recovery_failed = true; + } else { + /* Try again to get sensor ready status. 
*/ + if (dps310_ready_status(data, ready_bit, timeout)) + data->timeout_recovery_failed = true; + else + return 0; + } + } + + return rc; + } + + data->timeout_recovery_failed = false; + return 0; +} + static int dps310_read_pres_raw(struct dps310_data *data) { int rc; int rate; - int ready; int timeout; s32 raw; u8 val[3]; @@ -313,9 +468,7 @@ static int dps310_read_pres_raw(struct dps310_data *data) timeout = DPS310_POLL_TIMEOUT_US(rate); /* Poll for sensor readiness; base the timeout upon the sample rate. */ - rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, - ready & DPS310_PRS_RDY, - DPS310_POLL_SLEEP_US(timeout), timeout); + rc = dps310_ready(data, DPS310_PRS_RDY, timeout); if (rc) goto done; @@ -352,7 +505,6 @@ static int dps310_read_temp_raw(struct dps310_data *data) { int rc; int rate; - int ready; int timeout; if (mutex_lock_interruptible(&data->lock)) @@ -362,10 +514,8 @@ static int dps310_read_temp_raw(struct dps310_data *data) timeout = DPS310_POLL_TIMEOUT_US(rate); /* Poll for sensor readiness; base the timeout upon the sample rate. */ - rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, - ready & DPS310_TMP_RDY, - DPS310_POLL_SLEEP_US(timeout), timeout); - if (rc < 0) + rc = dps310_ready(data, DPS310_TMP_RDY, timeout); + if (rc) goto done; rc = dps310_read_temp_ready(data); @@ -660,7 +810,7 @@ static void dps310_reset(void *action_data) { struct dps310_data *data = action_data; - regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC); + dps310_reset_wait(data); } static const struct regmap_config dps310_regmap_config = { @@ -677,52 +827,12 @@ static const struct iio_info dps310_info = { .write_raw = dps310_write_raw, }; -/* - * Some verions of chip will read temperatures in the ~60C range when - * its actually ~20C. This is the manufacturer recommended workaround - * to correct the issue. The registers used below are undocumented. 
- */ -static int dps310_temp_workaround(struct dps310_data *data) -{ - int rc; - int reg; - - rc = regmap_read(data->regmap, 0x32, ®); - if (rc < 0) - return rc; - - /* - * If bit 1 is set then the device is okay, and the workaround does not - * need to be applied - */ - if (reg & BIT(1)) - return 0; - - rc = regmap_write(data->regmap, 0x0e, 0xA5); - if (rc < 0) - return rc; - - rc = regmap_write(data->regmap, 0x0f, 0x96); - if (rc < 0) - return rc; - - rc = regmap_write(data->regmap, 0x62, 0x02); - if (rc < 0) - return rc; - - rc = regmap_write(data->regmap, 0x0e, 0x00); - if (rc < 0) - return rc; - - return regmap_write(data->regmap, 0x0f, 0x00); -} - static int dps310_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct dps310_data *data; struct iio_dev *iio; - int rc, ready; + int rc; iio = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!iio) @@ -747,54 +857,8 @@ static int dps310_probe(struct i2c_client *client, if (rc) return rc; - /* - * Set up pressure sensor in single sample, one measurement per second - * mode - */ - rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0); - - /* - * Set up external (MEMS) temperature sensor in single sample, one - * measurement per second mode - */ - rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT); - if (rc < 0) - return rc; - - /* Temp and pressure shifts are disabled when PRC <= 8 */ - rc = regmap_write_bits(data->regmap, DPS310_CFG_REG, - DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0); - if (rc < 0) - return rc; - - /* MEAS_CFG doesn't update correctly unless first written with 0 */ - rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, - DPS310_MEAS_CTRL_BITS, 0); - if (rc < 0) - return rc; - - /* Turn on temperature and pressure measurement in the background */ - rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, - DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN | - DPS310_TEMP_EN | DPS310_BACKGROUND); - if (rc < 0) - return rc; - - /* - * Calibration coefficients required for reporting temperature. - * They are available 40ms after the device has started - */ - rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, - ready & DPS310_COEF_RDY, 10000, 40000); - if (rc < 0) - return rc; - - rc = dps310_get_coefs(data); - if (rc < 0) - return rc; - - rc = dps310_temp_workaround(data); - if (rc < 0) + rc = dps310_startup(data); + if (rc) return rc; rc = devm_iio_device_register(&client->dev, iio); diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h index bc06271fa38bc..5e2d2d4d87b56 100644 --- a/drivers/iio/pressure/ms5611.h +++ b/drivers/iio/pressure/ms5611.h @@ -25,13 +25,6 @@ enum { MS5607, }; -struct ms5611_chip_info { - u16 prom[MS5611_PROM_WORDS_NB]; - - int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info, - s32 *temp, s32 *pressure); -}; - /* * OverSampling Rate descriptor. 
* Warning: cmd MUST be kept aligned on a word boundary (see @@ -50,12 +43,15 @@ struct ms5611_state { const struct ms5611_osr *pressure_osr; const struct ms5611_osr *temp_osr; - int (*reset)(struct device *dev); - int (*read_prom_word)(struct device *dev, int index, u16 *word); - int (*read_adc_temp_and_pressure)(struct device *dev, + u16 prom[MS5611_PROM_WORDS_NB]; + + int (*reset)(struct ms5611_state *st); + int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word); + int (*read_adc_temp_and_pressure)(struct ms5611_state *st, s32 *temp, s32 *pressure); - struct ms5611_chip_info *chip_info; + int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp, + s32 *pressure); struct regulator *vdd; }; diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 214b0d25f5980..874a73b3ea9d6 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -85,8 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev) struct ms5611_state *st = iio_priv(indio_dev); for (i = 0; i < MS5611_PROM_WORDS_NB; i++) { - ret = st->read_prom_word(&indio_dev->dev, - i, &st->chip_info->prom[i]); + ret = st->read_prom_word(st, i, &st->prom[i]); if (ret < 0) { dev_err(&indio_dev->dev, "failed to read prom at %d\n", i); @@ -94,7 +93,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev) } } - if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) { + if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) { dev_err(&indio_dev->dev, "PROM integrity check failed\n"); return -ENODEV; } @@ -108,28 +107,27 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev, int ret; struct ms5611_state *st = iio_priv(indio_dev); - ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure); + ret = st->read_adc_temp_and_pressure(st, temp, pressure); if (ret < 0) { dev_err(&indio_dev->dev, "failed to read temperature and pressure\n"); return ret; } - return st->chip_info->temp_and_pressure_compensate(st->chip_info, - temp, pressure); + return st->compensate_temp_and_pressure(st, temp, pressure); } -static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info, +static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st, s32 *temp, s32 *pressure) { s32 t = *temp, p = *pressure; s64 off, sens, dt; - dt = t - (chip_info->prom[5] << 8); - off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7); - sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8); + dt = t - (st->prom[5] << 8); + off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7); + sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8); - t = 2000 + ((chip_info->prom[6] * dt) >> 23); + t = 2000 + ((st->prom[6] * dt) >> 23); if (t < 2000) { s64 off2, sens2, t2; @@ -155,17 +153,17 @@ static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf return 0; } -static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info, +static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st, s32 *temp, s32 *pressure) { s32 t = *temp, p = *pressure; s64 off, sens, dt; - dt = t - (chip_info->prom[5] << 8); - off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6); - sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7); + dt = t - (st->prom[5] << 8); + off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6); + sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7); - t = 2000 + 
((chip_info->prom[6] * dt) >> 23); + t = 2000 + ((st->prom[6] * dt) >> 23); if (t < 2000) { s64 off2, sens2, t2, tmp; @@ -196,7 +194,7 @@ static int ms5611_reset(struct iio_dev *indio_dev) int ret; struct ms5611_state *st = iio_priv(indio_dev); - ret = st->reset(&indio_dev->dev); + ret = st->reset(st); if (ret < 0) { dev_err(&indio_dev->dev, "failed to reset device\n"); return ret; @@ -343,15 +341,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev, static const unsigned long ms5611_scan_masks[] = {0x3, 0}; -static struct ms5611_chip_info chip_info_tbl[] = { - [MS5611] = { - .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate, - }, - [MS5607] = { - .temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate, - } -}; - static const struct iio_chan_spec ms5611_channels[] = { { .type = IIO_PRESSURE, @@ -434,7 +423,20 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, struct ms5611_state *st = iio_priv(indio_dev); mutex_init(&st->lock); - st->chip_info = &chip_info_tbl[type]; + + switch (type) { + case MS5611: + st->compensate_temp_and_pressure = + ms5611_temp_and_pressure_compensate; + break; + case MS5607: + st->compensate_temp_and_pressure = + ms5607_temp_and_pressure_compensate; + break; + default: + return -EINVAL; + } + st->temp_osr = &ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1]; st->pressure_osr = diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c index 7c04f730430c7..cccc40f7df0b9 100644 --- a/drivers/iio/pressure/ms5611_i2c.c +++ b/drivers/iio/pressure/ms5611_i2c.c @@ -20,17 +20,15 @@ #include "ms5611.h" -static int ms5611_i2c_reset(struct device *dev) +static int ms5611_i2c_reset(struct ms5611_state *st) { - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); - return i2c_smbus_write_byte(st->client, MS5611_RESET); } -static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word) +static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index, + u16 *word) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = i2c_smbus_read_word_swapped(st->client, MS5611_READ_PROM_WORD + (index << 1)); @@ -57,11 +55,10 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val) return 0; } -static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev, +static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st, s32 *temp, s32 *pressure) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); const struct ms5611_osr *osr = st->temp_osr; ret = i2c_smbus_write_byte(st->client, osr->cmd); diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c index 45d3a7d5be8e4..3039fe8aa2a2d 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -15,18 +15,17 @@ #include "ms5611.h" -static int ms5611_spi_reset(struct device *dev) +static int ms5611_spi_reset(struct ms5611_state *st) { u8 cmd = MS5611_RESET; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); return spi_write_then_read(st->client, &cmd, 1, NULL, 0); } -static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word) +static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index, + u16 *word) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1)); if (ret < 0) @@ -37,11 +36,10 @@ static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word) return 0; } -static int 
ms5611_spi_read_adc(struct device *dev, s32 *val) +static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val) { int ret; u8 buf[3] = { MS5611_READ_ADC }; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = spi_write_then_read(st->client, buf, 1, buf, 3); if (ret < 0) @@ -52,11 +50,10 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val) return 0; } -static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, +static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st, s32 *temp, s32 *pressure) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); const struct ms5611_osr *osr = st->temp_osr; /* @@ -68,7 +65,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, return ret; usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL)); - ret = ms5611_spi_read_adc(dev, temp); + ret = ms5611_spi_read_adc(st, temp); if (ret < 0) return ret; @@ -78,7 +75,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, return ret; usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL)); - return ms5611_spi_read_adc(dev, pressure); + return ms5611_spi_read_adc(st, pressure); } static int ms5611_spi_probe(struct spi_device *spi) @@ -94,7 +91,7 @@ static int ms5611_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, indio_dev); spi->mode = SPI_MODE_0; - spi->max_speed_hz = 20000000; + spi->max_speed_hz = min(spi->max_speed_hz, 20000000U); spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index 3b4a0e60e6059..b2ae2d2c7eefc 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -205,6 +205,7 @@ struct ltc2983_data { * Holds the converted temperature */ __be32 temp ____cacheline_aligned; + __be32 chan_val; }; struct ltc2983_sensor { @@ -309,19 +310,18 @@ static int __ltc2983_fault_handler(const struct ltc2983_data *st, return 0; } -static int __ltc2983_chan_assign_common(const struct ltc2983_data *st, +static int __ltc2983_chan_assign_common(struct ltc2983_data *st, const struct ltc2983_sensor *sensor, u32 chan_val) { u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan); - __be32 __chan_val; chan_val |= LTC2983_CHAN_TYPE(sensor->type); dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg, chan_val); - __chan_val = cpu_to_be32(chan_val); - return regmap_bulk_write(st->regmap, reg, &__chan_val, - sizeof(__chan_val)); + st->chan_val = cpu_to_be32(chan_val); + return regmap_bulk_write(st->regmap, reg, &st->chan_val, + sizeof(st->chan_val)); } static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st, @@ -1376,13 +1376,6 @@ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio) return ret; } - st->iio_chan = devm_kzalloc(&st->spi->dev, - st->iio_channels * sizeof(*st->iio_chan), - GFP_KERNEL); - - if (!st->iio_chan) - return -ENOMEM; - ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG, LTC2983_NOTCH_FREQ_MASK, LTC2983_NOTCH_FREQ(st->filter_notch_freq)); @@ -1494,6 +1487,12 @@ static int ltc2983_probe(struct spi_device *spi) if (ret) return ret; + st->iio_chan = devm_kzalloc(&spi->dev, + st->iio_channels * sizeof(*st->iio_chan), + GFP_KERNEL); + if (!st->iio_chan) + return -ENOMEM; + ret = ltc2983_setup(st, true); if (ret) return ret; diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index 2277d6336ac06..9ed5b9405ade0 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ 
b/drivers/iio/trigger/iio-trig-sysfs.c @@ -209,9 +209,13 @@ static int iio_sysfs_trigger_remove(int id) static int __init iio_sysfs_trig_init(void) { + int ret; device_initialize(&iio_sysfs_trig_dev); dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger"); - return device_add(&iio_sysfs_trig_dev); + ret = device_add(&iio_sysfs_trig_dev); + if (ret) + put_device(&iio_sysfs_trig_dev); + return ret; } module_init(iio_sysfs_trig_init); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 3cc7a23fa69fe..3133b6be6cab9 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1643,14 +1643,13 @@ static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num, static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, - struct sa_path_rec *alt_path) + struct sa_path_rec *alt_path, + struct ib_wc *wc) { u32 lid; if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) { - sa_path_set_dlid(primary_path, - IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, - req_msg)); + sa_path_set_dlid(primary_path, wc->slid); sa_path_set_slid(primary_path, IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg)); @@ -1687,7 +1686,8 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, static void cm_format_paths_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, - struct sa_path_rec *alt_path) + struct sa_path_rec *alt_path, + struct ib_wc *wc) { primary_path->dgid = *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg); @@ -1745,7 +1745,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, if (sa_path_is_roce(alt_path)) alt_path->roce.route_resolved = false; } - cm_format_path_lid_from_req(req_msg, primary_path, alt_path); + cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc); } static u16 cm_get_bth_pkey(struct cm_work *work) @@ -2163,7 +2163,7 @@ static int cm_req_handler(struct cm_work *work) if (cm_req_has_alt_path(req_msg)) work->path[1].rec_type = work->path[0].rec_type; cm_format_paths_from_req(req_msg, &work->path[0], - &work->path[1]); + &work->path[1], work->mad_recv_wc->wc); if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) sa_path_set_dmac(&work->path[0], cm_id_priv->av.ah_attr.roce.dmac); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 3c40aa50cd60c..9ed5de38e372f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1437,7 +1437,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev, return false; memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_iif = net_dev->ifindex; + fl4.flowi4_oif = net_dev->ifindex; fl4.daddr = daddr; fl4.saddr = saddr; @@ -1722,8 +1722,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, } if (!validate_net_dev(*net_dev, - (struct sockaddr *)&req->listen_addr_storage, - (struct sockaddr *)&req->src_addr_storage)) { + (struct sockaddr *)&req->src_addr_storage, + (struct sockaddr *)&req->listen_addr_storage)) { id_priv = ERR_PTR(-EHOSTUNREACH); goto err; } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index aa526c5ca0cf3..5b7abcf102fe9 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2759,10 +2759,18 @@ static int __init ib_core_init(void) nldev_init(); rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); - roce_gid_mgmt_init(); + ret = roce_gid_mgmt_init(); + if (ret) { + pr_warn("Couldn't init RoCE GID management\n"); + goto err_parent; + } return 0; +err_parent: + 
rdma_nl_unregister(RDMA_NL_LS); + nldev_exit(); + unregister_pernet_device(&rdma_dev_net_ops); err_compat: unregister_blocking_lsm_notifier(&ibdev_lsm_nb); err_sa: @@ -2785,8 +2793,8 @@ static int __init ib_core_init(void) static void __exit ib_core_cleanup(void) { roce_gid_mgmt_cleanup(); - nldev_exit(); rdma_nl_unregister(RDMA_NL_LS); + nldev_exit(); unregister_pernet_device(&rdma_dev_net_ops); unregister_blocking_lsm_notifier(&ibdev_lsm_nb); ib_sa_cleanup(); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 12d29d54a0812..f7f80707af4b6 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -502,7 +502,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, /* In create_qp() port is not set yet */ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) - return -EINVAL; + return -EMSGSIZE; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); if (ret) @@ -541,7 +541,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_cm_id *cm_id = &id_priv->id; if (port && port != cm_id->port_num) - return 0; + return -EAGAIN; if (cm_id->port_num && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) @@ -754,6 +754,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg, int ret = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); + if (!table_attr) + return -EMSGSIZE; rt = &counter->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); @@ -2181,7 +2183,7 @@ void __init nldev_init(void) rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table); } -void __exit nldev_exit(void) +void nldev_exit(void) { rdma_nl_unregister(RDMA_NL_NLDEV); } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 323f6cf006824..af4af4789ef27 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -466,7 +466,7 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt, mutex_unlock(&umem_odp->umem_mutex); out_put_mm: - mmput(owning_mm); + mmput_async(owning_mm); out_put_task: if (owning_process) put_task_struct(owning_process); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 466026825dd75..d7c90da9ce7f1 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -749,6 +749,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->iova = cmd.hca_va; + mr->length = cmd.length; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_set_name(&mr->res, NULL); @@ -832,8 +833,10 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) atomic_dec(&old_pd->usecnt); } - if (cmd.flags & IB_MR_REREG_TRANS) + if (cmd.flags & IB_MR_REREG_TRANS) { mr->iova = cmd.hca_va; + mr->length = cmd.length; + } memset(&resp, 0, sizeof(resp)); resp.lkey = mr->lkey; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 597e889ba8312..5123be0ab02f5 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2082,6 +2082,8 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->pd = pd; mr->dm = NULL; atomic_inc(&pd->usecnt); + mr->iova = virt_addr; + mr->length = length; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_parent_name(&mr->res, &pd->res); @@ -2909,15 +2911,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start); bool __rdma_block_iter_next(struct 
ib_block_iter *biter) { unsigned int block_offset; + unsigned int sg_delta; if (!biter->__sg_nents || !biter->__sg) return false; biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); - biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset; + sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; - if (biter->__sg_advance >= sg_dma_len(biter->__sg)) { + if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { + biter->__sg_advance += sg_delta; + } else { biter->__sg_advance = 0; biter->__sg = sg_next(biter->__sg); biter->__sg_nents--; diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 04b1e8f021f64..d5a8d0173709a 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -219,6 +219,8 @@ int node_affinity_init(void) for (node = 0; node < node_affinity.num_possible_nodes; node++) hfi1_per_node_cntr[node] = 1; + pci_dev_put(dev); + return 0; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d84b1098762c1..7828fb5931203 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -1359,12 +1359,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg, addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) - return -EFAULT; + ret = -EFAULT; addr = arg + offsetof(struct hfi1_tid_info, length); - if (copy_to_user((void __user *)addr, &tinfo.length, + if (!ret && copy_to_user((void __user *)addr, &tinfo.length, sizeof(tinfo.length))) ret = -EFAULT; + + if (ret) + hfi1_user_exp_rcv_invalid(fd, &tinfo); } return ret; diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index 2cf102b5abd44..f3e64c850aa51 100644 --- a/drivers/infiniband/hw/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c @@ -1786,6 +1786,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (!dd->platform_config.data) { dd_dev_err(dd, "%s: Missing config file\n", __func__); + ret = -EINVAL; goto bail; } ptr = (u32 *)dd->platform_config.data; @@ -1794,6 +1795,7 @@ int parse_platform_config(struct hfi1_devdata *dd) ptr++; if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) { dd_dev_err(dd, "%s: Bad config file\n", __func__); + ret = -EINVAL; goto bail; } @@ -1817,6 +1819,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (file_length > dd->platform_config.size) { dd_dev_info(dd, "%s:File claims to be larger than read size\n", __func__); + ret = -EINVAL; goto bail; } else if (file_length < dd->platform_config.size) { dd_dev_info(dd, @@ -1837,6 +1840,7 @@ int parse_platform_config(struct hfi1_devdata *dd) dd_dev_err(dd, "%s: Failed validation at offset %ld\n", __func__, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; } @@ -1880,6 +1884,7 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table = ptr; @@ -1899,6 +1904,7 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *)dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table_metadata = diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 
1cd8f80f097a7..60eb3a64518f3 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -955,8 +955,7 @@ void sc_disable(struct send_context *sc) spin_unlock(&sc->release_lock); write_seqlock(&sc->waitlock); - if (!list_empty(&sc->piowait)) - list_move(&sc->piowait, &wake_list); + list_splice_init(&sc->piowait, &wake_list); write_sequnlock(&sc->waitlock); while (!list_empty(&wake_list)) { struct iowait *wait; diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index b94fc7fd75a96..897923981855d 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -65,18 +65,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata, static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq); +static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq); static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *, struct tid_group *grp, unsigned int start, u16 count, u32 *tidlist, unsigned int *tididx, unsigned int *pmapped); -static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, - struct tid_group **grp); +static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo); +static void __clear_tid_node(struct hfi1_filedata *fd, + struct tid_rb_node *node); static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node); static const struct mmu_interval_notifier_ops tid_mn_ops = { .invalidate = tid_rb_invalidate, }; +static const struct mmu_interval_notifier_ops tid_cover_ops = { + .invalidate = tid_cover_invalidate, +}; /* * Initialize context and file private data needed for Expected @@ -295,53 +302,65 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, tididx = 0, mapped, mapped_pages = 0; u32 *tidlist = NULL; struct tid_user_buf *tidbuf; + unsigned long mmu_seq = 0; if (!PAGE_ALIGNED(tinfo->vaddr)) return -EINVAL; + if (tinfo->length == 0) + return -EINVAL; tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); if (!tidbuf) return -ENOMEM; + mutex_init(&tidbuf->cover_mutex); tidbuf->vaddr = tinfo->vaddr; tidbuf->length = tinfo->length; tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets), GFP_KERNEL); if (!tidbuf->psets) { - kfree(tidbuf); - return -ENOMEM; + ret = -ENOMEM; + goto fail_release_mem; + } + + if (fd->use_mn) { + ret = mmu_interval_notifier_insert( + &tidbuf->notifier, current->mm, + tidbuf->vaddr, tidbuf->npages * PAGE_SIZE, + &tid_cover_ops); + if (ret) + goto fail_release_mem; + mmu_seq = mmu_interval_read_begin(&tidbuf->notifier); } pinned = pin_rcv_pages(fd, tidbuf); if (pinned <= 0) { - kfree(tidbuf->psets); - kfree(tidbuf); - return pinned; + ret = (pinned < 0) ? pinned : -ENOSPC; + goto fail_unpin; } /* Find sets of physically contiguous pages */ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned); - /* - * We don't need to access this under a lock since tid_used is per - * process and the same process cannot be in hfi1_user_exp_rcv_clear() - * and hfi1_user_exp_rcv_setup() at the same time. - */ + /* Reserve the number of expected tids to be used. 
*/ spin_lock(&fd->tid_lock); if (fd->tid_used + tidbuf->n_psets > fd->tid_limit) pageset_count = fd->tid_limit - fd->tid_used; else pageset_count = tidbuf->n_psets; + fd->tid_used += pageset_count; spin_unlock(&fd->tid_lock); - if (!pageset_count) - goto bail; + if (!pageset_count) { + ret = -ENOSPC; + goto fail_unreserve; + } ngroups = pageset_count / dd->rcv_entries.group_size; tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL); if (!tidlist) { ret = -ENOMEM; - goto nomem; + goto fail_unreserve; } tididx = 0; @@ -437,43 +456,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, } unlock: mutex_unlock(&uctxt->exp_mutex); -nomem: hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx, mapped_pages, ret); - if (tididx) { - spin_lock(&fd->tid_lock); - fd->tid_used += tididx; - spin_unlock(&fd->tid_lock); - tinfo->tidcnt = tididx; - tinfo->length = mapped_pages * PAGE_SIZE; - - if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), - tidlist, sizeof(tidlist[0]) * tididx)) { - /* - * On failure to copy to the user level, we need to undo - * everything done so far so we don't leak resources. - */ - tinfo->tidlist = (unsigned long)&tidlist; - hfi1_user_exp_rcv_clear(fd, tinfo); - tinfo->tidlist = 0; - ret = -EFAULT; - goto bail; + + /* fail if nothing was programmed, set error if none provided */ + if (tididx == 0) { + if (ret >= 0) + ret = -ENOSPC; + goto fail_unreserve; + } + + /* adjust reserved tid_used to actual count */ + spin_lock(&fd->tid_lock); + fd->tid_used -= pageset_count - tididx; + spin_unlock(&fd->tid_lock); + + /* unpin all pages not covered by a TID */ + unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages, + false); + + if (fd->use_mn) { + /* check for an invalidate during setup */ + bool fail = false; + + mutex_lock(&tidbuf->cover_mutex); + fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq); + mutex_unlock(&tidbuf->cover_mutex); + + if (fail) { + ret = -EBUSY; + goto fail_unprogram; } } - /* - * If not everything was mapped (due to insufficient RcvArray entries, - * for example), unpin all unmapped pages so we can pin them nex time. - */ - if (mapped_pages != pinned) - unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, - (pinned - mapped_pages), false); -bail: + tinfo->tidcnt = tididx; + tinfo->length = mapped_pages * PAGE_SIZE; + + if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), + tidlist, sizeof(tidlist[0]) * tididx)) { + ret = -EFAULT; + goto fail_unprogram; + } + + if (fd->use_mn) + mmu_interval_notifier_remove(&tidbuf->notifier); + kfree(tidbuf->pages); kfree(tidbuf->psets); + kfree(tidbuf); kfree(tidlist); + return 0; + +fail_unprogram: + /* unprogram, unmap, and unpin all allocated TIDs */ + tinfo->tidlist = (unsigned long)tidlist; + hfi1_user_exp_rcv_clear(fd, tinfo); + tinfo->tidlist = 0; + pinned = 0; /* nothing left to unpin */ + pageset_count = 0; /* nothing left reserved */ +fail_unreserve: + spin_lock(&fd->tid_lock); + fd->tid_used -= pageset_count; + spin_unlock(&fd->tid_lock); +fail_unpin: + if (fd->use_mn) + mmu_interval_notifier_remove(&tidbuf->notifier); + if (pinned > 0) + unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false); +fail_release_mem: kfree(tidbuf->pages); + kfree(tidbuf->psets); kfree(tidbuf); - return ret > 0 ? 
0 : ret; + kfree(tidlist); + return ret; } int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, @@ -494,7 +548,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, mutex_lock(&uctxt->exp_mutex); for (tididx = 0; tididx < tinfo->tidcnt; tididx++) { - ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL); + ret = unprogram_rcvarray(fd, tidinfo[tididx]); if (ret) { hfi1_cdbg(TID, "Failed to unprogram rcv array %d", ret); @@ -750,6 +804,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, } node->fdata = fd; + mutex_init(&node->invalidate_mutex); node->phys = page_to_phys(pages[0]); node->npages = npages; node->rcventry = rcventry; @@ -765,11 +820,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, &tid_mn_ops); if (ret) goto out_unmap; - /* - * FIXME: This is in the wrong order, the notifier should be - * established before the pages are pinned by pin_rcv_pages. - */ - mmu_interval_read_begin(&node->notifier); } fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node; @@ -789,8 +839,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, return -EFAULT; } -static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, - struct tid_group **grp) +static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; @@ -813,9 +862,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, if (!node || node->rcventry != (uctxt->expected_base + rcventry)) return -EBADF; - if (grp) - *grp = node->grp; - if (fd->use_mn) mmu_interval_notifier_remove(&node->notifier); cacheless_tid_rb_remove(fd, node); @@ -823,23 +869,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, return 0; } -static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) +static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; + mutex_lock(&node->invalidate_mutex); + if (node->freed) + goto done; + node->freed = true; + trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry, node->npages, node->notifier.interval_tree.start, node->phys, node->dma_addr); - /* - * Make sure device has seen the write before we unpin the - * pages. 
- */ + /* Make sure device has seen the write before pages are unpinned */ hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0); unpin_rcv_pages(fd, NULL, node, 0, node->npages, true); +done: + mutex_unlock(&node->invalidate_mutex); +} + +static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) +{ + struct hfi1_ctxtdata *uctxt = fd->uctxt; + + __clear_tid_node(fd, node); node->grp->used--; node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); @@ -898,10 +955,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, if (node->freed) return true; + /* take action only if unmapping */ + if (range->event != MMU_NOTIFY_UNMAP) + return true; + trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->notifier.interval_tree.start, node->rcventry, node->npages, node->dma_addr); - node->freed = true; + + /* clear the hardware rcvarray entry */ + __clear_tid_node(fdata, node); spin_lock(&fdata->invalid_lock); if (fdata->invalid_tid_idx < uctxt->expected_count) { @@ -931,6 +994,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, return true; } +static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct tid_user_buf *tidbuf = + container_of(mni, struct tid_user_buf, notifier); + + /* take action only if unmapping */ + if (range->event == MMU_NOTIFY_UNMAP) { + mutex_lock(&tidbuf->cover_mutex); + mmu_interval_set_seq(mni, cur_seq); + mutex_unlock(&tidbuf->cover_mutex); + } + + return true; +} + static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata, struct tid_rb_node *tnode) { diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h index d45c7b6988d4d..849f265f2f118 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h @@ -57,6 +57,8 @@ struct tid_pageset { }; struct tid_user_buf { + struct mmu_interval_notifier notifier; + struct mutex cover_mutex; unsigned long vaddr; unsigned long length; unsigned int npages; @@ -68,6 +70,7 @@ struct tid_user_buf { struct tid_rb_node { struct mmu_interval_notifier notifier; struct hfi1_filedata *fdata; + struct mutex invalidate_mutex; /* covers hw removal */ unsigned long phys; struct tid_group *grp; u32 rcventry; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 6dab03b7aca80..76ed547b76ea7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -154,21 +154,29 @@ static void set_atomic_seg(const struct ib_send_wr *wr, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); } +static unsigned int get_std_sge_num(struct hns_roce_qp *qp) +{ + if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) + return 0; + + return HNS_ROCE_SGE_IN_WQE; +} + static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, const struct ib_send_wr *wr, unsigned int *sge_idx, u32 msg_len) { struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev; - unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg); - unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len; unsigned int left_len_in_pg; unsigned int idx = *sge_idx; + unsigned int std_sge_num; unsigned int i = 0; unsigned int len; void *addr; void *dseg; - if (msg_len > ext_sge_sz) { + std_sge_num = get_std_sge_num(qp); + if (msg_len > (qp->sq.max_gs - std_sge_num) * HNS_ROCE_SGE_SIZE) { ibdev_err(ibdev, "no enough extended sge space for inline data.\n"); 
return -EINVAL; @@ -188,7 +196,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, if (len <= left_len_in_pg) { memcpy(dseg, addr, len); - idx += len / dseg_len; + idx += len / HNS_ROCE_SGE_SIZE; i++; if (i >= wr->num_sge) @@ -203,7 +211,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, len -= left_len_in_pg; addr += left_len_in_pg; - idx += left_len_in_pg / dseg_len; + idx += left_len_in_pg / HNS_ROCE_SGE_SIZE; dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1)); left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT; @@ -2158,6 +2166,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num, 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX); + if (!(caps->page_size_cap & PAGE_SIZE)) + caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; + return 0; } @@ -2730,7 +2741,8 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, int i, count; count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - ARRAY_SIZE(pages), &pbl_ba); + min_t(int, ARRAY_SIZE(pages), mr->npages), + &pbl_ba); if (count < 1) { ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", count); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index be7f2fe1e8839..8a92faeb3d237 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -92,7 +92,7 @@ #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE -#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 +#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 #define HNS_ROCE_INVALID_LKEY 0x100 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 027ec8413ac25..1c342a7bd7dff 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -286,7 +286,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto err_alloc_pbl; mr->ibmr.rkey = mr->ibmr.lkey = mr->key; - mr->ibmr.length = length; return &mr->ibmr; @@ -457,10 +456,10 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, return &mr->ibmr; -err_key: - free_mr_key(hr_dev, mr); err_pbl: free_mr_pbl(hr_dev, mr); +err_key: + free_mr_key(hr_dev, mr); err_free: kfree(mr); return ERR_PTR(ret); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 291e06d631505..6fe98af7741b5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -386,11 +386,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); - if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) - hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); - else - hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * - hr_qp->rq.max_gs); + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * + hr_qp->rq.max_gs); hr_qp->rq.wqe_cnt = cnt; if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 426fed005d538..811b4bb345247 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto err_mr; mr->ibmr.rkey = mr->ibmr.lkey = 
mr->mmr.key; - mr->ibmr.length = length; mr->ibmr.page_size = 1U << shift; return &mr->ibmr; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 9bb9bb058932f..cca7a4a6bd82d 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num, mdev = dev->mdev; mdev_port_num = 1; } + if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) { + /* set local port to one for Function-Per-Port HCA. */ + mdev = dev->mdev; + mdev_port_num = 1; + } + /* Declaring support of extended counters */ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { struct ib_class_port_info cpi = {}; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7a2bec0ac0055..0caff276f2c18 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4258,6 +4258,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev, return false; } +static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr, + int attr_mask, enum ib_qp_type qp_type) +{ + int log_max_ra_res; + int log_max_ra_req; + + if (qp_type == MLX5_IB_QPT_DCI) { + log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_res_dc); + log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_req_dc); + } else { + log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_res_qp); + log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_req_qp); + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > log_max_ra_res) { + mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", + attr->max_rd_atomic); + return false; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > log_max_ra_req) { + mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", + attr->max_dest_rd_atomic); + return false; + } + return true; +} + int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { @@ -4352,21 +4386,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, } } - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - attr->max_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) { - mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", - attr->max_rd_atomic); - goto out; - } - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) { - mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", - attr->max_dest_rd_atomic); + if (!validate_rd_atomic(dev, attr, attr_mask, qp_type)) goto out; - } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 967641662b24a..d0bb61b7e419f 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -374,6 +374,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev) if (IS_IWARP(dev)) { xa_init(&dev->qps); dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq"); + if (!dev->iwarp_wq) { + rc = -ENOMEM; + goto err1; + } } /* Allocate Status blocks for CNQ */ @@ -381,7 +385,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) GFP_KERNEL); if (!dev->sb_array) { rc = -ENOMEM; - goto err1; + goto err_destroy_wq; } dev->cnq_array = kcalloc(dev->num_cnq, @@ -432,6 +436,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev) kfree(dev->cnq_array); err2: kfree(dev->sb_array); +err_destroy_wq: + if (IS_IWARP(dev)) + 
destroy_workqueue(dev->iwarp_wq); err1: kfree(dev->sgid_tbl); return rc; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 760b254ba42d6..48a57568cad69 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -281,8 +281,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals, size = pa_end - pa_start + PAGE_SIZE; usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x", va_start, &pa_start, size, flags); - err = iommu_map(pd->domain, va_start, pa_start, - size, flags); + err = iommu_map_atomic(pd->domain, va_start, + pa_start, size, flags); if (err) { usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", va_start, &pa_start, size, err); @@ -298,8 +298,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals, size = pa - pa_start + PAGE_SIZE; usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n", va_start, &pa_start, size, flags); - err = iommu_map(pd->domain, va_start, pa_start, - size, flags); + err = iommu_map_atomic(pd->domain, va_start, + pa_start, size, flags); if (err) { usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", va_start, &pa_start, size, err); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 2847ab4d9a5f0..99c1b3553e6e0 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -775,7 +775,9 @@ void rxe_qp_destroy(struct rxe_qp *qp) rxe_cleanup_task(&qp->comp.task); /* flush out any receive wr's or pending requests */ - __rxe_do_task(&qp->req.task); + if (qp->req.task.func) + __rxe_do_task(&qp->req.task); + if (qp->sq.queue) { __rxe_do_task(&qp->comp.task); __rxe_do_task(&qp->req.task); @@ -810,13 +812,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work) qp->resp.mr = NULL; } - if (qp_type(qp) == IB_QPT_RC) - sk_dst_reset(qp->sk->sk); - free_rd_atomic_resources(qp); - kernel_sock_shutdown(qp->sk, SHUT_RDWR); - sock_release(qp->sk); + if (qp->sk) { + if (qp_type(qp) == IB_QPT_RC) + sk_dst_reset(qp->sk->sk); + + kernel_sock_shutdown(qp->sk, SHUT_RDWR); + sock_release(qp->sk); + } } /* called when the last reference to the qp is dropped */ diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c index d68e37859e73b..403029de6b92d 100644 --- a/drivers/infiniband/sw/siw/siw_cq.c +++ b/drivers/infiniband/sw/siw/siw_cq.c @@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { memset(wc, 0, sizeof(*wc)); wc->wr_id = cqe->id; - wc->status = map_cqe_status[cqe->status].ib; - wc->opcode = map_wc_opcode[cqe->opcode]; wc->byte_len = cqe->bytes; /* @@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) wc->wc_flags = IB_WC_WITH_INVALIDATE; } wc->qp = cqe->base_qp; + wc->opcode = map_wc_opcode[cqe->opcode]; + wc->status = map_cqe_status[cqe->status].ib; siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%pK\n", cq->cq_get % cq->num_cqe, cqe->opcode, cqe->flags, (void *)(uintptr_t)cqe->id); + } else { + /* + * A malicious user may set invalid opcode or + * status in the user mmapped CQE array. + * Sanity check and correct values in that case + * to avoid out-of-bounds access to global arrays + * for opcode and status mapping. 
+ */ + u8 opcode = cqe->opcode; + u16 status = cqe->status; + + if (opcode >= SIW_NUM_OPCODES) { + opcode = 0; + status = SIW_WC_GENERAL_ERR; + } else if (status >= SIW_NUM_WC_STATUS) { + status = SIW_WC_GENERAL_ERR; + } + wc->opcode = map_wc_opcode[opcode]; + wc->status = map_cqe_status[status].ib; + } WRITE_ONCE(cqe->flags, 0); cq->cq_get++; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 875ea6f1b04a2..fd721cc19682e 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -961,27 +961,28 @@ int siw_proc_terminate(struct siw_qp *qp) static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx) { struct sk_buff *skb = srx->skb; + int avail = min(srx->skb_new, srx->fpdu_part_rem); u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad; __wsum crc_in, crc_own = 0; siw_dbg_qp(qp, "expected %d, available %d, pad %u\n", srx->fpdu_part_rem, srx->skb_new, srx->pad); - if (srx->skb_new < srx->fpdu_part_rem) - return -EAGAIN; - - skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem); + skb_copy_bits(skb, srx->skb_offset, tbuf, avail); - if (srx->mpa_crc_hd && srx->pad) - crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad); + srx->skb_new -= avail; + srx->skb_offset += avail; + srx->skb_copied += avail; + srx->fpdu_part_rem -= avail; - srx->skb_new -= srx->fpdu_part_rem; - srx->skb_offset += srx->fpdu_part_rem; - srx->skb_copied += srx->fpdu_part_rem; + if (srx->fpdu_part_rem) + return -EAGAIN; if (!srx->mpa_crc_hd) return 0; + if (srx->pad) + crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad); /* * CRC32 is computed, transmitted and received directly in NBO, * so there's never a reason to convert byte order. @@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx) * completely received. */ if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) { - bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR; + int hdrlen = iwarp_pktinfo[opcode].hdr_len; - if (srx->skb_new < bytes) - return -EAGAIN; + bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new); skb_copy_bits(skb, srx->skb_offset, (char *)c_hdr + srx->fpdu_part_rcvd, bytes); @@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx) srx->skb_new -= bytes; srx->skb_offset += bytes; srx->skb_copied += bytes; + + if (srx->fpdu_part_rcvd < hdrlen) + return -EAGAIN; } /* diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 7989c4043db4e..df8802b4981cf 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) - return virt_to_page(paddr); + return virt_to_page((void *)(uintptr_t)paddr); return NULL; } @@ -523,13 +523,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) kunmap(p); } } else { - u64 va = sge->laddr + sge_off; + /* + * Cast to an uintptr_t to preserve all 64 bits + * in sge->laddr. + */ + uintptr_t va = (uintptr_t)(sge->laddr + sge_off); - page_array[seg] = virt_to_page(va & PAGE_MASK); + /* + * virt_to_page() takes a (void *) pointer + * so cast to a (void *) meaning it will be 64 + * bits on a 64 bit platform and 32 bits on a + * 32 bit platform. 
+ */ + page_array[seg] = virt_to_page((void *)(va & PAGE_MASK)); if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, - (void *)(uintptr_t)va, + (void *)va, plen); } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 34e847a91eb80..d043793ff0f53 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -672,13 +672,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { - struct siw_sqe sqe = {}; int rv = 0; while (wr) { - sqe.id = wr->wr_id; - sqe.opcode = wr->opcode; - rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); + struct siw_sqe sqe = {}; + + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + sqe.opcode = SIW_OP_WRITE; + break; + case IB_WR_RDMA_READ: + sqe.opcode = SIW_OP_READ; + break; + case IB_WR_RDMA_READ_WITH_INV: + sqe.opcode = SIW_OP_READ_LOCAL_INV; + break; + case IB_WR_SEND: + sqe.opcode = SIW_OP_SEND; + break; + case IB_WR_SEND_WITH_IMM: + sqe.opcode = SIW_OP_SEND_WITH_IMM; + break; + case IB_WR_SEND_WITH_INV: + sqe.opcode = SIW_OP_SEND_REMOTE_INV; + break; + case IB_WR_LOCAL_INV: + sqe.opcode = SIW_OP_INVAL_STAG; + break; + case IB_WR_REG_MR: + sqe.opcode = SIW_OP_REG_MR; + break; + default: + rv = -EINVAL; + break; + } + if (!rv) { + sqe.id = wr->wr_id; + rv = siw_sqe_complete(qp, &sqe, 0, + SIW_WC_WR_FLUSH_ERR); + } if (rv) { if (bad_wr) *bad_wr = wr; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index abfab89423f41..35322d23fc340 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2188,6 +2188,14 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name, rn->attach_mcast = ipoib_mcast_attach; rn->detach_mcast = ipoib_mcast_detach; rn->hca = hca; + + rc = netif_set_real_num_tx_queues(dev, 1); + if (rc) + goto out; + + rc = netif_set_real_num_rx_queues(dev, 1); + if (rc) + goto out; } priv->rn_ops = dev->netdev_ops; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index 5b05cf3837da1..28e9b70844e44 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -42,6 +42,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = { [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 }, }; +static unsigned int ipoib_get_max_num_queues(void) +{ + return min_t(unsigned int, num_possible_cpus(), 128); +} + static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -173,6 +178,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = { .changelink = ipoib_changelink, .get_size = ipoib_get_size, .fill_info = ipoib_fill_info, + .get_num_rx_queues = ipoib_get_max_num_queues, + .get_num_tx_queues = ipoib_get_max_num_queues, }; struct rtnl_link_ops *ipoib_get_link_ops(void) diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 5c39e4c4bef7f..1a5805260778b 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -902,7 +902,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, req->need_inv_comp = false; req->inv_errno = 0; - iov_iter_kvec(&iter, READ, vec, 1, usr_len); + iov_iter_kvec(&iter, WRITE, vec, 1, usr_len); len = _copy_from_iter(req->iu->buf, usr_len, &iter); 
WARN_ON(len != usr_len); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index b4ccb333a8342..adbd56af379ff 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3397,7 +3397,8 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_PKEY: - if (match_hex(args, &token)) { + ret = match_hex(args, &token); + if (ret) { pr_warn("bad P_Key parameter '%s'\n", p); goto out; } @@ -3457,7 +3458,8 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_MAX_SECT: - if (match_int(args, &token)) { + ret = match_int(args, &token); + if (ret) { pr_warn("bad max sect parameter '%s'\n", p); goto out; } @@ -3465,8 +3467,15 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_QUEUE_SIZE: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad queue_size parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->scsi_host->can_queue = token; @@ -3477,25 +3486,40 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_MAX_CMD_PER_LUN: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad max cmd_per_lun parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->scsi_host->cmd_per_lun = token; break; case SRP_OPT_TARGET_CAN_QUEUE: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad max target_can_queue parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->target_can_queue = token; break; case SRP_OPT_IO_CLASS: - if (match_hex(args, &token)) { + ret = match_hex(args, &token); + if (ret) { pr_warn("bad IO class parameter '%s'\n", p); goto out; } @@ -3504,6 +3528,7 @@ static int srp_parse_options(struct net *net, const char *buf, pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); + ret = -EINVAL; goto out; } target->io_class = token; @@ -3526,16 +3551,24 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_CMD_SG_ENTRIES: - if (match_int(args, &token) || token < 1 || token > 255) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1 || token > 255) { pr_warn("bad max cmd_sg_entries parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->cmd_sg_cnt = token; break; case SRP_OPT_ALLOW_EXT_SG: - if (match_int(args, &token)) { + ret = match_int(args, &token); + if (ret) { pr_warn("bad allow_ext_sg parameter '%s'\n", p); goto out; } @@ -3543,43 +3576,77 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_SG_TABLESIZE: - if (match_int(args, &token) || token < 1 || - token > SG_MAX_SEGMENTS) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1 || token > SG_MAX_SEGMENTS) { pr_warn("bad max sg_tablesize parameter 
'%s'\n", p); + ret = -EINVAL; goto out; } target->sg_tablesize = token; break; case SRP_OPT_COMP_VECTOR: - if (match_int(args, &token) || token < 0) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 0) { pr_warn("bad comp_vector parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->comp_vector = token; break; case SRP_OPT_TL_RETRY_COUNT: - if (match_int(args, &token) || token < 2 || token > 7) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 2 || token > 7) { pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", p); + ret = -EINVAL; goto out; } target->tl_retry_count = token; break; case SRP_OPT_MAX_IT_IU_SIZE: - if (match_int(args, &token) || token < 0) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 0) { pr_warn("bad maximum initiator to target IU size '%s'\n", p); + ret = -EINVAL; goto out; } target->max_it_iu_size = token; break; case SRP_OPT_CH_COUNT: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for channel count parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad channel count %s\n", p); + ret = -EINVAL; goto out; } target->ch_count = token; @@ -3588,6 +3655,7 @@ static int srp_parse_options(struct net *net, const char *buf, default: pr_warn("unknown parameter or missing value '%s' in target creation request\n", p); + ret = -EINVAL; goto out; } } diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 6818cac0a3b78..85bac20d9007d 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -62,9 +62,6 @@ enum { SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, - SRP_TAG_NO_REQ = ~0U, - SRP_TAG_TSK_MGMT = 1U << 31, - SRP_MAX_PAGES_PER_MR = 512, SRP_MAX_ADD_CDB_LEN = 16, @@ -79,6 +76,11 @@ enum { sizeof(struct srp_imm_buf), }; +enum { + SRP_TAG_NO_REQ = ~0U, + SRP_TAG_TSK_MGMT = BIT(31), +}; + enum srp_target_state { SRP_TARGET_SCANNING, SRP_TARGET_LIVE, diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index b080f0cfb068f..d8ec5193a941f 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -45,6 +45,7 @@ config JOYSTICK_A3D config JOYSTICK_ADC tristate "Simple joystick connected over ADC" depends on IIO + select IIO_BUFFER select IIO_BUFFER_CB help Say Y here if you have a simple joystick connected over ADC. 
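The JOYSTICK_ADC Kconfig change above adds "select IIO_BUFFER" next to the existing "select IIO_BUFFER_CB" because the callback helper is layered on the core IIO buffer code; selecting only IIO_BUFFER_CB can yield configurations where the iio_channel_*_cb() symbols it promises are not built. A minimal consumer-side sketch of the API in question (the adc_js_* names and the callback body are illustrative, not taken from this patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

/* Illustrative callback: invoked with one scan of raw ADC samples. */
static int adc_js_scan_cb(const void *data, void *private)
{
	/* decode the per-channel samples from 'data' here */
	return 0;
}

static int adc_js_setup(struct device *dev)
{
	struct iio_cb_buffer *cb;
	int ret;

	/*
	 * These helpers come from CONFIG_IIO_BUFFER_CB, which in turn
	 * needs CONFIG_IIO_BUFFER, hence the extra select above.
	 */
	cb = iio_channel_get_all_cb(dev, adc_js_scan_cb, NULL);
	if (IS_ERR(cb))
		return PTR_ERR(cb);

	ret = iio_channel_start_all_cb(cb);
	if (ret)
		iio_channel_release_all_cb(cb);

	return ret;
}

A driver such as adc-joystick registers a callback of this shape at probe time, which is why the dependency has to be satisfied at build time rather than discovered at link time.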
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index b2a68bc9f0b4d..84b87526b7ba3 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c @@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = { { 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce }, { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce }, + { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, @@ -272,22 +273,22 @@ int iforce_init_device(struct device *parent, u16 bustype, * Get device info. */ - if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3) input_dev->id.vendor = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n"); - if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3) input_dev->id.product = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n"); - if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3) iforce->device_memory.end = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n"); - if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2) + if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2) ff_effects = buf[1]; else dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n"); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index ba101afcfc27f..70dedc0f7827c 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -112,6 +112,8 @@ static const struct xpad_device { u8 xtype; } xpad_device[] = { { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, + { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 }, + { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. 
Controller", 0, XTYPE_XBOX }, @@ -242,6 +244,7 @@ static const struct xpad_device { { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE }, { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, @@ -258,6 +261,7 @@ static const struct xpad_device { { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, + { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, @@ -322,6 +326,7 @@ static const struct xpad_device { { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE }, @@ -331,6 +336,14 @@ static const struct xpad_device { { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE }, + { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 }, { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 }, { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, @@ -416,6 +429,7 @@ static const signed short xpad_abs_triggers[] = { static const struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */ + XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ @@ -426,6 +440,7 @@ static const struct usb_device_id xpad_table[] = { { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz
GamePad */ + XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ @@ -446,8 +461,12 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */ + XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */ + XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */ { } }; @@ -1964,7 +1983,6 @@ static struct usb_driver xpad_driver = { .disconnect = xpad_disconnect, .suspend = xpad_suspend, .resume = xpad_resume, - .reset_resume = xpad_resume, .id_table = xpad_table, }; diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index 65286762b02ab..ad8660be0127c 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c @@ -20,7 +20,7 @@ #include #include -#define SNVS_HPVIDR1_REG 0xF8 +#define SNVS_HPVIDR1_REG 0xBF8 #define SNVS_LPSR_REG 0x4C /* LP Status Register */ #define SNVS_LPCR_REG 0x38 /* LP Control Register */ #define SNVS_HPSR_REG 0x14 diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index efffcf0ebd3b4..31c02c2019c1c 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -18,6 +18,10 @@ #include #include +static bool use_low_level_irq; +module_param(use_low_level_irq, bool, 0444); +MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered"); + struct soc_button_info { const char *name; int acpi_index; @@ -73,6 +77,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), }, }, + { + /* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + }, { /* * Acer One S1003. 
_LID method messes with power-button GPIO @@ -164,7 +175,8 @@ soc_button_device_create(struct platform_device *pdev, } /* See dmi_use_low_level_irq[] comment */ - if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) { + if (!autorepeat && (use_low_level_irq || + dmi_check_system(dmi_use_low_level_irq))) { irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); gpio_keys[n_buttons].irq = irq; gpio_keys[n_buttons].gpio = -ENOENT; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 148a7c5fd0e22..65c0081838e3d 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -67,25 +67,84 @@ static inline void i8042_write_command(int val) #include -static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { +#define SERIO_QUIRK_NOKBD BIT(0) +#define SERIO_QUIRK_NOAUX BIT(1) +#define SERIO_QUIRK_NOMUX BIT(2) +#define SERIO_QUIRK_FORCEMUX BIT(3) +#define SERIO_QUIRK_UNLOCK BIT(4) +#define SERIO_QUIRK_PROBE_DEFER BIT(5) +#define SERIO_QUIRK_RESET_ALWAYS BIT(6) +#define SERIO_QUIRK_RESET_NEVER BIT(7) +#define SERIO_QUIRK_DIRECT BIT(8) +#define SERIO_QUIRK_DUMBKBD BIT(9) +#define SERIO_QUIRK_NOLOOP BIT(10) +#define SERIO_QUIRK_NOTIMEOUT BIT(11) +#define SERIO_QUIRK_KBDRESET BIT(12) +#define SERIO_QUIRK_DRITEK BIT(13) +#define SERIO_QUIRK_NOPNP BIT(14) + +/* Quirk table for different mainboards. Options similar or identical to i8042 + * module parameters. + * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored. + * This allows entries to overwrite vendor-wide quirks on a per-device basis. + * Where this is irrelevant, entries are sorted case-sensitively by DMI_SYS_VENDOR + * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries. + */ +static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { { - /* - * Arima-Rioworks HDAMB - - * AUX LOOP command does not raise AUX IRQ - */ .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"), - DMI_MATCH(DMI_BOARD_NAME, "HDAMB"), - DMI_MATCH(DMI_BOARD_VERSION, "Rev E"), + DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* ASUS G1S */ .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_BOARD_NAME, "G1S"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) + }, + { + /* Asus X450LCP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER) + }, + { + /* ASUS ZenBook UX425UA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"), + }, + .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) + }, + { + /* ASUS ZenBook UM325UA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), + }, + .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) + }, + /* + * On some Asus laptops, just running self-tests causes problems.
+ */ + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ + }, + .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER) + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ + }, + .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER) }, { /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */ @@ -94,585 +153,681 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"), DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { + /* ASUS G1S */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"), + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_BOARD_NAME, "G1S"), + DMI_MATCH(DMI_BOARD_VERSION, "1.0"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), - DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), - DMI_MATCH(DMI_PRODUCT_VERSION, "8500"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Acer Aspire 5710 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), - DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), - DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Dell Embedded Box PC 3000 */ + /* Acer Aspire 7738 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* OQO Model 01 */ + /* Acer Aspire 5536 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "OQO"), - DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "00"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* ULI EV4873 - AUX LOOP does not work properly */ + /* + * Acer Aspire 5738z + * Touchpad stops working in mux mode when dis- + re-enabled + * with the touchpad enable/disable toggle hotkey + */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ULI"), - DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"), - DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Microsoft Virtual Machine */ + /* Acer Aspire One 150 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), - DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Medion MAM 2070 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"), - DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Medion Akoya E7225 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Medion"), - DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), + DMI_MATCH(DMI_SYS_VENDOR, 
"Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Blue FB5601 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "blue"), - DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"), - DMI_MATCH(DMI_PRODUCT_VERSION, "M606"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte M912 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "M912"), - DMI_MATCH(DMI_PRODUCT_VERSION, "01"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte M1022M netbook */ .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."), - DMI_MATCH(DMI_BOARD_NAME, "M1022E"), - DMI_MATCH(DMI_BOARD_VERSION, "1.02"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte Spring Peak - defines wrong chassis type */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte T1005 - defines wrong chassis type ("Other") */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, + /* + * Some Wistron based laptops need us to explicitly enable the 'Dritek + * keyboard extension' to make their extra keys start generating scancodes. + * Originally, this was just confined to older laptops, but a few Acer laptops + * have turned up in 2007 that also need this again. + */ { - /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + /* Acer Aspire 5100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { + /* Acer Aspire 5610 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { + /* Acer Aspire 5630 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), - DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { + /* Acer Aspire 5650 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), - DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, - { } -}; - -/* - * Some Fujitsu notebooks are having trouble with touchpads if - * active multiplexing mode is activated. Luckily they don't have - * external PS/2 ports so we can safely disable it. - * ... apparently some Toshibas don't like MUX mode either and - * die horrible death on reboot. 
- */ -static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { - /* Fujitsu Lifebook P7010/P7010D */ + /* Acer Aspire 5680 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook P7010 */ + /* Acer Aspire 5720 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook P5020D */ + /* Acer Aspire 9110 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook S2000 */ + /* Acer TravelMate 660 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook S6230 */ + /* Acer TravelMate 2490 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook T725 laptop */ + /* Acer TravelMate 4280 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook U745 */ + /* Amoi M636/A737 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), + DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Fujitsu T70H */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Fujitsu-Siemens Lifebook T3010 */ + /* Compal HEL80I */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), + DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"), + DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Fujitsu-Siemens Lifebook E4010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), + DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"), + DMI_MATCH(DMI_PRODUCT_VERSION, "8500"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Fujitsu-Siemens Amilo Pro 2010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"), + DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"), + DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Fujitsu-Siemens Amilo Pro 2030 */ + /* Advent 4211 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU 
SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), + DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"), + DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* - * No data is coming from the touchscreen unless KBC - * is in legacy mode. - */ - /* Panasonic CF-29 */ + /* Dell Embedded Box PC 3000 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), - DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* - * HP Pavilion DV4017EA - - * errors on MUX ports are reported without raising AUXDATA - * causing "spurious NAK" messages. - */ + /* Dell XPS M1530 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * HP Pavilion ZT1000 - - * like DV4017EA does not raise AUXERR for errors on MUX ports. - */ + /* Dell Vostro 1510 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), - DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * HP Pavilion DV4270ca - - * like DV4017EA does not raise AUXERR for errors on MUX ports. - */ + /* Dell Vostro V13 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT) }, { + /* Dell Vostro 1320 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Dell Vostro 1520 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Dell Vostro 1720 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), + }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) + }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS) }, + /* + * Some Fujitsu notebooks are having trouble with touchpads if + * active multiplexing mode is activated. Luckily they don't have + * external PS/2 ports so we can safely disable it. + * ... apparently some Toshibas don't like MUX mode either and + * die horrible death on reboot. 
+ */ { + /* Fujitsu Lifebook P7010/P7010D */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), - DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Sharp Actius MM20 */ + /* Fujitsu Lifebook P5020D */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), - DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Sony Vaio FS-115b */ + /* Fujitsu Lifebook S2000 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * Sony Vaio FZ-240E - - * reset and GET ID commands issued via KBD port are - * sometimes being delivered to AUX3. - */ + /* Fujitsu Lifebook S6230 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * Most (all?) VAIOs do not have external PS/2 ports nor - * they implement active multiplexing properly, and - * MUX discovery usually messes up keyboard/touchpad. - */ + /* Fujitsu Lifebook T725 laptop */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_BOARD_NAME, "VAIO"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT) }, { - /* Amoi M636/A737 */ + /* Fujitsu Lifebook U745 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Lenovo 3000 n100 */ + /* Fujitsu T70H */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Lenovo XiaoXin Air 12 */ + /* Fujitsu A544 laptop */ + /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"), }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) }, { + /* Fujitsu AH544 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) + }, + { + /* Fujitsu U574 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) + }, + { + /* Fujitsu UH554 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) + }, + { + /* Fujitsu Lifebook P7010 */ + .matches = { + 
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) + }, + { + /* Fujitsu-Siemens Lifebook T3010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire 5710 */ + /* Fujitsu-Siemens Lifebook E4010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire 7738 */ + /* Fujitsu-Siemens Amilo Pro 2010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Gericom Bellagio */ + /* Fujitsu-Siemens Amilo Pro 2030 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), - DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* IBM 2656 */ + /* Gigabyte M912 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "2656"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "M912"), + DMI_MATCH(DMI_PRODUCT_VERSION, "01"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Dell XPS M1530 */ + /* Gigabyte Spring Peak - defines wrong chassis type */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Compal HEL80I */ + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"), - DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Dell Vostro 1510 */ + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, + /* + * Some laptops need keyboard reset before probing for the trackpad to get + * it detected, initialised & finally work. 
+ */ { - /* Acer Aspire 5536 */ + /* Gigabyte P35 v2 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), - DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, - { - /* Dell Vostro V13 */ + { + /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "X3"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Newer HP Pavilion dv4 models */ + /* Gigabyte P34 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P34"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Asus X450LCP */ + /* Gigabyte P57 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P57"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Avatar AVIU-145A6 */ + /* Gericom Bellagio */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel"), - DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), + DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), + DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* TUXEDO BU1406 */ + /* Gigabyte M1022M netbook */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "M1022E"), + DMI_MATCH(DMI_BOARD_VERSION, "1.02"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo LaVie Z */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { /* - * Acer Aspire 5738z - * Touchpad stops working in mux mode when dis- + re-enabled - * with the touchpad enable/disable toggle hotkey + * HP Pavilion DV4017EA - + * errors on MUX ports are reported without raising AUXDATA + * causing "spurious NAK" messages. */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Entroware Proteus */ + /* + * HP Pavilion ZT1000 - + * like DV4017EA does not raise AUXERR for errors on MUX ports. 
+ */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), - DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), - DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; - -static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = { { /* - * Sony Vaio VGN-CS series require MUX or the touch sensor - * buttons will disturb touchpad operation + * HP Pavilion DV4270ca - + * like DV4017EA does not raise AUXERR for errors on MUX ports. */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; - -/* - * On some Asus laptops, just running self tests cause problems. - */ -static const struct dmi_system_id i8042_dmi_noselftest_table[] = { { + /* Newer HP Pavilion dv4 models */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ - }, - }, { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT) }, - { } -}; -static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { { - /* MSI Wind U-100 */ + /* IBM 2656 */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "U-100"), - DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "2656"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* LG Electronics X110 */ + /* Avatar AVIU-145A6 */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "X110"), - DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire One 150 */ + /* Intel MBO Desktop D845PESV */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "D845PESV"), }, + .driver_data = (void *)(SERIO_QUIRK_NOPNP) }, { + /* + * Intel NUC D54250WYK - does not have i8042 controller but + * declares PS/2 devices in DSDT. 
+ */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"), + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"), }, + .driver_data = (void *)(SERIO_QUIRK_NOPNP) }, { + /* Lenovo 3000 n100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Lenovo XiaoXin Air 12 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Lenovo LaVie Z */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Lenovo Ideapad U455 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20046"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Lenovo ThinkPad L460 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Lenovo ThinkPad Twist S230u */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Advent 4211 */ + /* LG Electronics X110 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"), - DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"), + DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "X110"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { /* Medion Akoya Mini E1210 */ @@ -680,6 +835,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), DMI_MATCH(DMI_PRODUCT_NAME, "E1210"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { /* Medion Akoya E1222 */ @@ -687,48 +843,62 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), DMI_MATCH(DMI_PRODUCT_NAME, "E122X"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Mivvy M310 */ + /* MSI Wind U-100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"), - DMI_MATCH(DMI_PRODUCT_NAME, "N10"), + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_BOARD_NAME, "U-100"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP) }, { - /* Dell Vostro 1320 */ + /* + * No data is coming from the touchscreen unless KBC + * is in legacy mode. 
+ */ + /* Panasonic CF-29 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"), + DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), + DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Dell Vostro 1520 */ + /* Medion Akoya E7225 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"), + DMI_MATCH(DMI_SYS_VENDOR, "Medion"), + DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Dell Vostro 1720 */ + /* Microsoft Virtual Machine */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), + DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo Ideapad U455 */ + /* Medion MAM 2070 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "20046"), + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"), + DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo ThinkPad L460 */ + /* TUXEDO BU1406 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ @@ -736,282 +906,318 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) + }, + { + /* OQO Model 01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "OQO"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "00"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo ThinkPad Twist S230u */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Entroware Proteus */ + /* Acer Aspire 5 A515 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), - DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), - DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), }, + .driver_data = (void *)(SERIO_QUIRK_NOPNP) }, - { } -}; - -#ifdef CONFIG_PNP -static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { { - /* Intel MBO Desktop D845PESV */ + /* ULI EV4873 - AUX LOOP does not work properly */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "D845PESV"), - DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "ULI"), + DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"), + DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { /* - * Intel NUC D54250WYK - does not have i8042 controller but - * declares PS/2 devices in DSDT. 
+ * Arima-Rioworks HDAMB - + * AUX LOOP command does not raise AUX IRQ */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"), - DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"), + DMI_MATCH(DMI_BOARD_NAME, "HDAMB"), + DMI_MATCH(DMI_BOARD_VERSION, "Rev E"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* MSI Wind U-100 */ + /* Sharp Actius MM20 */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "U-100"), - DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), + DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire 5 A515 */ + /* + * Sony Vaio FZ-240E - + * reset and GET ID commands issued via KBD port are + * sometimes being delivered to AUX3. + */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), - DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; - -static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { { + /* + * Most (all?) VAIOs do not have external PS/2 ports nor + * they implement active multiplexing properly, and + * MUX discovery usually messes up keyboard/touchpad. + */ .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "VAIO"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Sony Vaio FS-115b */ .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */ + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* + * Sony Vaio VGN-CS series require MUX or the touch sensor + * buttons will disturb touchpad operation + */ .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), }, + .driver_data = (void *)(SERIO_QUIRK_FORCEMUX) }, { .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; -#endif - -static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { { - /* Dell Vostro V13 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Newer HP Pavilion dv4 models */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, + /* + * A lot of modern Clevo barebones have touchpad and/or keyboard issues + * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, + * none of them have an external PS/2 port so this can safely be set for + * all of them. These two are based on a Clevo design, but have the + * board_name changed. 
+ */ { - /* Fujitsu A544 laptop */ - /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"), + DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Fujitsu AH544 laptop */ - /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), + DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Fujitsu Lifebook T725 laptop */ + /* Mivvy M310 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), + DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"), + DMI_MATCH(DMI_PRODUCT_NAME, "N10"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, + /* + * Some laptops need keyboard reset before probing for the trackpad to get + * it detected, initialised & finally work. + */ { - /* Fujitsu U574 laptop */ - /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + /* Schenker XMG C504 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), + DMI_MATCH(DMI_SYS_VENDOR, "XMG"), + DMI_MATCH(DMI_PRODUCT_NAME, "C504"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Fujitsu UH554 laptop */ + /* Blue FB5601 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + DMI_MATCH(DMI_SYS_VENDOR, "blue"), + DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"), + DMI_MATCH(DMI_PRODUCT_VERSION, "M606"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, - { } -}; - -/* - * Some Wistron based laptops need us to explicitly enable the 'Dritek - * keyboard extension' to make their extra keys start generating scancodes. - * Originally, this was just confined to older laptops, but a few Acer laptops - * have turned up in 2007 that also need this again. - */ -static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { + /* + * A lot of modern Clevo barebones have touchpad and/or keyboard issues + * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, + * none of them have an external PS/2 port so this can safely be set for + * all of them. + * Clevo barebones come with board_vendor and/or system_vendor set to + * either the very generic string "Notebook" and/or a different value + * for each individual reseller. The only somewhat universal way to + * identify them is by board_name. 
+ */ { - /* Acer Aspire 5100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), + DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5610 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), + DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5630 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), + DMI_MATCH(DMI_BOARD_NAME, "N140CU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5650 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), + DMI_MATCH(DMI_BOARD_NAME, "N141CU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5680 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), + DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5720 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + /* + * At least one modern Clevo barebone has the touchpad connected both + * via PS/2 and i2c interface. This causes a race condition between the + * psmouse and i2c-hid driver. Since the full capability of the touchpad + * is available via the i2c interface and the device has no external + * PS/2 port, it is safe to just ignore all ps2 mouses here to avoid + * this issue. The known affected device is the + * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of + * the two different dmi strings below. NS50MU is not a typo! 
+ */ { - /* Acer Aspire 9110 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), + DMI_MATCH(DMI_BOARD_NAME, "NS50MU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | + SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | + SERIO_QUIRK_NOPNP) }, { - /* Acer TravelMate 660 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"), + DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | + SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | + SERIO_QUIRK_NOPNP) }, { - /* Acer TravelMate 2490 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), + DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer TravelMate 4280 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), + DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, - { } -}; - -/* - * Some laptops need keyboard reset before probing for the trackpad to get - * it detected, initialised & finally work. - */ -static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { { - /* Gigabyte P35 v2 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), + DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, - { - /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ + { .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "X3"), + DMI_MATCH(DMI_BOARD_NAME, "X170SM"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Gigabyte P34 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "P34"), + DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + { } +}; + +#ifdef CONFIG_PNP +static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = { { - /* Gigabyte P57 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "P57"), + DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ }, }, { - /* Schenker XMG C504 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "XMG"), - DMI_MATCH(DMI_PRODUCT_NAME, "C504"), + DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */ }, }, - { } -}; - -static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = { { - /* ASUS ZenBook UX425UA */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"), + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ }, }, { - /* ASUS ZenBook UM325UA */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), + DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ }, }, { } }; +#endif #endif /* CONFIG_X86 */ @@ -1167,11 +1373,6 @@ static int __init i8042_pnp_init(void) bool pnp_data_busted = false; int err; -#ifdef CONFIG_X86 - if 
(dmi_check_system(i8042_dmi_nopnp_table)) - i8042_nopnp = true; -#endif - if (i8042_nopnp) { pr_info("PNP detection disabled\n"); return 0; } @@ -1275,6 +1476,59 @@ static inline int i8042_pnp_init(void) { return 0; } static inline void i8042_pnp_exit(void) { } #endif /* CONFIG_PNP */ + +#ifdef CONFIG_X86 +static void __init i8042_check_quirks(void) +{ + const struct dmi_system_id *device_quirk_info; + uintptr_t quirks; + + device_quirk_info = dmi_first_match(i8042_dmi_quirk_table); + if (!device_quirk_info) + return; + + quirks = (uintptr_t)device_quirk_info->driver_data; + + if (quirks & SERIO_QUIRK_NOKBD) + i8042_nokbd = true; + if (quirks & SERIO_QUIRK_NOAUX) + i8042_noaux = true; + if (quirks & SERIO_QUIRK_NOMUX) + i8042_nomux = true; + if (quirks & SERIO_QUIRK_FORCEMUX) + i8042_nomux = false; + if (quirks & SERIO_QUIRK_UNLOCK) + i8042_unlock = true; + if (quirks & SERIO_QUIRK_PROBE_DEFER) + i8042_probe_defer = true; + /* Honor module parameter when value is not default */ + if (i8042_reset == I8042_RESET_DEFAULT) { + if (quirks & SERIO_QUIRK_RESET_ALWAYS) + i8042_reset = I8042_RESET_ALWAYS; + if (quirks & SERIO_QUIRK_RESET_NEVER) + i8042_reset = I8042_RESET_NEVER; + } + if (quirks & SERIO_QUIRK_DIRECT) + i8042_direct = true; + if (quirks & SERIO_QUIRK_DUMBKBD) + i8042_dumbkbd = true; + if (quirks & SERIO_QUIRK_NOLOOP) + i8042_noloop = true; + if (quirks & SERIO_QUIRK_NOTIMEOUT) + i8042_notimeout = true; + if (quirks & SERIO_QUIRK_KBDRESET) + i8042_kbdreset = true; + if (quirks & SERIO_QUIRK_DRITEK) + i8042_dritek = true; +#ifdef CONFIG_PNP + if (quirks & SERIO_QUIRK_NOPNP) + i8042_nopnp = true; +#endif +} +#else +static inline void i8042_check_quirks(void) {} +#endif +
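The new i8042_check_quirks() collapses what used to be separate per-option DMI tables into one pass: dmi_first_match() returns only the first matching i8042_dmi_quirk_table entry, so a specific board entry can override a vendor-wide one, and each option is then decoded from the bit flags packed into driver_data. A standalone sketch of the same encode/decode pattern (toy table and names, not the kernel API):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define QUIRK_NOMUX        (1u << 2)
    #define QUIRK_RESET_ALWAYS (1u << 6)
    #define QUIRK_NOLOOP       (1u << 10)

    struct board_quirk {
        const char *board;  /* stands in for the DMI match */
        uintptr_t quirks;   /* stands in for dmi_system_id.driver_data */
    };

    static const struct board_quirk table[] = {
        { "LAPQC71A", QUIRK_NOMUX | QUIRK_RESET_ALWAYS | QUIRK_NOLOOP },
        { "X110",     QUIRK_RESET_ALWAYS },
    };

    int main(void)
    {
        const char *board = "X110";

        /* First match wins, like dmi_first_match() on the merged table. */
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (strcmp(table[i].board, board) == 0) {
                uintptr_t q = table[i].quirks;
                printf("nomux=%d reset_always=%d noloop=%d\n",
                       !!(q & QUIRK_NOMUX),
                       !!(q & QUIRK_RESET_ALWAYS),
                       !!(q & QUIRK_NOLOOP));
                break;
            }
        }
        return 0;
    }

 static int __init i8042_platform_init(void) { int retval; @@ -1297,45 +1551,17 @@ static int __init i8042_platform_init(void) i8042_kbd_irq = I8042_MAP_IRQ(1); i8042_aux_irq = I8042_MAP_IRQ(12); - retval = i8042_pnp_init(); - if (retval) - return retval; - #if defined(__ia64__) - i8042_reset = I8042_RESET_ALWAYS; + i8042_reset = I8042_RESET_ALWAYS; #endif -#ifdef CONFIG_X86 - /* Honor module parameter when value is not default */ - if (i8042_reset == I8042_RESET_DEFAULT) { - if (dmi_check_system(i8042_dmi_reset_table)) - i8042_reset = I8042_RESET_ALWAYS; - - if (dmi_check_system(i8042_dmi_noselftest_table)) - i8042_reset = I8042_RESET_NEVER; - } - - if (dmi_check_system(i8042_dmi_noloop_table)) - i8042_noloop = true; - - if (dmi_check_system(i8042_dmi_nomux_table)) - i8042_nomux = true; - - if (dmi_check_system(i8042_dmi_forcemux_table)) - i8042_nomux = false; - - if (dmi_check_system(i8042_dmi_notimeout_table)) - i8042_notimeout = true; - - if (dmi_check_system(i8042_dmi_dritek_table)) - i8042_dritek = true; - - if (dmi_check_system(i8042_dmi_kbdreset_table)) - i8042_kbdreset = true; + i8042_check_quirks(); - if (dmi_check_system(i8042_dmi_probe_defer_table)) - i8042_probe_defer = true; + retval = i8042_pnp_init(); + if (retval) + return retval; +#ifdef CONFIG_X86 /* * A20 was already enabled during early kernel init.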
But some buggy * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index a9f68f535b727..8648b4c46138b 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1543,8 +1543,6 @@ static int i8042_probe(struct platform_device *dev) { int error; - i8042_platform_device = dev; - if (i8042_reset == I8042_RESET_ALWAYS) { error = i8042_controller_selftest(); if (error) @@ -1582,7 +1580,6 @@ static int i8042_probe(struct platform_device *dev) i8042_free_aux_ports(); /* in case KBD failed but AUX not */ i8042_free_irqs(); i8042_controller_reset(false); - i8042_platform_device = NULL; return error; } @@ -1592,7 +1589,6 @@ static int i8042_remove(struct platform_device *dev) i8042_unregister_ports(); i8042_free_irqs(); i8042_controller_reset(false); - i8042_platform_device = NULL; return 0; } diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index c09aefa2661dd..ca9cee5851a05 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1219,14 +1219,12 @@ static int elants_i2c_power_on(struct elants_data *ts) if (IS_ERR_OR_NULL(ts->reset_gpio)) return 0; - gpiod_set_value_cansleep(ts->reset_gpio, 1); - error = regulator_enable(ts->vcc33); if (error) { dev_err(&ts->client->dev, "failed to enable vcc33 regulator: %d\n", error); - goto release_reset_gpio; + return error; } error = regulator_enable(ts->vccio); @@ -1235,7 +1233,7 @@ static int elants_i2c_power_on(struct elants_data *ts) "failed to enable vccio regulator: %d\n", error); regulator_disable(ts->vcc33); - goto release_reset_gpio; + return error; } /* @@ -1244,7 +1242,6 @@ static int elants_i2c_power_on(struct elants_data *ts) */ udelay(ELAN_POWERON_DELAY_USEC); -release_reset_gpio: gpiod_set_value_cansleep(ts->reset_gpio, 0); if (error) return error; @@ -1352,7 +1349,7 @@ static int elants_i2c_probe(struct i2c_client *client, return error; } - ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW); + ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ts->reset_gpio)) { error = PTR_ERR(ts->reset_gpio); diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 5fc789f717c8a..b7f87ad4b9a95 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -154,6 +154,7 @@ static const struct goodix_chip_data gt9x_chip_data = { static const struct goodix_chip_id goodix_chip_ids[] = { { .id = "1151", .data = >1x_chip_data }, + { .id = "1158", .data = >1x_chip_data }, { .id = "5663", .data = >1x_chip_data }, { .id = "5688", .data = >1x_chip_data }, { .id = "917S", .data = >1x_chip_data }, @@ -1058,6 +1059,7 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0); input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); +retry_read_config: /* Read configuration and apply touchscreen parameters */ goodix_read_config(ts); @@ -1065,6 +1067,16 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) touchscreen_parse_properties(ts->input_dev, true, &ts->prop); if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) { + if (!ts->reset_controller_at_probe && + ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) { + dev_info(&ts->client->dev, "Config not set, resetting controller\n"); + /* Retry after a controller reset */ + ts->reset_controller_at_probe = true; + error = 
goodix_reset(ts); + if (error) + return error; + goto retry_read_config; + } dev_err(&ts->client->dev, "Invalid config (%d, %d, %d), using defaults\n", ts->prop.max_x, ts->prop.max_y, ts->max_touch_num); @@ -1385,6 +1397,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match); #ifdef CONFIG_OF static const struct of_device_id goodix_of_match[] = { { .compatible = "goodix,gt1151" }, + { .compatible = "goodix,gt1158" }, { .compatible = "goodix,gt5663" }, { .compatible = "goodix,gt5688" }, { .compatible = "goodix,gt911" },
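The goodix change above turns a hard failure into a retry-once path: when the first config read comes back without usable axis limits, the controller is reset and the read is redone, with ts->reset_controller_at_probe doubling as the guard that keeps the goto from looping. A standalone sketch of the pattern (toy functions, not the driver's):

    #include <stdio.h>
    #include <stdbool.h>

    static int attempts;

    /* Toy config read: only succeeds after the controller was reset once. */
    static bool read_config(void)
    {
        return ++attempts > 1;
    }

    static int reset_controller(void)
    {
        return 0; /* pretend the reset worked */
    }

    static int configure(void)
    {
        bool already_reset = false;

    retry:
        if (!read_config()) {
            if (!already_reset) {
                already_reset = true;   /* guard: retry at most once */
                if (reset_controller())
                    return -1;
                goto retry;
            }
            puts("still invalid, using defaults");
            return 0;
        }
        puts("config ok");
        return 0;
    }

    int main(void)
    {
        return configure();
    }

diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index f67efdd040b24..43fcc8c84e2f5 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -1453,7 +1453,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id) "ce", GPIOD_OUT_LOW); if (IS_ERR(ts->gpio_ce)) { error = PTR_ERR(ts->gpio_ce); - if (error != EPROBE_DEFER) + if (error != -EPROBE_DEFER) dev_err(&client->dev, "Failed to get gpio: %d\n", error); return error; diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 4d2d22a869773..bdb3e2c3ab797 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -210,12 +210,14 @@ static int raydium_i2c_send(struct i2c_client *client, error = raydium_i2c_xfer(client, addr, xfer, ARRAY_SIZE(xfer)); if (likely(!error)) - return 0; + goto out; msleep(RM_RETRY_DELAY_MS); } while (++tries < RM_MAX_RETRIES); dev_err(&client->dev, "%s failed: %d\n", __func__, error); +out: + kfree(tx_buf); return error; } diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index f6fae64861ce8..27cc5f03611cb 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node) { size_t i; struct qcom_icc_node *qn; + struct qcom_icc_provider *qp; qn = node->data; + qp = to_qcom_provider(node->provider); for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { qn->sum_avg[i] = 0; qn->max_peak[i] = 0; } + + for (i = 0; i < qn->num_bcms; i++) + qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); } EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate); @@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, { size_t i; struct qcom_icc_node *qn; - struct qcom_icc_provider *qp; qn = node->data; - qp = to_qcom_provider(node->provider); if (!tag) tag = QCOM_ICC_TAG_ALWAYS; @@ -67,9 +70,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, *agg_avg += avg_bw; *agg_peak = max_t(u32, *agg_peak, peak_bw); - for (i = 0; i < qn->num_bcms; i++) - qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); - return 0; } EXPORT_SYMBOL_GPL(qcom_icc_aggregate); diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c index c76b2c7f9b106..b936196c229c8 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -627,7 +627,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8150", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index cc558fec74e38..40820043c8d36 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -643,7 +643,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8250",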
.of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index e988f6f198c5c..ce822347f7470 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -85,6 +85,10 @@ #define ACPI_DEVFLAG_ATSDIS 0x10000000 #define LOOP_TIMEOUT 2000000 + +#define IVRS_GET_SBDF_ID(seg, bus, dev, fd) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \ + | ((dev & 0x1f) << 3) | (fn & 0x7)) + /* * ACPI table definitions * @@ -3046,24 +3050,32 @@ static int __init parse_amd_iommu_options(char *str) static int __init parse_ivrs_ioapic(char *str) { - unsigned int bus, dev, fn; - int ret, id, i; - u16 devid; + u32 seg = 0, bus, dev, fn; + int id, i; + u32 devid; - ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); + if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) + goto found; - if (ret != 4) { - pr_err("Invalid command line: ivrs_ioapic%s\n", str); - return 1; + if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { + pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n", + str, id, seg, bus, dev, fn); + goto found; } + pr_err("Invalid command line: ivrs_ioapic%s\n", str); + return 1; + +found: if (early_ioapic_map_size == EARLY_MAP_SIZE) { pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", str); return 1; } - devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); cmdline_maps = true; i = early_ioapic_map_size++; @@ -3076,24 +3088,32 @@ static int __init parse_ivrs_ioapic(char *str) static int __init parse_ivrs_hpet(char *str) { - unsigned int bus, dev, fn; - int ret, id, i; - u16 devid; + u32 seg = 0, bus, dev, fn; + int id, i; + u32 devid; - ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); + if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) + goto found; - if (ret != 4) { - pr_err("Invalid command line: ivrs_hpet%s\n", str); - return 1; + if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { + pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n", + str, id, seg, bus, dev, fn); + goto found; } + pr_err("Invalid command line: ivrs_hpet%s\n", str); + return 1; + +found: if (early_hpet_map_size == EARLY_MAP_SIZE) { pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", str); return 1; } - devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); cmdline_maps = true; i = early_hpet_map_size++; @@ -3106,17 +3126,37 @@ static int __init parse_ivrs_hpet(char *str) static int __init parse_ivrs_acpihid(char *str) { - u32 bus, dev, fn; - char *hid, *uid, *p; + u32 seg = 0, bus, dev, fn; + char *hid, *uid, *p, *addr; char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; - int ret, i; + int i; - ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); - if (ret != 4) { - pr_err("Invalid command line: ivrs_acpihid(%s)\n", str); - return 1; + addr = strchr(str, '@'); + if (!addr) { + if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || + sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { + pr_warn("ivrs_acpihid%s option format 
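/*
 * A hedged, stand-alone rendering of the seg/bus/dev/fn packing that
 * IVRS_GET_SBDF_ID above implements (here with the last parameter
 * consistently named fn): seg occupies bits 31:16, bus 15:8, dev 7:3,
 * fn 2:0, matching the 0000:00:14.0 notation in the new option format.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t sbdf_id(uint32_t seg, uint32_t bus, uint32_t dev, uint32_t fn)
{
	return ((seg & 0xffff) << 16) | ((bus & 0xff) << 8) |
	       ((dev & 0x1f) << 3) | (fn & 0x7);
}

int main(void)
{
	/* ivrs_ioapic=10@0001:00:14.0 maps to devid 0x000100a0 */
	printf("0x%08x\n", sbdf_id(0x1, 0x00, 0x14, 0x0));
	return 0;
}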
deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", + str, acpiid, seg, bus, dev, fn); + goto found; + } + goto not_found; } + /* We have the '@', make it the terminator to get just the acpiid */ + *addr++ = 0; + + if (sscanf(str, "=%s", acpiid) != 1) + goto not_found; + + if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 || + sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4) + goto found; + +not_found: + pr_err("Invalid command line: ivrs_acpihid%s\n", str); + return 1; + +found: p = acpiid; hid = strsep(&p, ":"); uid = p; @@ -3126,11 +3166,17 @@ static int __init parse_ivrs_acpihid(char *str) return 1; } + /* + * Ignore leading zeroes after ':', so e.g., AMDI0095:00 + * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match + */ + while (*uid == '0' && *(uid + 1)) + uid++; + i = early_acpihid_map_size++; memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); - early_acpihid_map[i].devid = - ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); early_acpihid_map[i].cmd_line = true; return 1; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 200cf5da5e0ad..f216a86d9c817 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -923,7 +923,8 @@ static void build_completion_wait(struct iommu_cmd *cmd, memset(cmd, 0, sizeof(*cmd)); cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; cmd->data[1] = upper_32_bits(paddr); - cmd->data[2] = data; + cmd->data[2] = lower_32_bits(data); + cmd->data[3] = upper_32_bits(data); CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); } diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index fb61bdca4c2c1..16776e3c6eabf 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -587,6 +587,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) put_device_state(dev_state); out: + pci_dev_put(pdev); return ret; } diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index b9a974d978311..25689bdf812e5 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -1122,7 +1122,7 @@ static int fsl_pamu_probe(struct platform_device *pdev) ret = create_csd(ppaact_phys, mem_size, csd_port_id); if (ret) { dev_err(dev, "could not create coherence subdomain\n"); - return ret; + goto error; } } diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 0bc497f4cb9f0..a27765a7f6b75 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -816,6 +816,7 @@ int __init dmar_dev_scope_init(void) info = dmar_alloc_pci_notify_info(dev, BUS_NOTIFY_ADD_DEVICE); if (!info) { + pci_dev_put(dev); return dmar_dev_scope_status; } else { dmar_pci_bus_add_dev(info); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 477dde39823c7..47666c9b4ba11 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -560,14 +560,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain, return !(addr_width < BITS_PER_LONG && pfn >> addr_width); } +/* + * Calculate the Supported Adjusted Guest Address Widths of an IOMMU. + * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of + * the returned SAGAW. + */ +static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) +{ + unsigned long fl_sagaw, sl_sagaw; + + fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? 
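/*
 * The leading-zero skip added for the ACPI UID above, tried in
 * isolation: advance past '0' characters but always keep the final
 * one, so "00" still matches "0" while "007" becomes "7". Runnable
 * sketch:
 */
#include <stdio.h>

int main(void)
{
	const char *samples[] = { "0", "00", "007", "123" };

	for (int i = 0; i < 4; i++) {
		const char *uid = samples[i];

		while (*uid == '0' && *(uid + 1))
			uid++;
		printf("\"%s\" -> \"%s\"\n", samples[i], uid);
	}
	return 0;
}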
BIT(3) : 0); + sl_sagaw = cap_sagaw(iommu->cap); + + /* Second level only. */ + if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) + return sl_sagaw; + + /* First level only. */ + if (!ecap_slts(iommu->ecap)) + return fl_sagaw; + + return fl_sagaw & sl_sagaw; +} + static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) { unsigned long sagaw; int agaw = -1; - sagaw = cap_sagaw(iommu->cap); - for (agaw = width_to_agaw(max_gaw); - agaw >= 0; agaw--) { + sagaw = __iommu_calculate_sagaw(iommu); + for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { if (test_bit(agaw, &sagaw)) break; } @@ -2824,6 +2846,7 @@ static int __init si_domain_init(int hw) if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); + si_domain = NULL; return -EFAULT; } @@ -3483,6 +3506,10 @@ static int __init init_dmars(void) disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } + if (si_domain) { + domain_exit(si_domain); + si_domain = NULL; + } kfree(g_iommus); @@ -4866,8 +4893,10 @@ static inline bool has_external_pci(void) struct pci_dev *pdev = NULL; for_each_pci_dev(pdev) - if (pdev->external_facing) + if (pdev->external_facing) { + pci_dev_put(pdev); return true; + } return false; } diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index fb911b6c418f2..86fd49ae7f612 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -669,7 +669,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, * Since it is a second level only translation setup, we should * set SRE bit as well (addresses are expected to be GPAs). */ - if (pasid != PASID_RID2PASID) + if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap)) pasid_set_sre(pte); pasid_set_present(pte); pasid_flush_caches(iommu, pte, pasid, did); @@ -704,7 +704,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, * We should set SRE bit as well since the addresses are expected * to be GPAs. 
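/*
 * Kernel-style sketch (not compilable on its own) of the rule behind
 * the has_external_pci() and dmar_dev_scope_init() hunks above:
 * for_each_pci_dev() holds a reference on the device it is currently
 * visiting, so any early exit from the loop must drop that reference
 * with pci_dev_put().
 */
static bool any_external_facing(void)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {
		if (pdev->external_facing) {
			pci_dev_put(pdev);	/* balance the iterator's get */
			return true;
		}
	}
	return false;	/* normal termination already dropped the ref */
}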
*/ - pasid_set_sre(pte); + if (ecap_srs(iommu->ecap)) + pasid_set_sre(pte); pasid_set_present(pte); pasid_flush_caches(iommu, pte, pasid, did); diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 82ddfe9170d4d..2abbdd71d8d99 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -618,18 +618,34 @@ static int mtk_iommu_probe(struct platform_device *pdev) ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, dev_name(&pdev->dev)); if (ret) - return ret; + goto out_clk_unprepare; iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); ret = iommu_device_register(&data->iommu); if (ret) - return ret; + goto out_sysfs_remove; - if (!iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); + if (!iommu_present(&platform_bus_type)) { + ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); + if (ret) + goto out_dev_unreg; + } - return component_master_add_with_match(dev, &mtk_iommu_com_ops, match); + ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); + if (ret) + goto out_bus_set_null; + return ret; + +out_bus_set_null: + bus_set_iommu(&platform_bus_type, NULL); +out_dev_unreg: + iommu_device_unregister(&data->iommu); +out_sysfs_remove: + iommu_device_sysfs_remove(&data->iommu); +out_clk_unprepare: + clk_disable_unprepare(data->bclk); + return ret; } static int mtk_iommu_remove(struct platform_device *pdev) diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index a99afb5d9011c..259f65291d909 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -32,12 +32,12 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj) ssize_t bytes; \ const char *str = "%20s: %08x\n"; \ const int maxcol = 32; \ - bytes = snprintf(p, maxcol, str, __stringify(name), \ + if (len < maxcol) \ + goto out; \ + bytes = scnprintf(p, maxcol, str, __stringify(name), \ iommu_read_reg(obj, MMU_##name)); \ p += bytes; \ len -= bytes; \ - if (len < maxcol) \ - goto out; \ } while (0) static ssize_t diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index ea6db13419165..65aa30d55d3ab 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -28,6 +28,7 @@ #include #define IOMMU_RESET_REG 0x010 +#define IOMMU_RESET_RELEASE_ALL 0xffffffff #define IOMMU_ENABLE_REG 0x020 #define IOMMU_ENABLE_ENABLE BIT(0) @@ -271,7 +272,7 @@ static u32 sun50i_mk_pte(phys_addr_t page, int prot) enum sun50i_iommu_aci aci; u32 flags = 0; - if (prot & (IOMMU_READ | IOMMU_WRITE)) + if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE)) aci = SUN50I_IOMMU_ACI_RD_WR; else if (prot & IOMMU_READ) aci = SUN50I_IOMMU_ACI_RD; @@ -512,7 +513,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain, sun50i_iommu_free_page_table(iommu, drop_pt); } - sun50i_table_flush(sun50i_domain, page_table, PT_SIZE); + sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES); sun50i_table_flush(sun50i_domain, dte_addr, 1); return page_table; @@ -602,7 +603,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type) struct sun50i_iommu_domain *sun50i_domain; if (type != IOMMU_DOMAIN_DMA && - type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_UNMANAGED) return NULL; @@ -880,8 +880,8 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) { + u32 status, l1_status, l2_status, resets; struct sun50i_iommu *iommu = 
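/*
 * The mtk_iommu_probe() fix above follows the standard goto-unwind
 * shape, sketched here with hypothetical step and undo helpers: each
 * label undoes exactly the steps that have already succeeded, in
 * reverse order, so no resource leaks on any failure path.
 */
static int example_probe(struct example *ex)
{
	int ret;

	ret = step_a(ex);
	if (ret)
		return ret;

	ret = step_b(ex);
	if (ret)
		goto undo_a;

	ret = step_c(ex);
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	undo_step_b(ex);
undo_a:
	undo_step_a(ex);
	return ret;
}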
dev_id; - u32 status; spin_lock(&iommu->iommu_lock); @@ -891,6 +891,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) return IRQ_NONE; } + l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG); + l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG); + if (status & IOMMU_INT_INVALID_L2PG) sun50i_iommu_handle_pt_irq(iommu, IOMMU_INT_ERR_ADDR_L2_REG, @@ -904,8 +907,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) iommu_write(iommu, IOMMU_INT_CLR_REG, status); - iommu_write(iommu, IOMMU_RESET_REG, ~status); - iommu_write(iommu, IOMMU_RESET_REG, status); + resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK; + iommu_write(iommu, IOMMU_RESET_REG, ~resets); + iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL); spin_unlock(&iommu->iommu_lock); diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c index 1337ceceb59b9..8be7d136c3bf8 100644 --- a/drivers/irqchip/irq-gic-pm.c +++ b/drivers/irqchip/irq-gic-pm.c @@ -104,7 +104,7 @@ static int gic_probe(struct platform_device *pdev) pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) goto rpm_disable; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 42b295337bafb..d8cb5bcd6b10e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1615,7 +1615,7 @@ static int its_select_cpu(struct irq_data *d, cpu = cpumask_pick_least_loaded(d, tmpmask); } else { - cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); + cpumask_copy(tmpmask, aff_mask); /* If we cannot cross sockets, limit the search to that node */ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 7013a3f084299..4c5b6772562dc 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -3219,6 +3219,7 @@ static int hfcm_l1callback(struct dchannel *dch, u_int cmd) { struct hfc_multi *hc = dch->hw; + struct sk_buff_head free_queue; u_long flags; switch (cmd) { @@ -3247,6 +3248,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd) l1_event(dch->l1, HW_POWERUP_IND); break; case HW_DEACT_REQ: + __skb_queue_head_init(&free_queue); /* start deactivation */ spin_lock_irqsave(&hc->lock, flags); if (hc->ctype == HFC_TYPE_E1) { @@ -3266,20 +3268,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd) plxsd_checksync(hc, 0); } } - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) del_timer(&dch->timer); spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); break; case HW_POWERUP_REQ: spin_lock_irqsave(&hc->lock, flags); @@ -3386,6 +3389,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) case PH_DEACTIVATE_REQ: test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); if (dch->dev.D.protocol != ISDN_P_TE_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); spin_lock_irqsave(&hc->lock, flags); if (debug & DEBUG_HFCMULTI_MSG) printk(KERN_DEBUG @@ -3407,14 +3413,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) /* 
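/*
 * Sketch of why irq-gic-pm switches helpers above: pm_runtime_get_sync()
 * bumps the usage count even when the resume fails, so its error path
 * must drop the count itself, while pm_runtime_resume_and_get() does
 * that internally. Kernel-style, illustrative only.
 */
static int old_style(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* easy to forget */
		return ret;
	}
	return 0;
}

static int new_style(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);	/* no leak on failure */
}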
deactivate */ dch->state = 1; } - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); @@ -3426,6 +3432,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) #endif ret = 0; spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); } else ret = l1_event(dch->l1, hh->prim); break; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index af17459c1a5c0..eba58b99cd29d 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); spin_lock_irqsave(&hc->lock, flags); if (hc->hw.protocol == ISDN_P_NT_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); /* prepare deactivation */ Write_hfc(hc, HFCPCI_STATES, 0x40); - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); @@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) hc->hw.mst_m &= ~HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); ret = 0; + spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); } else { ret = l1_event(dch->l1, hh->prim); + spin_unlock_irqrestore(&hc->lock, flags); } - spin_unlock_irqrestore(&hc->lock, flags); break; } if (!ret) diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index cd5642cef01fd..e8b37bd5e34a3 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -326,20 +326,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); if (hw->protocol == ISDN_P_NT_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT); spin_lock_irqsave(&hw->lock, flags); - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); spin_unlock_irqrestore(&hw->lock, flags); + __skb_queue_purge(&free_queue); #ifdef FIXME if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) dchannel_sched_event(&hc->dch, D_CLEARBUSY); @@ -1330,7 +1334,7 @@ tx_iso_complete(struct urb *urb) printk("\n"); } - dev_kfree_skb(tx_skb); + dev_consume_skb_irq(tx_skb); tx_skb = NULL; if (fifo->dch && get_next_dframe(fifo->dch)) tx_skb = fifo->dch->tx_skb; diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index a52f275f82634..f8447135a9022 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -956,7 +956,7 @@ 
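/*
 * The recurring mISDN shape above (hfcmulti, hfcpci, hfcsusb), reduced
 * to its core: instead of freeing skbs directly under the driver
 * spinlock, pending buffers are spliced onto a private queue and purged
 * only after the lock is released. Kernel-style sketch, illustrative
 * only; the dchannel fields follow the driver code above.
 */
static void flush_dchannel(struct dchannel *dch, spinlock_t *lock)
{
	struct sk_buff_head free_queue;
	unsigned long flags;

	__skb_queue_head_init(&free_queue);

	spin_lock_irqsave(lock, flags);
	skb_queue_splice_init(&dch->squeue, &free_queue);
	if (dch->tx_skb) {
		__skb_queue_tail(&free_queue, dch->tx_skb);
		dch->tx_skb = NULL;
	}
	spin_unlock_irqrestore(lock, flags);

	__skb_queue_purge(&free_queue);	/* frees happen with no spinlock held */
}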
nj_release(struct tiger_hw *card) } if (card->irq > 0) free_irq(card->irq, card); - if (card->isac.dch.dev.dev.class) + if (device_is_registered(&card->isac.dch.dev.dev)) mISDN_unregister_device(&card->isac.dch.dev); for (i = 0; i < 2; i++) { diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index a41b4b2645941..90ee56d07a6e9 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -222,7 +222,7 @@ mISDN_register_device(struct mISDNdevice *dev, err = get_free_devid(); if (err < 0) - goto error1; + return err; dev->id = err; device_initialize(&dev->dev); @@ -233,11 +233,12 @@ mISDN_register_device(struct mISDNdevice *dev, if (debug & DEBUG_CORE) printk(KERN_DEBUG "mISDN_register %s %d\n", dev_name(&dev->dev), dev->id); + dev->dev.class = &mISDN_class; + err = create_stack(dev); if (err) goto error1; - dev->dev.class = &mISDN_class; dev->dev.platform_data = dev; dev->dev.parent = parent; dev_set_drvdata(&dev->dev, dev); @@ -249,8 +250,8 @@ mISDN_register_device(struct mISDNdevice *dev, error3: delete_stack(dev); - return err; error1: + put_device(&dev->dev); return err; } diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index c3b2c99b5cd5c..cfbcd9e973c2e 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -77,6 +77,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) if (!entry) return -ENOMEM; + INIT_LIST_HEAD(&entry->list); entry->elem = elem; entry->dev.class = elements_class; @@ -107,7 +108,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) device_unregister(&entry->dev); return ret; err1: - kfree(entry); + put_device(&entry->dev); return ret; } EXPORT_SYMBOL(mISDN_dsp_element_register); diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h index 7ea10db20e3a6..48133d0228120 100644 --- a/drivers/isdn/mISDN/l1oip.h +++ b/drivers/isdn/mISDN/l1oip.h @@ -59,6 +59,7 @@ struct l1oip { int bundle; /* bundle channels in one frm */ int codec; /* codec to use for transmis. 
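/*
 * Lifetime rule behind the mISDN_register_device() and dsp_pipeline
 * fixes above, in sketch form: after device_initialize(), the embedded
 * struct device's refcount owns the object, so error paths must use
 * put_device() (which ends in the release callback) instead of
 * kfree(). struct thing and its helpers are hypothetical.
 */
static int register_thing(struct thing *t)
{
	int err;

	device_initialize(&t->dev);

	err = device_add(&t->dev);
	if (err) {
		put_device(&t->dev);	/* frees t via t->dev.release */
		return err;
	}
	return 0;
}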
*/ int limit; /* limit number of bchannels */ + bool shutdown; /* if card is released */ /* timer */ struct timer_list keep_tl; diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index b57dcb834594d..aec4f2a69c3bd 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, p = frame; /* restart timer */ - if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ)) + if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown) mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ); else hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ; @@ -601,7 +601,9 @@ l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len) goto multiframe; /* restart timer */ - if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) { + if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || + !hc->timeout_on) && + !hc->shutdown) { hc->timeout_on = 1; mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ); } else /* only adjust timer */ @@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc) { int ch; - if (timer_pending(&hc->keep_tl)) - del_timer(&hc->keep_tl); + hc->shutdown = true; - if (timer_pending(&hc->timeout_tl)) - del_timer(&hc->timeout_tl); + del_timer_sync(&hc->keep_tl); + del_timer_sync(&hc->timeout_tl); cancel_work_sync(&hc->workq); diff --git a/drivers/leds/leds-lm3601x.c b/drivers/leds/leds-lm3601x.c index d0e1d4814042e..3d12727482017 100644 --- a/drivers/leds/leds-lm3601x.c +++ b/drivers/leds/leds-lm3601x.c @@ -444,8 +444,6 @@ static int lm3601x_remove(struct i2c_client *client) { struct lm3601x_led *led = i2c_get_clientdata(client); - mutex_destroy(&led->lock); - return regmap_update_bits(led->regmap, LM3601X_ENABLE_REG, LM3601X_ENABLE_MASK, LM3601X_MODE_STANDBY); diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c index d4759db002c60..defe65f51fa22 100644 --- a/drivers/macintosh/macio-adb.c +++ b/drivers/macintosh/macio-adb.c @@ -106,6 +106,10 @@ int macio_init(void) return -ENXIO; } adb = ioremap(r.start, sizeof(struct adb_regs)); + if (!adb) { + of_node_put(adbs); + return -ENOMEM; + } out_8(&adb->ctrl.r, 0); out_8(&adb->intr.r, 0); diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 49af60bdac928..7db2e23a5ac81 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -425,7 +425,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, if (of_device_register(&dev->ofdev) != 0) { printk(KERN_DEBUG"macio: device registration error for %s!\n", dev_name(&dev->ofdev.dev)); - kfree(dev); + put_device(&dev->ofdev.dev); return NULL; } diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index bee33abb53084..e913ed1e34c63 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -632,15 +632,15 @@ static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg) rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src), DMA_TO_DEVICE); - if (rc < 0) - return rc; + if (!rc) + return -EIO; rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst), DMA_FROM_DEVICE); - if (rc < 0) { + if (!rc) { dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src), DMA_TO_DEVICE); - return rc; + return -EIO; } return 0; diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index 
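/*
 * Convention the bcm-flexrm hunk above corrects for: dma_map_sg()
 * returns the number of mapped segments, or 0 on failure; it never
 * returns a negative errno, so "rc < 0" cannot detect an error.
 * Kernel-style sketch, illustrative only.
 */
static int map_for_device(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)		/* the correct failure check */
		return -EIO;
	return mapped;
}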
f44079d62b1a7..527204c6d5cd0 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, ret = device_register(&ipi_mbox->dev); if (ret) { dev_err(dev, "Failed to register ipi mbox dev.\n"); + put_device(&ipi_mbox->dev); return ret; } mdev = &ipi_mbox->dev; @@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata) ipi_mbox = &pdata->ipi_mboxes[i]; if (ipi_mbox->dev.parent) { mbox_controller_unregister(&ipi_mbox->mbox); - device_unregister(&ipi_mbox->dev); + if (device_is_registered(&ipi_mbox->dev)) + device_unregister(&ipi_mbox->dev); } } } diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index 38cc8340e817d..8b8cd751fe9a9 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev) get_device(dev); ret = mdrv->probe(mdev, found_id); - if (ret) + if (ret) { module_put(carrier_mod); + put_device(dev); + } return ret; } diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 0266bfddfbe27..aa6938da0db85 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -108,7 +108,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, return 0; err: - mcb_free_dev(mdev); + put_device(&mdev->dev); return ret; } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index a878b959fbcdd..3aa73da2c67bd 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -119,6 +119,53 @@ static void __update_writeback_rate(struct cached_dev *dc) dc->writeback_rate_target = target; } +static bool idle_counter_exceeded(struct cache_set *c) +{ + int counter, dev_nr; + + /* + * If c->idle_counter overflows (idle for a really long time), + * reset it to 0 and do not set the maximum rate this time, for + * code simplicity. + */ + counter = atomic_inc_return(&c->idle_counter); + if (counter <= 0) { + atomic_set(&c->idle_counter, 0); + return false; + } + + dev_nr = atomic_read(&c->attached_dev_nr); + if (dev_nr == 0) + return false; + + /* + * c->idle_counter is increased by the writeback thread of each + * attached backing device; to represent a rough time period, the + * counter should be divided by dev_nr, otherwise the idle time + * cannot grow as more backing devices are attached. + * The following calculation is equivalent to checking + * (counter / dev_nr) < (dev_nr * 6) + */ + if (counter < (dev_nr * dev_nr * 6)) + return false; + + return true; +} + +/* + * idle_counter is increased every time update_writeback_rate() is + * called. If all backing devices attached to the same cache set have + * identical dc->writeback_rate_update_seconds values, it is about 6 + * rounds of update_writeback_rate() on each backing device before + * c->at_max_writeback_rate is set to 1, and then the max writeback rate + * is set for each dc->writeback_rate.rate. + * In order to avoid extra locking cost for counting the exact number of + * dirty cached devices, c->attached_dev_nr is used to calculate the idle + * threshold. It might be bigger if not all cached devices are in + * writeback mode, but it still works well with limited extra rounds of + * update_writeback_rate().
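/*
 * The integer identity used by idle_counter_exceeded() above, checked
 * in isolation: for non-negative values, floor(counter / dev_nr) <
 * dev_nr * 6 holds exactly when counter < dev_nr * dev_nr * 6, so the
 * division can be traded for a multiplication.
 */
#include <stdio.h>

int main(void)
{
	int dev_nr = 4;	/* threshold: 4 * 4 * 6 = 96 */

	for (int counter = 94; counter <= 97; counter++) {
		int div_form = (counter / dev_nr) < (dev_nr * 6);
		int mul_form = counter < (dev_nr * dev_nr * 6);

		printf("counter=%d div=%d mul=%d\n",
		       counter, div_form, mul_form);
	}
	return 0;
}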
+ */ static bool set_at_max_writeback_rate(struct cache_set *c, struct cached_dev *dc) { @@ -129,21 +176,8 @@ static bool set_at_max_writeback_rate(struct cache_set *c, /* Don't set max writeback rate if gc is running */ if (!c->gc_mark_valid) return false; - /* - * Idle_counter is increased everytime when update_writeback_rate() is - * called. If all backing devices attached to the same cache set have - * identical dc->writeback_rate_update_seconds values, it is about 6 - * rounds of update_writeback_rate() on each backing device before - * c->at_max_writeback_rate is set to 1, and then max wrteback rate set - * to each dc->writeback_rate.rate. - * In order to avoid extra locking cost for counting exact dirty cached - * devices number, c->attached_dev_nr is used to calculate the idle - * throushold. It might be bigger if not all cached device are in write- - * back mode, but it still works well with limited extra rounds of - * update_writeback_rate(). - */ - if (atomic_inc_return(&c->idle_counter) < - atomic_read(&c->attached_dev_nr) * 6) + + if (!idle_counter_exceeded(c)) return false; if (atomic_read(&c->at_max_writeback_rate) != 1) @@ -157,13 +191,10 @@ static bool set_at_max_writeback_rate(struct cache_set *c, dc->writeback_rate_change = 0; /* - * Check c->idle_counter and c->at_max_writeback_rate agagain in case - * new I/O arrives during before set_at_max_writeback_rate() returns. - * Then the writeback rate is set to 1, and its new value should be - * decided via __update_writeback_rate(). + * In case new I/O arrives before + * set_at_max_writeback_rate() returns. */ - if ((atomic_read(&c->idle_counter) < - atomic_read(&c->attached_dev_nr) * 6) || + if (!idle_counter_exceeded(c) || !atomic_read(&c->at_max_writeback_rate)) return false; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index af6d4f898e4c1..2ecd0db0f2945 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -551,11 +551,13 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, return r; } -static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd) +static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd, + bool destroy_bm) { dm_sm_destroy(cmd->metadata_sm); dm_tm_destroy(cmd->tm); - dm_block_manager_destroy(cmd->bm); + if (destroy_bm) + dm_block_manager_destroy(cmd->bm); } typedef unsigned long (*flags_mutator)(unsigned long); @@ -826,7 +828,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, cmd2 = lookup(bdev); if (cmd2) { mutex_unlock(&table_lock); - __destroy_persistent_data_objects(cmd); + __destroy_persistent_data_objects(cmd, true); kfree(cmd); return cmd2; } @@ -874,7 +876,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd) mutex_unlock(&table_lock); if (!cmd->fail_io) - __destroy_persistent_data_objects(cmd); + __destroy_persistent_data_objects(cmd, true); kfree(cmd); } } @@ -1808,14 +1810,52 @@ int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result) int dm_cache_metadata_abort(struct dm_cache_metadata *cmd) { - int r; + int r = -EINVAL; + struct dm_block_manager *old_bm = NULL, *new_bm = NULL; + + /* fail_io is double-checked with cmd->root_lock held below */ + if (unlikely(cmd->fail_io)) + return r; + + /* + * Replacement block manager (new_bm) is created and old_bm destroyed outside of + * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of + * shrinker associated with the block manager's bufio
client vs cmd root_lock). + * - must take shrinker_rwsem without holding cmd->root_lock + */ + new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, + CACHE_MAX_CONCURRENT_LOCKS); WRITE_LOCK(cmd); - __destroy_persistent_data_objects(cmd); - r = __create_persistent_data_objects(cmd, false); + if (cmd->fail_io) { + WRITE_UNLOCK(cmd); + goto out; + } + + __destroy_persistent_data_objects(cmd, false); + old_bm = cmd->bm; + if (IS_ERR(new_bm)) { + DMERR("could not create block manager during abort"); + cmd->bm = NULL; + r = PTR_ERR(new_bm); + goto out_unlock; + } + + cmd->bm = new_bm; + r = __open_or_format_metadata(cmd, false); + if (r) { + cmd->bm = NULL; + goto out_unlock; + } + new_bm = NULL; +out_unlock: if (r) cmd->fail_io = true; WRITE_UNLOCK(cmd); + dm_block_manager_destroy(old_bm); +out: + if (new_bm && !IS_ERR(new_bm)) + dm_block_manager_destroy(new_bm); return r; } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 4bc453f5bbaa6..9b2aec3098010 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -985,16 +985,16 @@ static void abort_transaction(struct cache *cache) if (get_cache_mode(cache) >= CM_READ_ONLY) return; - if (dm_cache_metadata_set_needs_check(cache->cmd)) { - DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); - set_cache_mode(cache, CM_FAIL); - } - DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); if (dm_cache_metadata_abort(cache->cmd)) { DMERR("%s: failed to abort metadata transaction", dev_name); set_cache_mode(cache, CM_FAIL); } + + if (dm_cache_metadata_set_needs_check(cache->cmd)) { + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); + set_cache_mode(cache, CM_FAIL); + } } static void metadata_operation_failed(struct cache *cache, const char *op, int r) @@ -1965,6 +1965,7 @@ static void destroy(struct cache *cache) if (cache->prison) dm_bio_prison_destroy_v2(cache->prison); + cancel_delayed_work_sync(&cache->waker); if (cache->wq) destroy_workqueue(cache->wq); diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index bdb255edc2004..0d38ad6235348 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -1966,6 +1966,7 @@ static void clone_dtr(struct dm_target *ti) mempool_exit(&clone->hydration_pool); dm_kcopyd_client_destroy(clone->kcopyd_client); + cancel_delayed_work_sync(&clone->waker); destroy_workqueue(clone->wq); hash_table_exit(clone); dm_clone_metadata_close(clone->cmd); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 835b1f3464d06..a1c4cc48bf034 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -254,6 +254,7 @@ struct dm_integrity_c { struct completion crypto_backoff; + bool wrote_to_journal; bool journal_uptodate; bool just_formatted; bool recalculate_flag; @@ -2256,6 +2257,8 @@ static void integrity_commit(struct work_struct *w) if (!commit_sections) goto release_flush_bios; + ic->wrote_to_journal = true; + i = commit_start; for (n = 0; n < commit_sections; n++) { for (j = 0; j < ic->journal_section_entries; j++) { @@ -2470,10 +2473,6 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; - /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) - return; - spin_lock_irq(&ic->endio_wait.lock); write_start = ic->committed_section; write_sections = ic->n_committed_sections; @@ -2980,10 +2979,17 @@ static void 
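/*
 * Teardown ordering behind the dm-cache and dm-clone hunks above,
 * sketched with hypothetical fields: destroy_workqueue() drains work
 * that is already queued, but a delayed work item still sitting on its
 * timer would be queued later, after the workqueue is gone. Cancelling
 * it first closes that window.
 */
static void example_destroy(struct example *ex)
{
	cancel_delayed_work_sync(&ex->waker);	/* stop timer-driven requeues */
	destroy_workqueue(ex->wq);		/* now safe to drain and free */
}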
dm_integrity_postsuspend(struct dm_target *ti) drain_workqueue(ic->commit_wq); if (ic->mode == 'J') { - if (ic->meta_dev) - queue_work(ic->writer_wq, &ic->writer_work); + queue_work(ic->writer_wq, &ic->writer_work); drain_workqueue(ic->writer_wq); dm_integrity_flush_buffers(ic, true); + if (ic->wrote_to_journal) { + init_journal(ic, ic->free_section, + ic->journal_sections - ic->free_section, ic->commit_seq); + if (ic->free_section) { + init_journal(ic, 0, ic->free_section, + next_commit_seq(ic->commit_seq)); + } + } } if (ic->mode == 'B') { @@ -3011,6 +3017,8 @@ static void dm_integrity_resume(struct dm_target *ti) DEBUG_print("resume\n"); + ic->wrote_to_journal = false; + if (ic->provided_data_sectors != old_provided_data_sectors) { if (ic->provided_data_sectors > old_provided_data_sectors && ic->mode == 'B' && @@ -4380,6 +4388,8 @@ static void dm_integrity_dtr(struct dm_target *ti) BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); BUG_ON(!list_empty(&ic->wait_list)); + if (ic->mode == 'B') + cancel_delayed_work_sync(&ic->bitmap_flush_work); if (ic->metadata_wq) destroy_workqueue(ic->metadata_wq); if (ic->wait_wq) diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index b839705654d4e..20171c9d8952e 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -573,7 +573,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param) size_t *needed = needed_param; *needed += sizeof(struct dm_target_versions); - *needed += strlen(tt->name); + *needed += strlen(tt->name) + 1; *needed += ALIGN_MASK; } @@ -638,7 +638,7 @@ static int __list_versions(struct dm_ioctl *param, size_t param_size, const char iter_info.old_vers = NULL; iter_info.vers = vers; iter_info.flags = 0; - iter_info.end = (char *)vers+len; + iter_info.end = (char *)vers + needed; /* * Now loop through filling out the names & versions. diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 842d79e5ea3aa..8f4d149bb99d7 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -701,6 +701,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd) goto bad_cleanup_data_sm; } + /* + * For pool metadata opening process, root setting is redundant + * because it will be set again in __begin_transaction(). But dm + * pool aborting process really needs to get last transaction's + * root to avoid accessing broken btree. 
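/*
 * The dm-ioctl sizing fix above in miniature: each target name is
 * emitted with its terminating NUL, so reserving only strlen(name)
 * bytes per entry undercounts by one each time and the final entry can
 * run past the buffer. Runnable arithmetic check:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "linear", "thin-pool", "cache" };
	size_t without_nuls = 0, with_nuls = 0;

	for (int i = 0; i < 3; i++) {
		without_nuls += strlen(names[i]);
		with_nuls += strlen(names[i]) + 1;
	}
	printf("reserved: %zu, actually needed: %zu\n",
	       without_nuls, with_nuls);
	return 0;
}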
+ */ + pmd->root = le64_to_cpu(disk_super->data_mapping_root); + pmd->details_root = le64_to_cpu(disk_super->device_details_root); + __setup_btree_details(pmd); dm_bm_unlock(sblock); @@ -753,13 +762,15 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f return r; } -static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd) +static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd, + bool destroy_bm) { dm_sm_destroy(pmd->data_sm); dm_sm_destroy(pmd->metadata_sm); dm_tm_destroy(pmd->nb_tm); dm_tm_destroy(pmd->tm); - dm_block_manager_destroy(pmd->bm); + if (destroy_bm) + dm_block_manager_destroy(pmd->bm); } static int __begin_transaction(struct dm_pool_metadata *pmd) @@ -966,7 +977,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) } pmd_write_unlock(pmd); if (!pmd->fail_io) - __destroy_persistent_data_objects(pmd); + __destroy_persistent_data_objects(pmd, true); kfree(pmd); return 0; @@ -1873,19 +1884,52 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd) int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) { int r = -EINVAL; + struct dm_block_manager *old_bm = NULL, *new_bm = NULL; + + /* fail_io is double-checked with pmd->root_lock held below */ + if (unlikely(pmd->fail_io)) + return r; + + /* + * Replacement block manager (new_bm) is created and old_bm destroyed outside of + * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of + * shrinker associated with the block manager's bufio client vs pmd root_lock). + * - must take shrinker_rwsem without holding pmd->root_lock + */ + new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, + THIN_MAX_CONCURRENT_LOCKS); pmd_write_lock(pmd); - if (pmd->fail_io) + if (pmd->fail_io) { + pmd_write_unlock(pmd); goto out; + } __set_abort_with_changes_flags(pmd); - __destroy_persistent_data_objects(pmd); - r = __create_persistent_data_objects(pmd, false); + __destroy_persistent_data_objects(pmd, false); + old_bm = pmd->bm; + if (IS_ERR(new_bm)) { + DMERR("could not create block manager during abort"); + pmd->bm = NULL; + r = PTR_ERR(new_bm); + goto out_unlock; + } + + pmd->bm = new_bm; + r = __open_or_format_metadata(pmd, false); + if (r) { + pmd->bm = NULL; + goto out_unlock; + } + new_bm = NULL; +out_unlock: if (r) pmd->fail_io = true; - -out: pmd_write_unlock(pmd); + dm_block_manager_destroy(old_bm); +out: + if (new_bm && !IS_ERR(new_bm)) + dm_block_manager_destroy(new_bm); return r; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index a196d7cb51bd2..f3c519e18a12f 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2907,6 +2907,8 @@ static void __pool_destroy(struct pool *pool) dm_bio_prison_destroy(pool->prison); dm_kcopyd_client_destroy(pool->copier); + cancel_delayed_work_sync(&pool->waker); + cancel_delayed_work_sync(&pool->no_space_timeout); if (pool->wq) destroy_workqueue(pool->wq); @@ -3566,20 +3568,28 @@ static int pool_preresume(struct dm_target *ti) */ r = bind_control_target(pool, ti); if (r) - return r; + goto out; r = maybe_resize_data_dev(ti, &need_commit1); if (r) - return r; + goto out; r = maybe_resize_metadata_dev(ti, &need_commit2); if (r) - return r; + goto out; if (need_commit1 || need_commit2) (void) commit(pool); +out: + /* + * When a thin-pool is PM_FAIL, it cannot be rebuilt if + * bio is in deferred list. Therefore need to return 0 + * to allow pool_resume() to flush IO. 
+ */ + if (r && get_pool_mode(pool) == PM_FAIL) + r = 0; - return 0; + return r; } static void pool_suspend_active_thins(struct pool *pool) diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index d377ea0609255..20afc0aec1778 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap) sb = kmap_atomic(bitmap->storage.sb_page); pr_debug("%s: bitmap file superblock:\n", bmname(bitmap)); pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); - pr_debug(" version: %d\n", le32_to_cpu(sb->version)); + pr_debug(" version: %u\n", le32_to_cpu(sb->version)); pr_debug(" uuid: %08x.%08x.%08x.%08x\n", le32_to_cpu(*(__le32 *)(sb->uuid+0)), le32_to_cpu(*(__le32 *)(sb->uuid+4)), @@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap) pr_debug("events cleared: %llu\n", (unsigned long long) le64_to_cpu(sb->events_cleared)); pr_debug(" state: %08x\n", le32_to_cpu(sb->state)); - pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize)); - pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); + pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize)); + pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep)); pr_debug(" sync size: %llu KB\n", (unsigned long long)le64_to_cpu(sb->sync_size)/2); - pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind)); + pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind)); kunmap_atomic(sb); } @@ -2106,7 +2106,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, bytes = DIV_ROUND_UP(chunks, 8); if (!bitmap->mddev->bitmap_info.external) bytes += sizeof(bitmap_super_t); - } while (bytes > (space << 9)); + } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) < + (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1)); } else chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; @@ -2151,7 +2152,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, bitmap->counts.missing_pages = pages; bitmap->counts.chunkshift = chunkshift; bitmap->counts.chunks = chunks; - bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift + + bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift + BITMAP_BLOCK_SHIFT); blocks = min(old_counts.chunks << old_counts.chunkshift, @@ -2177,8 +2178,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, bitmap->counts.missing_pages = old_counts.pages; bitmap->counts.chunkshift = old_counts.chunkshift; bitmap->counts.chunks = old_counts.chunks; - bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift + - BITMAP_BLOCK_SHIFT); + bitmap->mddev->bitmap_info.chunksize = + 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT); blocks = old_counts.chunks << old_counts.chunkshift; pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); break; @@ -2196,20 +2197,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (set) { bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); - if (*bmc_new == 0) { - /* need to set on-disk bits too. */ - sector_t end = block + new_blocks; - sector_t start = block >> chunkshift; - start <<= chunkshift; - while (start < end) { - md_bitmap_file_set_bit(bitmap, block); - start += 1 << chunkshift; + if (bmc_new) { + if (*bmc_new == 0) { + /* need to set on-disk bits too. 
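/*
 * Why the md-bitmap hunks above write 1UL << shift: with a plain int
 * literal the shift is done in 32 bits, so shift counts of 31 or more
 * overflow (formally undefined for signed int) before the result is
 * widened. Small demonstration:
 */
#include <stdio.h>

int main(void)
{
	int shift = 31;

	/* 1 << 31 is undefined for 32-bit signed int; typically -2147483648 */
	printf("1   << %d = %ld\n", shift, (long)(1 << shift));
	printf("1UL << %d = %lu\n", shift, 1UL << shift);	/* 2147483648 */
	return 0;
}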
*/ + sector_t end = block + new_blocks; + sector_t start = block >> chunkshift; + + start <<= chunkshift; + while (start < end) { + md_bitmap_file_set_bit(bitmap, block); + start += 1 << chunkshift; + } + *bmc_new = 2; + md_bitmap_count_page(&bitmap->counts, block, 1); + md_bitmap_set_pending(&bitmap->counts, block); } - *bmc_new = 2; - md_bitmap_count_page(&bitmap->counts, block, 1); - md_bitmap_set_pending(&bitmap->counts, block); + *bmc_new |= NEEDED_MASK; } - *bmc_new |= NEEDED_MASK; if (new_blocks < old_blocks) old_blocks = new_blocks; } @@ -2516,6 +2520,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len) if (csize < 512 || !is_power_of_2(csize)) return -EINVAL; + if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE * + sizeof(((bitmap_super_t *)0)->chunksize)))) + return -EOVERFLOW; mddev->bitmap_info.chunksize = csize; return len; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 0043dec37a870..3038e7ecb7e16 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -555,13 +555,14 @@ static void md_end_flush(struct bio *bio) struct md_rdev *rdev = bio->bi_private; struct mddev *mddev = rdev->mddev; + bio_put(bio); + rdev_dec_pending(rdev, mddev); if (atomic_dec_and_test(&mddev->flush_pending)) { /* The pre-request flush has finished */ queue_work(md_wq, &mddev->flush_work); } - bio_put(bio); } static void md_submit_flush_data(struct work_struct *ws); @@ -966,10 +967,12 @@ static void super_written(struct bio *bio) } else clear_bit(LastDev, &rdev->flags); + bio_put(bio); + + rdev_dec_pending(rdev, mddev); + if (atomic_dec_and_test(&mddev->pending_writes)) wake_up(&mddev->sb_wait); - rdev_dec_pending(rdev, mddev); - bio_put(bio); } void md_super_write(struct mddev *mddev, struct md_rdev *rdev, diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a4c0cafa6010a..a20332e755e81 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -48,7 +48,7 @@ static void dump_zones(struct mddev *mddev) int len = 0; for (k = 0; k < conf->strip_zone[j].nb_dev; k++) - len += snprintf(line+len, 200-len, "%s%s", k?"/":"", + len += scnprintf(line+len, 200-len, "%s%s", k?"/":"", bdevname(conf->devlist[j*raid_disks + k]->bdev, b)); pr_debug("md: zone%d=[%s]\n", j, line); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fb31e5dd54a6e..6b5cc3f59fb39 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3115,6 +3115,7 @@ static int raid1_run(struct mddev *mddev) * RAID1 needs at least one disk in active */ if (conf->raid_disks - mddev->degraded < 1) { + md_unregister_thread(&conf->thread); ret = -EINVAL; goto abort; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 01c7edf329367..9f114b9d8dc6b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -36,6 +36,7 @@ */ #include +#include #include #include #include @@ -3936,7 +3937,7 @@ static void handle_stripe_fill(struct stripe_head *sh, * back cache (prexor with orig_page, and then xor with * page) in the read path */ - if (s->injournal && s->failed) { + if (s->to_read && s->injournal && s->failed) { if (test_bit(STRIPE_R5C_CACHING, &sh->state)) r5c_make_stripe_write_out(sh); goto out; @@ -6519,7 +6520,18 @@ static void raid5d(struct md_thread *thread) spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); spin_lock_irq(&conf->device_lock); + + /* + * Waiting on MD_SB_CHANGE_PENDING below may deadlock + * seeing md_check_recovery() is needed to clear + * the flag when using mdmon. 
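/*
 * Difference the raid0 dump_zones() hunk above relies on, sketched in
 * kernel style (illustrative only): snprintf() returns the length that
 * would have been written had the buffer been big enough, so
 * accumulating it can push the write offset past the end; scnprintf()
 * returns the bytes actually stored and is safe to sum.
 */
static void join_names(char *line, size_t size, const char **names, int n)
{
	int len = 0;

	for (int k = 0; k < n; k++)
		len += scnprintf(line + len, size - len, "%s%s",
				 k ? "/" : "", names[k]);
	/* len can never exceed size - 1, even when the names overflow */
}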
+ */ + continue; } + + wait_event_lock_irq(mddev->sb_wait, + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), + conf->device_lock); } pr_debug("%d stripes handled\n", handled); diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c index 2d95e16cd2489..f66699d5dc66e 100644 --- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c +++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c @@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec) uint8_t *cec_message = cros_ec->event_data.data.cec_message; unsigned int len = cros_ec->event_size; + if (len > CEC_MAX_MSG_SIZE) + len = CEC_MAX_MSG_SIZE; cros_ec_cec->rx_msg.len = len; memcpy(cros_ec_cec->rx_msg.msg, cec_message, len); diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c index 028a09a7531ef..102f1af01000a 100644 --- a/drivers/media/cec/platform/s5p/s5p_cec.c +++ b/drivers/media/cec/platform/s5p/s5p_cec.c @@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n"); cec->rx = STATE_BUSY; cec->msg.len = status >> 24; + if (cec->msg.len > CEC_MAX_MSG_SIZE) + cec->msg.len = CEC_MAX_MSG_SIZE; cec->msg.rx_status = CEC_RX_STATUS_OK; s5p_cec_get_rx_buf(cec, cec->msg.len, cec->msg.msg); diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 72350343a56a6..3bafde87a1257 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -787,7 +787,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, num_buffers = max_t(unsigned int, *count, q->min_buffers_needed); num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); + /* + * Set this now to ensure that drivers see the correct q->memory value + * in the queue_setup op. + */ + mutex_lock(&q->mmap_lock); q->memory = memory; + mutex_unlock(&q->mmap_lock); /* * Ask the driver how many buffers and planes per buffer it requires. @@ -796,22 +802,27 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (ret) - return ret; + goto error; /* Check that driver has set sane values */ - if (WARN_ON(!num_planes)) - return -EINVAL; + if (WARN_ON(!num_planes)) { + ret = -EINVAL; + goto error; + } for (i = 0; i < num_planes; i++) - if (WARN_ON(!plane_sizes[i])) - return -EINVAL; + if (WARN_ON(!plane_sizes[i])) { + ret = -EINVAL; + goto error; + } /* Finally, allocate buffers and video memory */ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); if (allocated_buffers == 0) { dprintk(q, 1, "memory allocation failed\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error; } /* @@ -852,7 +863,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, if (ret < 0) { /* * Note: __vb2_queue_free() will subtract 'allocated_buffers' - * from q->num_buffers. + * from q->num_buffers and it will reset q->memory to + * VB2_MEMORY_UNKNOWN. 
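/*
 * Shared shape of the two CEC fixes above: a message length reported by
 * firmware or hardware is clamped to the destination array's size
 * before the memcpy(), since rx_msg.msg holds at most CEC_MAX_MSG_SIZE
 * (16) bytes. Kernel-style sketch, illustrative only.
 */
static void store_rx_msg(struct cec_msg *msg, const u8 *data,
			 unsigned int len)
{
	if (len > CEC_MAX_MSG_SIZE)
		len = CEC_MAX_MSG_SIZE;
	msg->len = len;
	memcpy(msg->msg, data, len);
}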
*/ __vb2_queue_free(q, allocated_buffers); mutex_unlock(&q->mmap_lock); @@ -868,6 +880,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, q->waiting_for_buffers = !q->is_output; return 0; + +error: + mutex_lock(&q->mmap_lock); + q->memory = VB2_MEMORY_UNKNOWN; + mutex_unlock(&q->mmap_lock); + return ret; } EXPORT_SYMBOL_GPL(vb2_core_reqbufs); @@ -878,6 +896,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, { unsigned int num_planes = 0, num_buffers, allocated_buffers; unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool no_previous_buffers = !q->num_buffers; int ret; if (q->num_buffers == VB2_MAX_FRAME) { @@ -885,13 +904,19 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, return -ENOBUFS; } - if (!q->num_buffers) { + if (no_previous_buffers) { if (q->waiting_in_dqbuf && *count) { dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); + /* + * Set this now to ensure that drivers see the correct q->memory + * value in the queue_setup op. + */ + mutex_lock(&q->mmap_lock); q->memory = memory; + mutex_unlock(&q->mmap_lock); q->waiting_for_buffers = !q->is_output; } else { if (q->memory != memory) { @@ -914,14 +939,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (ret) - return ret; + goto error; /* Finally, allocate buffers and video memory */ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); if (allocated_buffers == 0) { dprintk(q, 1, "memory allocation failed\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error; } /* @@ -952,7 +978,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, if (ret < 0) { /* * Note: __vb2_queue_free() will subtract 'allocated_buffers' - * from q->num_buffers. + * from q->num_buffers and it will reset q->memory to + * VB2_MEMORY_UNKNOWN. */ __vb2_queue_free(q, allocated_buffers); mutex_unlock(&q->mmap_lock); @@ -967,6 +994,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, *count = allocated_buffers; return 0; + +error: + if (no_previous_buffers) { + mutex_lock(&q->mmap_lock); + q->memory = VB2_MEMORY_UNKNOWN; + mutex_unlock(&q->mmap_lock); + } + return ret; } EXPORT_SYMBOL_GPL(vb2_core_create_bufs); @@ -2120,6 +2155,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, struct vb2_buffer *vb; unsigned int buffer, plane; + /* + * Sanity checks to ensure the lock is held, MEMORY_MMAP is + * used and fileio isn't active. + */ + lockdep_assert_held(&q->mmap_lock); + + if (q->memory != VB2_MEMORY_MMAP) { + dprintk(q, 1, "queue is not currently set up for mmap\n"); + return -EINVAL; + } + + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + /* * Go over all buffers and their planes, comparing the given offset * with an offset assigned to each plane. If a match is found, @@ -2219,11 +2270,6 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) int ret; unsigned long length; - if (q->memory != VB2_MEMORY_MMAP) { - dprintk(q, 1, "queue is not currently set up for mmap\n"); - return -EINVAL; - } - /* * Check memory area access mode. 
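/*
 * The __find_plane_by_offset() hunk above documents its precondition
 * with a lockdep assertion: every caller must hold q->mmap_lock, a
 * CONFIG_PROVE_LOCKING build splats immediately if one does not, and
 * production builds compile the check away. Kernel-style sketch with
 * the body elided.
 */
static int find_plane(struct vb2_queue *q, unsigned long off)
{
	lockdep_assert_held(&q->mmap_lock);

	/* ... walk q->bufs[] comparing per-plane offsets against off ... */
	return -EINVAL;
}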
*/ @@ -2245,14 +2291,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) mutex_lock(&q->mmap_lock); - if (vb2_fileio_is_active(q)) { - dprintk(q, 1, "mmap: file io in progress\n"); - ret = -EBUSY; - goto unlock; - } - /* - * Find the plane corresponding to the offset passed by userspace. + * Find the plane corresponding to the offset passed by userspace. This + * will return an error if not MEMORY_MMAP or file I/O is in progress. */ ret = __find_plane_by_offset(q, off, &buffer, &plane); if (ret) @@ -2305,22 +2346,25 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, void *vaddr; int ret; - if (q->memory != VB2_MEMORY_MMAP) { - dprintk(q, 1, "queue is not currently set up for mmap\n"); - return -EINVAL; - } + mutex_lock(&q->mmap_lock); /* - * Find the plane corresponding to the offset passed by userspace. + * Find the plane corresponding to the offset passed by userspace. This + * will return an error if not MEMORY_MMAP or file I/O is in progress. */ ret = __find_plane_by_offset(q, off, &buffer, &plane); if (ret) - return ret; + goto unlock; vb = q->bufs[buffer]; vaddr = vb2_plane_vaddr(vb, plane); + mutex_unlock(&q->mmap_lock); return vaddr ? (unsigned long)vaddr : -EINVAL; + +unlock: + mutex_unlock(&q->mmap_lock); + return ret; } EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); #endif diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index e58cb8434dafe..12b7f698f5623 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -800,6 +800,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file) if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; + if (dmxdev->exit) { + mutex_unlock(&dmxdev->mutex); + return -ENODEV; + } + for (i = 0; i < dmxdev->filternum; i++) if (dmxdev->filter[i].state == DMXDEV_STATE_FREE) break; @@ -1458,7 +1463,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init); void dvb_dmxdev_release(struct dmxdev *dmxdev) { + mutex_lock(&dmxdev->mutex); dmxdev->exit = 1; + mutex_unlock(&dmxdev->mutex); + if (dmxdev->dvbdev->users > 1) { wait_event(dmxdev->dvbdev->wait_queue, dmxdev->dvbdev->users == 1); diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index cfc27629444f3..fd476536d32ed 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -157,7 +157,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca) { unsigned int i; - dvb_free_device(ca->dvbdev); + dvb_device_put(ca->dvbdev); for (i = 0; i < ca->slot_count; i++) vfree(ca->slot_info[i].rx_buffer.data); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 06ea30a689d75..b04638321b75b 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -135,7 +135,7 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) struct dvb_frontend_private *fepriv = fe->frontend_priv; if (fepriv) - dvb_free_device(fepriv->dvbdev); + dvb_device_put(fepriv->dvbdev); dvb_frontend_invoke_release(fe, fe->ops.release); @@ -2961,6 +2961,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb, .name = fe->ops.info.name, #endif }; + int ret; dev_dbg(dvb->device, "%s:\n", __func__); @@ -2994,8 +2995,13 @@ int dvb_register_frontend(struct dvb_adapter *dvb, "DVB: registering adapter %i frontend %i (%s)...\n", fe->dvb->num, fe->id, fe->ops.info.name); - dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template, + ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, 
&dvbdev_template, fe, DVB_DEVICE_FRONTEND, 0); + if (ret) { + dvb_frontend_put(fe); + mutex_unlock(&frontend_mutex); + return ret; + } /* * Initialize the cache to the proper values according with the diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 6974f17315294..1331f2c2237e6 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -358,6 +358,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req) int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) { + struct vb2_queue *q = &ctx->vb_q; + + if (b->index >= q->num_buffers) { + dprintk(1, "[%s] buffer index out of range\n", ctx->name); + return -EINVAL; + } vb2_core_querybuf(&ctx->vb_q, b->index, b); dprintk(3, "[%s] index=%d\n", ctx->name, b->index); return 0; @@ -382,8 +388,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp) int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) { + struct vb2_queue *q = &ctx->vb_q; int ret; + if (b->index >= q->num_buffers) { + dprintk(1, "[%s] buffer index out of range\n", ctx->name); + return -EINVAL; + } ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL); if (ret) { dprintk(1, "[%s] index=%d errno=%d\n", ctx->name, diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index ec9ebff28552b..fea2d82300b0d 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -107,7 +107,7 @@ static int dvb_device_open(struct inode *inode, struct file *file) new_fops = fops_get(dvbdev->fops); if (!new_fops) goto fail; - file->private_data = dvbdev; + file->private_data = dvb_device_get(dvbdev); replace_fops(file, new_fops); if (file->f_op->open) err = file->f_op->open(inode, file); @@ -171,6 +171,9 @@ int dvb_generic_release(struct inode *inode, struct file *file) } dvbdev->users++; + + dvb_device_put(dvbdev); + return 0; } EXPORT_SYMBOL(dvb_generic_release); @@ -342,6 +345,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev, GFP_KERNEL); if (!dvbdev->pads) { kfree(dvbdev->entity); + dvbdev->entity = NULL; return -ENOMEM; } } @@ -488,6 +492,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, } memcpy(dvbdev, template, sizeof(struct dvb_device)); + kref_init(&dvbdev->ref); dvbdev->type = type; dvbdev->id = id; dvbdev->adapter = adap; @@ -517,7 +522,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, #endif dvbdev->minor = minor; - dvb_minors[minor] = dvbdev; + dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads); @@ -557,6 +562,7 @@ void dvb_remove_device(struct dvb_device *dvbdev) down_write(&minor_rwsem); dvb_minors[dvbdev->minor] = NULL; + dvb_device_put(dvbdev); up_write(&minor_rwsem); dvb_media_device_free(dvbdev); @@ -568,21 +574,34 @@ void dvb_remove_device(struct dvb_device *dvbdev) EXPORT_SYMBOL(dvb_remove_device); -void dvb_free_device(struct dvb_device *dvbdev) +static void dvb_free_device(struct kref *ref) { - if (!dvbdev) - return; + struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref); kfree (dvbdev->fops); kfree (dvbdev); } -EXPORT_SYMBOL(dvb_free_device); + + +struct dvb_device *dvb_device_get(struct dvb_device *dvbdev) +{ + kref_get(&dvbdev->ref); + return dvbdev; +} +EXPORT_SYMBOL(dvb_device_get); + + +void dvb_device_put(struct dvb_device *dvbdev) +{ + if (dvbdev) + kref_put(&dvbdev->ref, dvb_free_device); +} void 
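/*
 * Editor's note, illustration only: dvb_device_get()/dvb_device_put()
 * above follow the stock kref idiom. In generic form (struct foo and
 * its helpers are hypothetical names):
 */
static void foo_release(struct kref *ref)
{
	struct foo *f = container_of(ref, struct foo, ref);

	kfree(f);
}

static struct foo *foo_get(struct foo *f)
{
	kref_get(&f->ref);	/* caller must already hold a valid reference */
	return f;
}

static void foo_put(struct foo *f)
{
	if (f)
		kref_put(&f->ref, foo_release);
}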
dvb_unregister_device(struct dvb_device *dvbdev) { dvb_remove_device(dvbdev); - dvb_free_device(dvbdev); + dvb_device_put(dvbdev); } EXPORT_SYMBOL(dvb_unregister_device); diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index da0ff7b44da41..68b92b4419cff 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -649,6 +649,7 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe) deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size); if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) { err("firmware download failed: %d\n",ret); + release_firmware(fw); return ret; } i += 4 + len; diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index a57470bf71bf3..2134e25096aac 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -6672,7 +6672,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr) static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct drxk_state *state = fe->demodulator_priv; - u16 err; + u16 err = 0; dprintk(1, "\n"); diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c index 3d54a0ec86afd..3ae1f3a2f1420 100644 --- a/drivers/media/dvb-frontends/stv0288.c +++ b/drivers/media/dvb-frontends/stv0288.c @@ -440,9 +440,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe) struct stv0288_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; - char tm; - unsigned char tda[3]; - u8 reg, time_out = 0; + u8 tda[3], reg, time_out = 0; + s8 tm; dprintk("%s : FE_SET_FRONTEND\n", __func__); diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c index 19c74db0649fc..f55322eebf6d0 100644 --- a/drivers/media/i2c/ad5820.c +++ b/drivers/media/i2c/ad5820.c @@ -329,18 +329,18 @@ static int ad5820_probe(struct i2c_client *client, ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL); if (ret < 0) - goto cleanup2; + goto clean_mutex; ret = v4l2_async_register_subdev(&coil->subdev); if (ret < 0) - goto cleanup; + goto clean_entity; return ret; -cleanup2: - mutex_destroy(&coil->power_lock); -cleanup: +clean_entity: media_entity_cleanup(&coil->subdev.entity); +clean_mutex: + mutex_destroy(&coil->power_lock); return ret; } diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c index 58489ea0c1da1..7cf2271866d05 100644 --- a/drivers/media/pci/cx88/cx88-vbi.c +++ b/drivers/media/pci/cx88/cx88-vbi.c @@ -144,11 +144,10 @@ static int buffer_prepare(struct vb2_buffer *vb) return -EINVAL; vb2_set_plane_payload(vb, 0, size); - cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, - 0, VBI_LINE_LENGTH * lines, - VBI_LINE_LENGTH, 0, - lines); - return 0; + return cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, + 0, VBI_LINE_LENGTH * lines, + VBI_LINE_LENGTH, 0, + lines); } static void buffer_finish(struct vb2_buffer *vb) diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 8cffdacf60079..e5adffa3a99a8 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -431,6 +431,7 @@ static int queue_setup(struct vb2_queue *q, static int buffer_prepare(struct vb2_buffer *vb) { + int ret; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct cx8800_dev *dev = vb->vb2_queue->drv_priv; struct cx88_core *core = dev->core; @@ -445,35 +446,35 @@ static int 
buffer_prepare(struct vb2_buffer *vb) switch (core->field) { case V4L2_FIELD_TOP: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, 0, UNSET, - buf->bpl, 0, core->height); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, 0, UNSET, + buf->bpl, 0, core->height); break; case V4L2_FIELD_BOTTOM: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, UNSET, 0, - buf->bpl, 0, core->height); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, UNSET, 0, + buf->bpl, 0, core->height); break; case V4L2_FIELD_SEQ_TB: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, - 0, buf->bpl * (core->height >> 1), - buf->bpl, 0, - core->height >> 1); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, + 0, buf->bpl * (core->height >> 1), + buf->bpl, 0, + core->height >> 1); break; case V4L2_FIELD_SEQ_BT: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, - buf->bpl * (core->height >> 1), 0, - buf->bpl, 0, - core->height >> 1); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, + buf->bpl * (core->height >> 1), 0, + buf->bpl, 0, + core->height >> 1); break; case V4L2_FIELD_INTERLACED: default: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, 0, buf->bpl, - buf->bpl, buf->bpl, - core->height >> 1); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, 0, buf->bpl, + buf->bpl, buf->bpl, + core->height >> 1); break; } dprintk(2, @@ -481,7 +482,7 @@ static int buffer_prepare(struct vb2_buffer *vb) buf, buf->vb.vb2_buf.index, __func__, core->width, core->height, dev->fmt->depth, dev->fmt->fourcc, (unsigned long)buf->risc.dma); - return 0; + return ret; } static void buffer_finish(struct vb2_buffer *vb) diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 6c08b77bfd477..3cadfbe60fe6e 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1270,7 +1270,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev, if (saa7164_dev_setup(dev) < 0) { err = -EINVAL; - goto fail_free; + goto fail_dev; } /* print pci info */ @@ -1438,6 +1438,8 @@ static int saa7164_initdev(struct pci_dev *pci_dev, fail_irq: saa7164_dev_unregister(dev); +fail_dev: + pci_disable_device(pci_dev); fail_free: v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c index d497afc7e7b78..4ebb1e020fad5 100644 --- a/drivers/media/pci/solo6x10/solo6x10-core.c +++ b/drivers/media/pci/solo6x10/solo6x10-core.c @@ -420,6 +420,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev) solo_dev->nr_chans); if (device_register(dev)) { + put_device(dev); dev->parent = NULL; return -ENOMEM; } diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 159c9de857885..6ffa12e83e42a 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -852,7 +852,7 @@ static void coda_setup_iram(struct coda_ctx *ctx) /* Only H.264BP and H.263P3 are considered */ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64); iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64); - if (!iram_info->buf_dbk_c_use) + if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use) goto out; iram_info->axi_sram_use |= dbk_bits; @@ -876,7 +876,7 @@ static void coda_setup_iram(struct coda_ctx *ctx) iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128); iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128); - if (!iram_info->buf_dbk_c_use) + if 
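/*
 * Editor's note, not part of the patch: the coda hunks around this point
 * make lazily allocated tables fail safely instead of being dereferenced
 * as NULL pointers. The generic shape (ctx->qmat[] is a stand-in name):
 */
if (!ctx->qmat[0]) {
	ctx->qmat[0] = kmalloc(64, GFP_KERNEL);
	if (!ctx->qmat[0])
		return -ENOMEM;	/* fail before the table is ever used */
}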
(!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use) goto out; iram_info->axi_sram_use |= dbk_bits; @@ -1082,10 +1082,16 @@ static int coda_start_encoding(struct coda_ctx *ctx) } if (dst_fourcc == V4L2_PIX_FMT_JPEG) { - if (!ctx->params.jpeg_qmat_tab[0]) + if (!ctx->params.jpeg_qmat_tab[0]) { ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL); - if (!ctx->params.jpeg_qmat_tab[1]) + if (!ctx->params.jpeg_qmat_tab[0]) + return -ENOMEM; + } + if (!ctx->params.jpeg_qmat_tab[1]) { ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL); + if (!ctx->params.jpeg_qmat_tab[1]) + return -ENOMEM; + } coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality); } diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c index a72f4655e5ad5..b7bf529f18f77 100644 --- a/drivers/media/platform/coda/coda-jpeg.c +++ b/drivers/media/platform/coda/coda-jpeg.c @@ -1052,10 +1052,16 @@ static int coda9_jpeg_start_encoding(struct coda_ctx *ctx) v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n"); return ret; } - if (!ctx->params.jpeg_qmat_tab[0]) + if (!ctx->params.jpeg_qmat_tab[0]) { ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL); - if (!ctx->params.jpeg_qmat_tab[1]) + if (!ctx->params.jpeg_qmat_tab[0]) + return -ENOMEM; + } + if (!ctx->params.jpeg_qmat_tab[1]) { ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL); + if (!ctx->params.jpeg_qmat_tab[1]) + return -ENOMEM; + } coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality); return 0; diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index 08d1f39a914c5..60b28e6f739e3 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -1174,7 +1174,7 @@ int __init fimc_register_driver(void) return platform_driver_register(&fimc_driver); } -void __exit fimc_unregister_driver(void) +void fimc_unregister_driver(void) { platform_driver_unregister(&fimc_driver); } diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index dc2a144cd29b6..b52d2203eac57 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -213,6 +213,7 @@ static int fimc_is_register_subdevs(struct fimc_is *is) if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) { of_node_put(child); + of_node_put(i2c_bus); return ret; } index++; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index a9a8f0433fb2c..bd37011fb671e 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -401,6 +401,7 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd, int index = fmd->num_sensors; struct fimc_source_info *pd = &fmd->sensor[index].pdata; struct device_node *rem, *np; + struct v4l2_async_subdev *asd; struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 }; int ret; @@ -418,10 +419,10 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd, pd->mux_id = (endpoint.base.port - 1) & 0x1; rem = of_graph_get_remote_port_parent(ep); - of_node_put(ep); if (rem == NULL) { v4l2_info(&fmd->v4l2_dev, "Remote device at %pOF not found\n", ep); + of_node_put(ep); return 0; } @@ -450,6 +451,7 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd, * checking parent's node name. 
*/ np = of_get_parent(rem); + of_node_put(rem); if (of_node_name_eq(np, "i2c-isp")) pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK; @@ -458,20 +460,19 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd, of_node_put(np); if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) { - of_node_put(rem); + of_node_put(ep); return -EINVAL; } - fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_FWNODE; - fmd->sensor[index].asd.match.fwnode = of_fwnode_handle(rem); + asd = v4l2_async_notifier_add_fwnode_remote_subdev( + &fmd->subdev_notifier, of_fwnode_handle(ep), sizeof(*asd)); - ret = v4l2_async_notifier_add_subdev(&fmd->subdev_notifier, - &fmd->sensor[index].asd); - if (ret) { - of_node_put(rem); - return ret; - } + of_node_put(ep); + + if (IS_ERR(asd)) + return PTR_ERR(asd); + fmd->sensor[index].asd = asd; fmd->num_sensors++; return 0; @@ -1377,8 +1378,7 @@ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier, /* Find platform data for this sensor subdev */ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++) - if (fmd->sensor[i].asd.match.fwnode == - of_fwnode_handle(subdev->dev->of_node)) + if (fmd->sensor[i].asd == asd) si = &fmd->sensor[i]; if (si == NULL) @@ -1470,7 +1470,7 @@ static int fimc_md_probe(struct platform_device *pdev) pinctrl = devm_pinctrl_get(dev); if (IS_ERR(pinctrl)) { ret = PTR_ERR(pinctrl); - if (ret != EPROBE_DEFER) + if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get pinctrl: %d\n", ret); goto err_clk; } @@ -1582,7 +1582,11 @@ static int __init fimc_md_init(void) if (ret) return ret; - return platform_driver_register(&fimc_md_driver); + ret = platform_driver_register(&fimc_md_driver); + if (ret) + fimc_unregister_driver(); + + return ret; } static void __exit fimc_md_exit(void) diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h index 9447fafe23c6e..a3876d668ea6e 100644 --- a/drivers/media/platform/exynos4-is/media-dev.h +++ b/drivers/media/platform/exynos4-is/media-dev.h @@ -83,7 +83,7 @@ struct fimc_camclk_info { */ struct fimc_sensor_info { struct fimc_source_info pdata; - struct v4l2_async_subdev asd; + struct v4l2_async_subdev *asd; struct v4l2_subdev *subdev; struct fimc_dev *host; }; diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index 15965e63cb619..9333a7a33d4da 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -444,7 +444,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) ret = media_pipeline_start(&vdev->entity, &video->pipe); if (ret < 0) - return ret; + goto flush_buffers; ret = video_check_format(video); if (ret < 0) @@ -473,6 +473,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) error: media_pipeline_stop(&vdev->entity); +flush_buffers: video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); return ret; diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c index 710f9a2b132b0..f7de02352f1b0 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.c +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -764,8 +764,8 @@ static int vcodec_domains_get(struct venus_core *core) for (i = 0; i < res->vcodec_pmdomains_num; i++) { pd = dev_pm_domain_attach_by_name(dev, res->vcodec_pmdomains[i]); - if (IS_ERR(pd)) - return PTR_ERR(pd); + if (IS_ERR_OR_NULL(pd)) + return PTR_ERR(pd) ? 
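/*
 * Editor's note, illustration only: the fimc_md_probe() change above
 * fixes a sign bug. PTR_ERR() returns a negative errno, so the deferral
 * check must compare against the negative constant:
 */
ret = PTR_ERR(pinctrl);		/* e.g. -517, i.e. -EPROBE_DEFER */
if (ret != -EPROBE_DEFER)	/* "ret != EPROBE_DEFER" is always true */
	dev_err(dev, "Failed to get pinctrl: %d\n", ret);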
: -ENODATA; core->pmdomains[i] = pd; } diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index ea13170a6a2cf..de34a87d1130e 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -158,6 +158,8 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) else return NULL; fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) + return NULL; } pixmp->width = clamp(pixmp->width, frame_width_min(inst), diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index f336a95432732..6cbec3bbfce66 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1584,8 +1584,18 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = { .port_num = MFC_NUM_PORTS_V7, .buf_size = &buf_size_v7, .fw_name[0] = "s5p-mfc-v7.fw", - .clk_names = {"mfc", "sclk_mfc"}, - .num_clocks = 2, + .clk_names = {"mfc"}, + .num_clocks = 1, +}; + +static struct s5p_mfc_variant mfc_drvdata_v7_3250 = { + .version = MFC_VERSION_V7, + .version_bit = MFC_V7_BIT, + .port_num = MFC_NUM_PORTS_V7, + .buf_size = &buf_size_v7, + .fw_name[0] = "s5p-mfc-v7.fw", + .clk_names = {"mfc", "sclk_mfc"}, + .num_clocks = 2, }; static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = { @@ -1655,6 +1665,9 @@ static const struct of_device_id exynos_mfc_match[] = { }, { .compatible = "samsung,mfc-v7", .data = &mfc_drvdata_v7, + }, { + .compatible = "samsung,exynos3250-mfc", + .data = &mfc_drvdata_v7_3250, }, { .compatible = "samsung,mfc-v8", .data = &mfc_drvdata_v8, diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index da138c314963a..58822ec5370e2 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -468,8 +468,10 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx) s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); /* Wait until instance is returned or timeout occurred */ if (s5p_mfc_wait_for_done_ctx(ctx, - S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) + S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)){ + clear_work_bit_irqsave(ctx); mfc_err("Err returning instance\n"); + } /* Free resources */ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index acc2217dd7e90..62a1ad347fa75 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -1218,6 +1218,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) unsigned long mb_y_addr, mb_c_addr; int slice_type; unsigned int strm_size; + bool src_ready; slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); @@ -1257,7 +1258,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) } } } - if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) { + if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING || + ctx->state == MFCINST_FINISHING)) { mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); if (mb_entry->flags & MFC_BUF_FLAG_USED) { @@ -1288,7 +1290,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size); vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); } - if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) + + src_ready = 
true; + if (ctx->state == MFCINST_RUNNING && ctx->src_queue_cnt == 0) + src_ready = false; + if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0) + src_ready = false; + if (!src_ready || ctx->dst_queue_cnt == 0) clear_work_bit(ctx); return 0; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index a1453053e31ab..ef8169f6c428c 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -1060,7 +1060,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) } /* aspect ratio VUI */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 5); reg |= ((p_h264->vui_sar & 0x1) << 5); writel(reg, mfc_regs->e_h264_options); @@ -1083,7 +1083,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) /* intra picture period for H.264 open GOP */ /* control */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 4); reg |= ((p_h264->open_gop & 0x1) << 4); writel(reg, mfc_regs->e_h264_options); @@ -1097,23 +1097,23 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) } /* 'WEIGHTED_BI_PREDICTION' for B is disable */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x3 << 9); writel(reg, mfc_regs->e_h264_options); /* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 14); writel(reg, mfc_regs->e_h264_options); /* ASO */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 6); reg |= ((p_h264->aso & 0x1) << 6); writel(reg, mfc_regs->e_h264_options); /* hier qp enable */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 8); reg |= ((p_h264->open_gop & 0x1) << 8); writel(reg, mfc_regs->e_h264_options); @@ -1134,7 +1134,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) writel(reg, mfc_regs->e_h264_num_t_layer); /* frame packing SEI generation */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 25); reg |= ((p_h264->sei_frame_packing & 0x1) << 25); writel(reg, mfc_regs->e_h264_options); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index dbe7788083a4b..b7e0ec265b70c 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -937,6 +937,7 @@ static int configure_channels(struct c8sectpfei *fei) if (ret) { dev_err(fei->dev, "configure_memdma_and_inputblock failed\n"); + of_node_put(child); goto err_unmap; } index++; diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c index cc2856efea594..f2b0c490187cd 100644 --- a/drivers/media/platform/xilinx/xilinx-vipp.c +++ b/drivers/media/platform/xilinx/xilinx-vipp.c @@ -472,7 +472,7 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev) { struct device_node *ports; struct device_node *port; - int ret; + int ret = 0; ports = of_get_child_by_name(xdev->dev->of_node, "ports"); if (ports == NULL) { @@ -482,13 +482,14 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev) for_each_child_of_node(ports, port) { ret = xvip_graph_dma_init_one(xdev, port); - if (ret < 0) { + if (ret) { of_node_put(port); - return ret; + break; } } - return 0; + 
of_node_put(ports); + return ret; } static void xvip_graph_cleanup(struct xvip_composite_device *xdev) diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 3f8634a465730..1365ae732b799 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -733,8 +733,10 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, /* start radio */ retval = si470x_start_usb(radio); - if (retval < 0) + if (retval < 0 && !radio->int_in_running) goto err_buf; + else if (retval < 0) /* in case of radio->int_in_running == 1 */ + goto err_all; /* set initial frequency */ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index bc9ac6002e259..98a38755c694e 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -646,15 +646,14 @@ static int send_packet(struct imon_context *ictx) pr_err_ratelimited("error submitting urb(%d)\n", retval); } else { /* Wait for transmission to complete (or abort) */ - mutex_unlock(&ictx->lock); retval = wait_for_completion_interruptible( &ictx->tx.finished); if (retval) { usb_kill_urb(ictx->tx_urb); pr_err_ratelimited("task interrupted\n"); } - mutex_lock(&ictx->lock); + ictx->tx.busy = false; retval = ictx->tx.status; if (retval) pr_err_ratelimited("packet tx failed (%d)\n", retval); @@ -958,7 +957,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, if (ictx->disconnected) return -ENODEV; - mutex_lock(&ictx->lock); + if (mutex_lock_interruptible(&ictx->lock)) + return -ERESTARTSYS; if (!ictx->dev_present_intf0) { pr_err_ratelimited("no iMON device present\n"); diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index de4cf6eb5258b..142319c48405d 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1077,7 +1077,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout) struct mceusb_dev *ir = dev->priv; unsigned int units; - units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT); + units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT); cmdbuf[2] = units >> 8; cmdbuf[3] = units; diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c index fc64d0c8492a9..3c281265a9ecc 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c +++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c @@ -456,26 +456,20 @@ static int vidtv_bridge_dvb_init(struct vidtv_dvb *dvb) for (j = j - 1; j >= 0; --j) dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->dmx_fe[j]); -fail_dmx_dev: dvb_dmxdev_release(&dvb->dmx_dev); -fail_dmx: +fail_dmx_dev: dvb_dmx_release(&dvb->demux); +fail_dmx: +fail_demod_probe: + for (i = i - 1; i >= 0; --i) { + dvb_unregister_frontend(dvb->fe[i]); fail_fe: - for (j = i; j >= 0; --j) - dvb_unregister_frontend(dvb->fe[j]); + dvb_module_release(dvb->i2c_client_tuner[i]); fail_tuner_probe: - for (j = i; j >= 0; --j) - if (dvb->i2c_client_tuner[j]) - dvb_module_release(dvb->i2c_client_tuner[j]); - -fail_demod_probe: - for (j = i; j >= 0; --j) - if (dvb->i2c_client_demod[j]) - dvb_module_release(dvb->i2c_client_demod[j]); - + dvb_module_release(dvb->i2c_client_demod[i]); + } fail_adapter: dvb_unregister_adapter(&dvb->adapter); - fail_i2c: i2c_del_adapter(&dvb->i2c_adapter); diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c index 4b0ae6f51d765..857529ce3638a 100644 --- 
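/*
 * Editor's note, not part of the patch: the c8sectpfe, of_memory and
 * xilinx-vipp hunks above fix the same leak class. The iterator in
 * for_each_child_of_node() drops the child's reference on every normal
 * step, but not when the loop exits early, so the exit path must do it
 * (init_one() is a stand-in):
 */
for_each_child_of_node(parent, child) {
	ret = init_one(child);
	if (ret) {
		of_node_put(child);	/* drop the ref the iterator holds */
		break;
	}
}
of_node_put(parent);	/* balances of_get_child_by_name() and friends */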
a/drivers/media/test-drivers/vimc/vimc-core.c +++ b/drivers/media/test-drivers/vimc/vimc-core.c @@ -357,7 +357,7 @@ static int __init vimc_init(void) if (ret) { dev_err(&vimc_pdev.dev, "platform driver registration failed (err=%d)\n", ret); - platform_driver_unregister(&vimc_pdrv); + platform_device_unregister(&vimc_pdev); return ret; } diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c index 1e356dc65d318..761d2abd40067 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.c +++ b/drivers/media/test-drivers/vivid/vivid-core.c @@ -330,6 +330,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a return vivid_vid_out_g_fbuf(file, fh, a); } +/* + * Only support the framebuffer of one of the vivid instances. + * Anything else is rejected. + */ +bool vivid_validate_fb(const struct v4l2_framebuffer *a) +{ + struct vivid_dev *dev; + int i; + + for (i = 0; i < n_devs; i++) { + dev = vivid_devs[i]; + if (!dev || !dev->video_pbase) + continue; + if ((unsigned long)a->base == dev->video_pbase && + a->fmt.width <= dev->display_width && + a->fmt.height <= dev->display_height && + a->fmt.bytesperline <= dev->display_byte_stride) + return true; + } + return false; +} + static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a) { struct video_device *vdev = video_devdata(file); @@ -850,8 +872,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst, /* how many inputs do we have and of what type? */ dev->num_inputs = num_inputs[inst]; - if (dev->num_inputs < 1) - dev->num_inputs = 1; + if (node_type & 0x20007) { + if (dev->num_inputs < 1) + dev->num_inputs = 1; + } else { + dev->num_inputs = 0; + } if (dev->num_inputs >= MAX_INPUTS) dev->num_inputs = MAX_INPUTS; for (i = 0; i < dev->num_inputs; i++) { @@ -868,8 +894,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst, /* how many outputs do we have and of what type? 
*/ dev->num_outputs = num_outputs[inst]; - if (dev->num_outputs < 1) - dev->num_outputs = 1; + if (node_type & 0x40300) { + if (dev->num_outputs < 1) + dev->num_outputs = 1; + } else { + dev->num_outputs = 0; + } if (dev->num_outputs >= MAX_OUTPUTS) dev->num_outputs = MAX_OUTPUTS; for (i = 0; i < dev->num_outputs; i++) { diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h index 99e69b8f770f0..6aa32c8e6fb5c 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.h +++ b/drivers/media/test-drivers/vivid/vivid-core.h @@ -609,4 +609,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev) return dev->output_type[dev->output] == HDMI; } +bool vivid_validate_fb(const struct v4l2_framebuffer *a); + #endif diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index eadf28ab1e393..437889e51ca05 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -452,6 +452,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); dev->crop_cap = dev->src_rect; dev->crop_bounds_cap = dev->src_rect; + if (dev->bitmap_cap && + (dev->compose_cap.width != dev->crop_cap.width || + dev->compose_cap.height != dev->crop_cap.height)) { + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; + } dev->compose_cap = dev->crop_cap; if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap)) dev->compose_cap.height /= 2; @@ -909,6 +915,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection struct vivid_dev *dev = video_drvdata(file); struct v4l2_rect *crop = &dev->crop_cap; struct v4l2_rect *compose = &dev->compose_cap; + unsigned orig_compose_w = compose->width; + unsigned orig_compose_h = compose->height; unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1; int ret; @@ -953,6 +961,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection if (dev->has_compose_cap) { v4l2_rect_set_min_size(compose, &min_rect); v4l2_rect_set_max_size(compose, &max_rect); + v4l2_rect_map_inside(compose, &fmt); } dev->fmt_cap_rect = fmt; tpg_s_buf_height(&dev->tpg, fmt.height); @@ -1025,17 +1034,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection s->r.height /= factor; } v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); - if (dev->bitmap_cap && (compose->width != s->r.width || - compose->height != s->r.height)) { - vfree(dev->bitmap_cap); - dev->bitmap_cap = NULL; - } *compose = s->r; break; default: return -EINVAL; } + if (dev->bitmap_cap && (compose->width != orig_compose_w || + compose->height != orig_compose_h)) { + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; + } tpg_s_crop_compose(&dev->tpg, crop, compose); return 0; } @@ -1276,7 +1285,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh, return -EINVAL; if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8) return -EINVAL; - if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage) + if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height) + return -EINVAL; + + /* + * Only support the framebuffer of one of the vivid instances. + * Anything else is rejected. 
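*
* (Editor's aside, not in the original patch: two details here are easy
* to miss. The old check "height * bytesperline < sizeimage" could
* overflow a u32 and wrongly pass for huge values; the rewritten form
* "bytesperline > sizeimage / height" stays in range. And
* vivid_validate_fb() only accepts a base address that matches some
* instance's video_pbase with width, height and stride inside that
* instance's display limits, so arbitrary user pointers are rejected.)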
+ */ + if (!vivid_validate_fb(a)) return -EINVAL; dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base); diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index a2563c2540808..2299d5cca8ffb 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -512,7 +512,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; - if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc)) + if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc)) return -ENODEV; switch (fc_usb->udev->speed) { diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index 86788771175b7..32b4ee65c2802 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n if (msg[i].addr == 0x99) { req = 0xBE; index = 0; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } value = msg[i].buf[0] & 0x00ff; length = 1; az6027_usb_out_op(d, req, value, index, data, length); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 61439c8f33cab..58eea8ab54779 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -81,7 +81,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) ret = dvb_usb_adapter_stream_init(adap); if (ret) - return ret; + goto stream_init_err; ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs); if (ret) @@ -114,6 +114,8 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) dvb_usb_adapter_dvb_exit(adap); dvb_init_err: dvb_usb_adapter_stream_exit(adap); +stream_init_err: + kfree(adap->priv); return ret; } diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index af48705c704f8..942d0005c55e8 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, const struct v4l2_bt_timings *bt = &t->bt; const struct v4l2_bt_timings_cap *cap = &dvcap->bt; u32 caps = cap->capabilities; + const u32 max_vert = 10240; + u32 max_hor = 3 * bt->width; if (t->type != V4L2_DV_BT_656_1120) return false; @@ -161,6 +163,26 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) return false; + + /* sanity checks for the blanking timings */ + if (!bt->interlaced && + (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch)) + return false; + /* + * Some video receivers cannot properly separate the frontporch, + * backporch and sync values, and instead they only have the total + * blanking. That can be assigned to any of these three fields. + * So just check that none of these are way out of range. 
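*
* (Editor's aside with a worked example, not part of the patch: for a
* 1920-pixel-wide mode, max_hor = 3 * 1920 = 5760, so ordinary values
* such as hsync = 44 or hbackporch = 148 pass, while a corrupted field
* of 65536 is rejected; the vertical blanking fields are capped by the
* fixed max_vert = 10240.)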
+ */ + if (bt->hfrontporch > max_hor || + bt->hsync > max_hor || bt->hbackporch > max_hor) + return false; + if (bt->vfrontporch > max_vert || + bt->vsync > max_vert || bt->vbackporch > max_vert) + return false; + if (bt->interlaced && (bt->il_vfrontporch > max_vert || + bt->il_vsync > max_vert || bt->il_vbackporch > max_vert)) + return false; return fnc == NULL || fnc(t, fnc_handle); } EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings); diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index 52312ce2ba056..f2c4393595574 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory { static int __videobuf_dc_alloc(struct device *dev, struct videobuf_dma_contig_memory *mem, - unsigned long size, gfp_t flags) + unsigned long size) { mem->size = size; - mem->vaddr = dma_alloc_coherent(dev, mem->size, - &mem->dma_handle, flags); - + mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle, + GFP_KERNEL); if (!mem->vaddr) { dev_err(dev, "memory alloc size %ld failed\n", mem->size); return -ENOMEM; @@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q, return videobuf_dma_contig_user_get(mem, vb); /* allocate memory for the read() method */ - if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size), - GFP_KERNEL)) + if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size))) return -ENOMEM; break; case V4L2_MEMORY_OVERLAY: @@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); - if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize), - GFP_KERNEL | __GFP_COMP)) + if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize))) goto error; - /* Try to remap memory */ - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - /* the "vm_pgoff" is just used in v4l2 to find the * corresponding buffer data structure which is allocated * earlier and it does not mean the offset from the physical * buffer start address as usual. So set it to 0 to pass - * the sanity check in vm_iomap_memory(). + * the sanity check in dma_mmap_coherent(). */ vma->vm_pgoff = 0; - - retval = vm_iomap_memory(vma, mem->dma_handle, mem->size); + retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle, + mem->size); if (retval) { dev_err(q->dev, "mmap: remap failed with error %d. 
", retval); diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c index 9c49d00c2a966..ea6e9e1eaf046 100644 --- a/drivers/memory/atmel-sdramc.c +++ b/drivers/memory/atmel-sdramc.c @@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev) caps = of_device_get_match_data(&pdev->dev); if (caps->has_ddrck) { - clk = devm_clk_get(&pdev->dev, "ddrck"); + clk = devm_clk_get_enabled(&pdev->dev, "ddrck"); if (IS_ERR(clk)) return PTR_ERR(clk); - clk_prepare_enable(clk); } if (caps->has_mpddr_clk) { - clk = devm_clk_get(&pdev->dev, "mpddr"); + clk = devm_clk_get_enabled(&pdev->dev, "mpddr"); if (IS_ERR(clk)) { pr_err("AT91 RAMC: couldn't get mpddr clock\n"); return PTR_ERR(clk); } - clk_prepare_enable(clk); } return 0; diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index 8450638e86700..efc6c08db2b70 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c @@ -280,10 +280,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev) if (IS_ERR(devbus->base)) return PTR_ERR(devbus->base); - clk = devm_clk_get(&pdev->dev, NULL); + clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); - clk_prepare_enable(clk); /* * Obtain clock period in picoseconds, diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c index d9f5437d3bce0..1791614f324b7 100644 --- a/drivers/memory/of_memory.c +++ b/drivers/memory/of_memory.c @@ -134,6 +134,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr, for_each_child_of_node(np_ddr, np_tim) { if (of_device_is_compatible(np_tim, tim_compat)) { if (of_do_get_timings(np_tim, &timings[i])) { + of_node_put(np_tim); devm_kfree(dev, timings); goto default_timings; } @@ -282,6 +283,7 @@ const struct lpddr3_timings if (of_device_is_compatible(np_tim, tim_compat)) { if (of_lpddr3_do_get_timings(np_tim, &timings[i])) { devm_kfree(dev, timings); + of_node_put(np_tim); goto default_timings; } i++; diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index b0b251bb207f3..1a6964f1ba6a7 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -416,6 +416,7 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) if (init) init(adev, child); of_platform_device_create(child, NULL, &adev->dev); + of_node_put(child); return 0; diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index a016b39fe9b0e..5f1f6f3a0696a 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -69,7 +69,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev, int irq; irq = platform_get_irq(pdev, 0); - if (irq <= 0) + if (irq < 0) return irq; tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops, @@ -84,6 +84,19 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev, return 0; } +static int mx25_tsadc_unset_irq(struct platform_device *pdev) +{ + struct mx25_tsadc *tsadc = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + if (irq >= 0) { + irq_set_chained_handler_and_data(irq, NULL, NULL); + irq_domain_remove(tsadc->domain); + } + + return 0; +} + static void mx25_tsadc_setup_clk(struct platform_device *pdev, struct mx25_tsadc *tsadc) { @@ -171,18 +184,21 @@ static int mx25_tsadc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tsadc); - return devm_of_platform_populate(dev); + ret = devm_of_platform_populate(dev); + if (ret) + goto err_irq; + + return 0; + +err_irq: + 
mx25_tsadc_unset_irq(pdev); + + return ret; } static int mx25_tsadc_remove(struct platform_device *pdev) { - struct mx25_tsadc *tsadc = platform_get_drvdata(pdev); - int irq = platform_get_irq(pdev, 0); - - if (irq) { - irq_set_chained_handler_and_data(irq, NULL, NULL); - irq_domain_remove(tsadc->domain); - } + mx25_tsadc_unset_irq(pdev); return 0; } diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c index ddd64f9e3341e..926653e1f6033 100644 --- a/drivers/mfd/intel_soc_pmic_core.c +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -95,6 +95,7 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c, return 0; err_del_irq_chip: + pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup)); regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); return ret; } diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c index 348439a3fbbd4..39006297f3d27 100644 --- a/drivers/mfd/lp8788-irq.c +++ b/drivers/mfd/lp8788-irq.c @@ -175,6 +175,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq) IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lp8788-irq", irqd); if (ret) { + irq_domain_remove(lp->irqdm); dev_err(lp->dev, "failed to create a thread for IRQ_N\n"); return ret; } @@ -188,4 +189,6 @@ void lp8788_irq_exit(struct lp8788 *lp) { if (lp->irq) free_irq(lp->irq, lp->irqdm); + if (lp->irqdm) + irq_domain_remove(lp->irqdm); } diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c index 768d556b3fe98..5c3d642c8e3a3 100644 --- a/drivers/mfd/lp8788.c +++ b/drivers/mfd/lp8788.c @@ -195,8 +195,16 @@ static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id) if (ret) return ret; - return mfd_add_devices(lp->dev, -1, lp8788_devs, - ARRAY_SIZE(lp8788_devs), NULL, 0, NULL); + ret = mfd_add_devices(lp->dev, -1, lp8788_devs, + ARRAY_SIZE(lp8788_devs), NULL, 0, NULL); + if (ret) + goto err_exit_irq; + + return 0; + +err_exit_irq: + lp8788_irq_exit(lp); + return ret; } static int lp8788_remove(struct i2c_client *cl) diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 3bbb29a7e7a57..2411b7a2e6f47 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -63,6 +63,8 @@ #define SPIBASE_BYT 0x54 #define SPIBASE_BYT_SZ 512 #define SPIBASE_BYT_EN BIT(1) +#define BYT_BCR 0xfc +#define BYT_BCR_WPD BIT(0) #define SPIBASE_LPT 0x3800 #define SPIBASE_LPT_SZ 512 @@ -1083,12 +1085,57 @@ static int lpc_ich_init_wdt(struct pci_dev *dev) return ret; } +static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data) +{ + u32 val; + + val = readl(base + BYT_BCR); + if (!(val & BYT_BCR_WPD)) { + val |= BYT_BCR_WPD; + writel(val, base + BYT_BCR); + val = readl(base + BYT_BCR); + } + + return val & BYT_BCR_WPD; +} + +static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + +static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data) +{ + unsigned int spi = PCI_DEVFN(13, 2); + struct pci_bus *bus = data; + u32 bcr; + + pci_bus_read_config_dword(bus, spi, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_bus_write_config_dword(bus, spi, BCR, bcr); + pci_bus_read_config_dword(bus, spi, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + static int lpc_ich_init_spi(struct pci_dev *dev) { struct lpc_ich_priv *priv = pci_get_drvdata(dev); struct resource *res = &intel_spi_res[0]; struct 
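/*
 * Editor's note, sketch only: the three lpc_ich set_writeable helpers
 * above share a read-modify-write-verify shape. The readback matters
 * because firmware may keep the write-protect bit locked; this
 * hypothetical BYT-style helper shows the idea:
 */
static bool set_wpd(void __iomem *base)
{
	u32 val = readl(base + 0xfc);		/* BYT_BCR */

	if (!(val & BIT(0))) {			/* BYT_BCR_WPD */
		val |= BIT(0);
		writel(val, base + 0xfc);
		val = readl(base + 0xfc);	/* verify the bit stuck */
	}
	return val & BIT(0);
}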
intel_spi_boardinfo *info; - u32 spi_base, rcba, bcr; + u32 spi_base, rcba; info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL); if (!info) @@ -1102,6 +1149,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) if (spi_base & SPIBASE_BYT_EN) { res->start = spi_base & ~(SPIBASE_BYT_SZ - 1); res->end = res->start + SPIBASE_BYT_SZ - 1; + + info->set_writeable = lpc_ich_byt_set_writeable; } break; @@ -1112,8 +1161,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) res->start = spi_base + SPIBASE_LPT; res->end = res->start + SPIBASE_LPT_SZ - 1; - pci_read_config_dword(dev, BCR, &bcr); - info->writeable = !!(bcr & BCR_WPD); + info->set_writeable = lpc_ich_lpt_set_writeable; + info->data = dev; } break; @@ -1134,8 +1183,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) res->start = spi_base & 0xfffffff0; res->end = res->start + SPIBASE_APL_SZ - 1; - pci_bus_read_config_dword(bus, spi, BCR, &bcr); - info->writeable = !!(bcr & BCR_WPD); + info->set_writeable = lpc_ich_bxt_set_writeable; + info->data = bus; } pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1); diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 6d2f4a0a901dc..37ad72d8cde2a 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -1720,7 +1720,12 @@ static struct platform_driver sm501_plat_driver = { static int __init sm501_base_init(void) { - platform_driver_register(&sm501_plat_driver); + int ret; + + ret = platform_driver_register(&sm501_plat_driver); + if (ret < 0) + return ret; + return pci_register_driver(&sm501_pci_driver); } diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 186308f1f8eba..6334376826a92 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -959,10 +959,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n * if it returns an error! */ if ((rc = cxl_register_afu(afu))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_afu_add(afu))) - goto err_put1; + goto err_del_dev; /* * pHyp doesn't expose the programming models supported by the @@ -978,7 +978,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n afu->modes_supported = CXL_MODE_DIRECTED; if ((rc = cxl_afu_select_best_mode(afu))) - goto err_put2; + goto err_remove_sysfs; adapter->afu[afu->slice] = afu; @@ -998,10 +998,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n return 0; -err_put2: +err_remove_sysfs: cxl_sysfs_afu_remove(afu); -err_put1: - device_unregister(&afu->dev); +err_del_dev: + device_del(&afu->dev); +err_put_dev: + put_device(&afu->dev); free = false; guest_release_serr_irq(afu); err2: @@ -1135,18 +1137,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic * even if it returns an error! 
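*
* (Editor's aside, not part of the patch: this constraint is why the
* error paths below switch from device_unregister() to device_del()
* plus put_device(); once a device has been registered, its embedded
* kobject owns the lifetime, so a failure path must drop the reference
* with put_device() instead of freeing the structure directly.)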
*/ if ((rc = cxl_register_adapter(adapter))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_put1; + goto err_del_dev; /* release the context lock as the adapter is configured */ cxl_adapter_context_unlock(adapter); return adapter; -err_put1: - device_unregister(&adapter->dev); +err_del_dev: + device_del(&adapter->dev); +err_put_dev: + put_device(&adapter->dev); free = false; cxl_guest_remove_chardev(adapter); err1: diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 2ba899f5659ff..d183836d80e3f 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, rc = get_phb_index(np, phb_index); if (rc) { pr_err("cxl: invalid phb index\n"); + of_node_put(np); return rc; } @@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) * if it returns an error! */ if ((rc = cxl_register_afu(afu))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_afu_add(afu))) - goto err_put1; + goto err_del_dev; adapter->afu[afu->slice] = afu; @@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) return 0; -err_put1: +err_del_dev: + device_del(&afu->dev); +err_put_dev: pci_deconfigure_afu(afu); cxl_debugfs_afu_remove(afu); - device_unregister(&afu->dev); + put_device(&afu->dev); return rc; err_free_native: @@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) * even if it returns an error! */ if ((rc = cxl_register_adapter(adapter))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_put1; + goto err_del_dev; /* Release the context lock as adapter is configured */ cxl_adapter_context_unlock(adapter); return adapter; -err_put1: +err_del_dev: + device_del(&adapter->dev); +err_put_dev: /* This should mirror cxl_remove_adapter, except without the * sysfs parts */ cxl_debugfs_adapter_remove(adapter); cxl_deconfigure_adapter(adapter); - device_unregister(&adapter->dev); + put_device(&adapter->dev); return ERR_PTR(rc); err_release: diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 2c3142b4b5dd7..67a51f69cf9aa 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -247,6 +247,13 @@ static void fastrpc_free_map(struct kref *ref) dma_buf_put(map->buf); } + if (map->fl) { + spin_lock(&map->fl->lock); + list_del(&map->node); + spin_unlock(&map->fl->lock); + map->fl = NULL; + } + kfree(map); } @@ -256,10 +263,12 @@ static void fastrpc_map_put(struct fastrpc_map *map) kref_put(&map->refcount, fastrpc_free_map); } -static void fastrpc_map_get(struct fastrpc_map *map) +static int fastrpc_map_get(struct fastrpc_map *map) { - if (map) - kref_get(&map->refcount); + if (!map) + return -ENOENT; + + return kref_get_unless_zero(&map->refcount) ? 
0 : -ENOENT; } static int fastrpc_map_find(struct fastrpc_user *fl, int fd, @@ -1112,12 +1121,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl, fl->init_mem = NULL; fastrpc_buf_free(imem); err_alloc: - if (map) { - spin_lock(&fl->lock); - list_del(&map->node); - spin_unlock(&fl->lock); - fastrpc_map_put(map); - } + fastrpc_map_put(map); err: kfree(args); @@ -1194,10 +1198,8 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) fastrpc_context_put(ctx); } - list_for_each_entry_safe(map, m, &fl->maps, node) { - list_del(&map->node); + list_for_each_entry_safe(map, m, &fl->maps, node) fastrpc_map_put(map); - } list_for_each_entry_safe(buf, b, &fl->mmaps, node) { list_del(&buf->node); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index afb2e78df4d60..eabbdf17b0c6d 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -111,6 +111,8 @@ #define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ +#define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 5324b65d0d29a..f2765d6b8c043 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -117,6 +117,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index 4d490b92d951f..3ced98b506f43 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -204,6 +204,18 @@ static int read_dvsec_vendor(struct pci_dev *dev) return 0; } +/** + * get_dvsec_vendor0() - Find a related PCI device (function 0) + * @dev: PCI device to match + * @dev0: The PCI device (function 0) found + * @out_pos: The position of PCI device (function 0) + * + * Returns 0 on success, negative on failure. + * + * NOTE: If it's successful, the reference of dev0 is increased, + * so after using it, the callers must call pci_dev_put() to give + * up the reference. 
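*
* (Editor's aside: the fastrpc change above applies the same ownership
* discipline; fastrpc_map_get() now uses kref_get_unless_zero(), so a
* lookup racing with the final fastrpc_map_put() returns -ENOENT
* instead of reviving a map whose refcount already reached zero.)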
+ */ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, int *out_pos) { @@ -213,10 +225,14 @@ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, dev = get_function_0(dev); if (!dev) return -1; + } else { + dev = pci_dev_get(dev); } pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); - if (!pos) + if (!pos) { + pci_dev_put(dev); return -1; + } *dev0 = dev; *out_pos = pos; return 0; @@ -233,6 +249,7 @@ int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, &reset_reload); + pci_dev_put(dev0); *val = !!(reset_reload & BIT(0)); return 0; } @@ -254,6 +271,7 @@ int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) reset_reload &= ~BIT(0); pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, reset_reload); + pci_dev_put(dev0); return 0; } diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index c742ab02ae186..524ded87964d1 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -259,6 +259,8 @@ static long afu_ioctl(struct file *file, unsigned int cmd, if (IS_ERR(ev_ctx)) return PTR_ERR(ev_ctx); rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx); + if (rc) + eventfd_ctx_put(ev_ctx); break; case OCXL_IOCTL_GET_METADATA: @@ -541,8 +543,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu) goto err_put; rc = device_register(&info->dev); - if (rc) - goto err_put; + if (rc) { + free_minor(info); + put_device(&info->dev); + return rc; + } rc = ocxl_sysfs_register_afu(info); if (rc) diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index d3844730eacaf..48eec5fe7397b 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -331,6 +331,22 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test, return false; } +static int pci_endpoint_test_validate_xfer_params(struct device *dev, + struct pci_endpoint_test_xfer_param *param, size_t alignment) +{ + if (!param->size) { + dev_dbg(dev, "Data size is zero\n"); + return -EINVAL; + } + + if (param->size > SIZE_MAX - alignment) { + dev_dbg(dev, "Maximum transfer data size exceeded\n"); + return -EINVAL; + } + + return 0; +} + static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, unsigned long arg) { @@ -362,9 +378,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, return false; } + err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment); + if (err) + return false; + size = param.size; - if (size > SIZE_MAX - alignment) - goto err; use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); if (use_dma) @@ -496,9 +514,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, return false; } + err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment); + if (err) + return false; + size = param.size; - if (size > SIZE_MAX - alignment) - goto err; use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); if (use_dma) @@ -594,9 +614,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, return false; } + err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment); + if (err) + return false; + size = param.size; - if (size > SIZE_MAX - alignment) - goto err; use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); if (use_dma) diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 723825524ea0c..9c7d475d18901 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -648,6 +648,7 @@ int
gru_handle_user_call_os(unsigned long cb) if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) return -EINVAL; +again: gts = gru_find_lock_gts(cb); if (!gts) return -EINVAL; @@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb) if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) goto exit; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + goto again; + } /* * CCH may contain stale data if ts_force_cch_reload is set. @@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg) } else { gts->ts_user_blade_id = req.val1; gts->ts_user_chiplet_id = req.val0; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + return ret; + } } break; case sco_gseg_owner: diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 40ac59dd018c9..e2325e3d077ea 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru, * chiplet. Misassignment can occur if the process migrates to a different * blade or if the user changes the selected blade/chiplet. */ -void gru_check_context_placement(struct gru_thread_state *gts) +int gru_check_context_placement(struct gru_thread_state *gts) { struct gru_state *gru; + int ret = 0; /* * If the current task is the context owner, verify that the @@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts) * references. Pthread apps use non-owner references to the CBRs. */ gru = gts->ts_gru; + /* + * If gru or gts->ts_tgid_owner isn't initialized properly, return + * success to indicate that the caller does not need to unload the + * gru context. The caller is responsible for inspecting and + * reinitializing them if needed.
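To make the new contract concrete, here is a hedged sketch of the retry pattern the int return value enables, condensed from the gru_handle_user_call_os() hunk above (a fragment, not a standalone function):

again:
	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	if (gru_check_context_placement(gts)) {
		/* Misplaced: drop the lock, unload the context and retry
		 * so it is reloaded on the right blade/chiplet. */
		gru_unlock_gts(gts);
		gru_unload_context(gts, 1);
		goto again;
	}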
+ */ if (!gru || gts->ts_tgid_owner != current->tgid) - return; + return ret; if (!gru_check_chiplet_assignment(gru, gts)) { STAT(check_context_unload); - gru_unload_context(gts, 1); + ret = -EINVAL; } else if (gru_retarget_intr(gts)) { STAT(check_context_retarget_intr); } + + return ret; } @@ -934,7 +943,12 @@ vm_fault_t gru_fault(struct vm_fault *vmf) mutex_lock(>s->ts_ctxlock); preempt_disable(); - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + preempt_enable(); + mutex_unlock(>s->ts_ctxlock); + gru_unload_context(gts, 1); + return VM_FAULT_NOPAGE; + } if (!gts->ts_gru) { STAT(load_user_context); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 5ce8f3081e960..10f0a083b1fab 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -637,7 +637,7 @@ extern int gru_user_flush_tlb(unsigned long arg); extern int gru_user_unload_context(unsigned long arg); extern int gru_get_exception_detail(unsigned long arg); extern int gru_set_context_option(unsigned long address); -extern void gru_check_context_placement(struct gru_thread_state *gts); +extern int gru_check_context_placement(struct gru_thread_state *gts); extern int gru_cpu_fault_map_id(void); extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); extern void gru_flush_all_tlb(struct gru_state *gru); diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 228f2eb1d4762..2aebbfda104d8 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work) spin_unlock_irqrestore(&fm->lock, flags); } if (sock) - tifm_free_device(&sock->dev); + put_device(&sock->dev); } spin_lock_irqsave(&fm->lock, flags); } diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index a49782dd903cd..d4d388f021ccd 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -852,6 +852,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle) u32 context_id = vmci_get_context_id(); struct vmci_event_qp ev; + memset(&ev, 0, sizeof(ev)); ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); @@ -1465,6 +1466,7 @@ static int qp_notify_peer(bool attach, * kernel. 
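Both memset() additions in this vmci_queue_pair.c file close the same kind of infoleak: a struct assembled field by field on the stack still carries stale bytes in its padding holes when it is copied to the hypervisor. A self-contained sketch of the pattern, with illustrative types rather than VMCI's:

#include <stdint.h>
#include <string.h>

struct example_msg {
	uint32_t id;
	uint8_t  flags;		/* the compiler typically pads 3 bytes here */
	uint64_t payload;
};

void example_fill(struct example_msg *m)
{
	memset(m, 0, sizeof(*m));	/* clears padding the assignments below never touch */
	m->id = 1;
	m->flags = 0;
	m->payload = 42;
}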
*/ + memset(&ev, 0, sizeof(ev)); ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 66a00b7c751f7..6622e32621874 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1069,6 +1069,11 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) nr = blk_rq_sectors(req); do { + unsigned int erase_arg = card->erase_arg; + + if (mmc_card_broken_sd_discard(card)) + erase_arg = SD_ERASE_ARG; + err = 0; if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -1079,7 +1084,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) card->ext_csd.generic_cmd6_time); } if (!err) - err = mmc_erase(card, from, nr, card->erase_arg); + err = mmc_erase(card, from, nr, erase_arg); } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); if (err) status = BLK_STS_IOERR; diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index 7bd392d55cfa5..5c6986131faff 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -70,6 +70,7 @@ struct mmc_fixup { #define EXT_CSD_REV_ANY (-1u) #define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_SANDISK_SD 0x3 #define CID_MANFID_ATP 0x9 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 @@ -222,4 +223,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c) return c->quirks & MMC_QUIRK_BROKEN_HPI; } +static inline int mmc_card_broken_sd_discard(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD; +} + #endif diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index eb82f6aac951f..8f24653942536 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1128,7 +1128,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) mmc_power_cycle(host, ocr); } else { bit = fls(ocr) - 1; - ocr &= 3 << bit; + /* + * The bit variable represents the highest voltage bit set in + * the OCR register. + * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V), + * we must shift the mask '3' with (bit - 1). + */ + ocr &= 3 << (bit - 1); if (bit != host->ios.vdd) dev_warn(mmc_dev(host), "exceeding card's volts\n"); } @@ -1533,6 +1539,11 @@ void mmc_init_erase(struct mmc_card *card) card->pref_erase = 0; } +static bool is_trim_arg(unsigned int arg) +{ + return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG; +} + static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, unsigned int arg, unsigned int qty) { @@ -1831,7 +1842,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) return -EOPNOTSUPP; - if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) && + if (mmc_card_mmc(card) && is_trim_arg(arg) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) return -EOPNOTSUPP; @@ -1861,7 +1872,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, * identified by the card->eg_boundary flag. 
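A worked example for the mmc_select_voltage() mask change earlier in this core.c hunk (voltage bit numbers follow the MMC_VDD_* definitions; values illustrative):

/* Card advertises bits 20 (3.2-3.3V) and 21 (3.3-3.4V): ocr = 0x00300000
 * bit = fls(ocr) - 1 = 21               (highest advertised window)
 * old: ocr &= 3 << bit       -> keeps bits 21-22, silently drops bit 20
 * new: ocr &= 3 << (bit - 1) -> keeps bits 20-21, the two adjacent
 *                               windows the new comment describes
 */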
*/ rem = card->erase_size - (from % card->erase_size); - if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) { + if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) { err = mmc_do_erase(card, from, from + rem - 1, arg); from += rem; if ((err) || (to <= from)) diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 152e7525ed338..b9b6f000154bb 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -3195,7 +3195,8 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card, struct mmc_test_dbgfs_file *df; if (card->debugfs_root) - debugfs_create_file(name, mode, card->debugfs_root, card, fops); + file = debugfs_create_file(name, mode, card->debugfs_root, + card, fops); df = kmalloc(sizeof(*df), GFP_KERNEL); if (!df) { diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index d68e6e513a4f4..c8c0f50a2076d 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -99,6 +99,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_TRIM_BROKEN), + /* + * Some SD cards report discard support even though they don't + */ + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd, + MMC_QUIRK_BROKEN_SD_DISCARD), + END_FIXUP }; diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 0b09cdaaeb6c1..868b121ce4f35 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -853,7 +853,8 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) * the CCS bit is set as well. We deliberately deviate from the spec in * regards to this, which allows UHS-I to be supported for SDSC cards. */ - if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) { + if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) && + rocr && (*rocr & SD_ROCR_S18A)) { err = mmc_set_uhs_voltage(host, pocr); if (err == -EAGAIN) { retries--; @@ -932,15 +933,16 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, /* Erase init depends on CSD and SSR */ mmc_init_erase(card); - - /* - * Fetch switch information from card. - */ - err = mmc_read_switch(card); - if (err) - return err; } + /* + * Fetch switch information from card. Note, sd3_bus_mode can change if + * voltage switch outcome changes, so do this always. + */ + err = mmc_read_switch(card); + if (err) + return err; + /* * For SPI, enable CRC as appropriate. * This CRC enable is located AFTER the reading of the @@ -1089,26 +1091,15 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) && mmc_sd_card_using_v18(card) && host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) { - /* - * Re-read switch information in case it has changed since - * oldcard was initialized.
- */ - if (oldcard) { - err = mmc_read_switch(card); - if (err) - goto free_card; - } - if (mmc_sd_card_using_v18(card)) { - if (mmc_host_set_uhs_voltage(host) || - mmc_sd_init_uhs_card(card)) { - v18_fixup_failed = true; - mmc_power_cycle(host, ocr); - if (!oldcard) - mmc_remove_card(card); - goto retry; - } - goto cont; + if (mmc_host_set_uhs_voltage(host) || + mmc_sd_init_uhs_card(card)) { + v18_fixup_failed = true; + mmc_power_cycle(host, ocr); + if (!oldcard) + mmc_remove_card(card); + goto retry; } + goto cont; } /* Initialization sequence for UHS-I cards */ diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 3d709029e07ce..89dd49260080b 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -292,7 +292,14 @@ static void sdio_release_func(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - sdio_free_func_cis(func); + if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO)) + sdio_free_func_cis(func); + + /* + * We have now removed the link to the tuples in the + * card structure, so remove the reference. + */ + put_device(&func->card->dev); kfree(func->info); kfree(func->tmpbuf); @@ -324,6 +331,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card) device_initialize(&func->dev); + /* + * We may link to tuples in the card structure, + * we need to make sure we have a reference to it. + */ + get_device(&func->card->dev); + func->dev.parent = &card->dev; func->dev.bus = &sdio_bus_type; func->dev.release = sdio_release_func; @@ -377,10 +390,9 @@ int sdio_add_func(struct sdio_func *func) */ void sdio_remove_func(struct sdio_func *func) { - if (!sdio_func_present(func)) - return; + if (sdio_func_present(func)) + device_del(&func->dev); - device_del(&func->dev); of_node_put(func->dev.of_node); put_device(&func->dev); } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index b23773583179d..ce524f7e11fb2 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -391,12 +391,6 @@ int sdio_read_func_cis(struct sdio_func *func) if (ret) return ret; - /* - * Since we've linked to tuples in the card structure, - * we must make sure we have a reference to it. - */ - get_device(&func->card->dev); - /* * Vendor/device id is optional for function CIS, so * copy it from the card structure as needed. @@ -422,11 +416,5 @@ void sdio_free_func_cis(struct sdio_func *func) } func->tuples = NULL; - - /* - * We have now removed the link to the tuples in the - * card structure, so remove the reference. - */ - put_device(&func->card->dev); } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 30ff42fd173e2..82e1fbd6b2ff0 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1079,9 +1079,10 @@ config MMC_SDHCI_OMAP config MMC_SDHCI_AM654 tristate "Support for the SDHCI Controller in TI's AM654 SOCs" - depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO + depends on MMC_SDHCI_PLTFM && OF select MMC_SDHCI_IO_ACCESSORS select MMC_CQHCI + select REGMAP_MMIO help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's AM654 SOCs.
The controller supports diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index bfb8efeb7eb80..d01df01d4b4d1 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -1114,7 +1114,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) alcor_hw_init(host); dev_set_drvdata(&pdev->dev, host); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto free_host; + return 0; free_host: diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 444bd3a0a9227..af85b32c6c1c8 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2223,6 +2223,7 @@ static int atmci_init_slot(struct atmel_mci *host, { struct mmc_host *mmc; struct atmel_mci_slot *slot; + int ret; mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); if (!mmc) @@ -2306,11 +2307,13 @@ static int atmci_init_slot(struct atmel_mci *host, host->slot[id] = slot; mmc_regulator_get_supply(mmc); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { + mmc_free_host(mmc); + return ret; + } if (gpio_is_valid(slot->detect_pin)) { - int ret; - timer_setup(&slot->detect_timer, atmci_detect_change, 0); ret = request_irq(gpio_to_irq(slot->detect_pin), diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bd00515fbaba2..56a3bf51d446d 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -1097,8 +1097,9 @@ static int au1xmmc_probe(struct platform_device *pdev) if (host->platdata && host->platdata->cd_setup && !(mmc->caps & MMC_CAP_NEEDS_POLL)) host->platdata->cd_setup(mmc, 0); -out_clk: + clk_disable_unprepare(host->clk); +out_clk: clk_put(host->clk); out_irq: free_irq(host->irq, host); diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index aa3dfb9c1071b..62d00232f85ea 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1041,6 +1041,16 @@ static int jz4740_mmc_probe(struct platform_device* pdev) mmc->ops = &jz4740_mmc_ops; if (!mmc->f_max) mmc->f_max = JZ_MMC_CLK_RATE; + + /* + * There seems to be a problem with this driver on the JZ4760 and + * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz), + * the communication fails with many SD cards. + * Until this bug is sorted out, limit the maximum rate to 24 MHz. 
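Assuming JZ_MMC_CLK_RATE is the 24 MHz the comment above refers to, the clamp that follows behaves like this (an illustrative trace, not part of the patch):

/* DT sets max-frequency = 50000000 on a JZ4760:
 *   mmc->f_max = 50000000 -> clamped to 24000000
 *   mmc->f_min = 24000000 / 128 = 187500 Hz
 * Other SoC revisions keep their requested f_max untouched.
 */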
+ */ + if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE) + mmc->f_max = JZ_MMC_CLK_RATE; + mmc->f_min = mmc->f_max / 128; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index bccc85b3fc50a..19a6b55e344fe 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1280,7 +1280,9 @@ static int meson_mmc_probe(struct platform_device *pdev) } mmc->ops = &meson_mmc_ops; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto err_free_irq; return 0; diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c index a5e05ed0fda3e..9d35453e7371b 100644 --- a/drivers/mmc/host/mmc_hsq.c +++ b/drivers/mmc/host/mmc_hsq.c @@ -34,7 +34,7 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq) spin_lock_irqsave(&hsq->lock, flags); /* Make sure we are not already running a request now */ - if (hsq->mrq) { + if (hsq->mrq || hsq->recovery_halt) { spin_unlock_irqrestore(&hsq->lock, flags); return; } diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 02f4fd26e76a9..1d814919eb6be 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1450,7 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi) status = mmc_add_host(mmc); if (status != 0) - goto fail_add_host; + goto fail_glue_init; /* * Index 0 is card detect @@ -1458,7 +1458,7 @@ static int mmc_spi_probe(struct spi_device *spi) */ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000); if (status == -EPROBE_DEFER) - goto fail_add_host; + goto fail_gpiod_request; if (!status) { /* * The platform has a CD GPIO signal that may support @@ -1473,7 +1473,7 @@ static int mmc_spi_probe(struct spi_device *spi) /* Index 1 is write protect/read only */ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0); if (status == -EPROBE_DEFER) - goto fail_add_host; + goto fail_gpiod_request; if (!status) has_ro = true; @@ -1487,7 +1487,7 @@ static int mmc_spi_probe(struct spi_device *spi) ? 
", cd polling" : ""); return 0; -fail_add_host: +fail_gpiod_request: mmc_remove_host(mmc); fail_glue_init: mmc_spi_dma_free(host); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index b5684e5d79e60..5d83c8e7bf5cf 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2191,7 +2191,9 @@ static int mmci_probe(struct amba_device *dev, pm_runtime_set_autosuspend_delay(&dev->dev, 50); pm_runtime_use_autosuspend(&dev->dev); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto clk_disable; pm_runtime_put(&dev->dev); return 0; diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index ea67a7ef2390c..fb96bb76eefbe 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -111,8 +111,8 @@ #define CLK_DIV_MASK 0x7f /* REG_BUS_WIDTH */ -#define BUS_WIDTH_8 BIT(2) -#define BUS_WIDTH_4 BIT(1) +#define BUS_WIDTH_4_SUPPORT BIT(3) +#define BUS_WIDTH_4 BIT(2) #define BUS_WIDTH_1 BIT(0) #define MMC_VDD_360 23 @@ -527,9 +527,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_BUS_WIDTH_4: writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH); break; - case MMC_BUS_WIDTH_8: - writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH); - break; default: writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH); break; @@ -654,16 +651,8 @@ static int moxart_probe(struct platform_device *pdev) dmaengine_slave_config(host->dma_chan_rx, &cfg); } - switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) { - case 1: + if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT) mmc->caps |= MMC_CAP_4_BIT_DATA; - break; - case 2: - mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; - break; - default: - break; - } writel(0, host->base + REG_INTERRUPT_MASK); @@ -679,7 +668,9 @@ static int moxart_probe(struct platform_device *pdev) goto out; dev_set_drvdata(dev, mmc); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto out; dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 12ee07285980c..93a105b075645 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1167,7 +1167,9 @@ static int mxcmci_probe(struct platform_device *pdev) timer_setup(&host->watchdog, mxcmci_watchdog, 0); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto out_free_dma; return 0; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index aa9cc49206d15..5b6ede81fc9f2 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1987,7 +1987,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev) if (!ret) mmc->caps |= MMC_CAP_SDIO_IRQ; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto err_irq; if (mmc_pdata(host)->name != NULL) { ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 55868b6b86583..e25e9bb34eb39 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -763,7 +763,12 @@ static int pxamci_probe(struct platform_device *pdev) dev_warn(dev, "gpio_ro and get_ro() both defined\n"); } - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { + if (host->pdata && host->pdata->exit) + host->pdata->exit(dev, mmc); + goto out; + } return 0; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index ac01fb518386a..a49b8fe2a0982 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ 
b/drivers/mmc/host/renesas_sdhi_core.c @@ -537,7 +537,7 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host, SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) & sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); - if (priv->adjust_hs400_calib_table) + if (priv->quirks && (priv->quirks->hs400_calib_table || priv->quirks->hs400_bad_taps)) renesas_sdhi_adjust_hs400_mode_disable(host); sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 5fe4528e296e6..1be3a355f10d5 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -1332,6 +1332,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) #ifdef RTSX_USB_USE_LEDS_CLASS int err; #endif + int ret; ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); if (!ucr) @@ -1368,7 +1369,15 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) INIT_WORK(&host->led_work, rtsx_usb_update_led); #endif - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { +#ifdef RTSX_USB_USE_LEDS_CLASS + led_classdev_unregister(&host->led); +#endif + mmc_free_host(mmc); + pm_runtime_disable(&pdev->dev); + return ret; + } return 0; } diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index f24623aac2dbe..4d42b1810acea 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -12,28 +12,55 @@ #include #include +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "cqhci.h" #define SDHCI_VENDOR 0x78 #define SDHCI_VENDOR_ENHANCED_STRB 0x1 +#define SDHCI_VENDOR_GATE_SDCLK_EN 0x2 -#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0) -#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1) +#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0) +#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1) +#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2) + +#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0) +#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1) #define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 struct sdhci_brcmstb_priv { void __iomem *cfg_regs; - bool has_cqe; + unsigned int flags; }; struct brcmstb_match_priv { void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); struct sdhci_ops *ops; - unsigned int flags; + const unsigned int flags; }; +static inline void enable_clock_gating(struct sdhci_host *host) +{ + u32 reg; + + reg = sdhci_readl(host, SDHCI_VENDOR); + reg |= SDHCI_VENDOR_GATE_SDCLK_EN; + sdhci_writel(host, reg, SDHCI_VENDOR); +} + +void brcmstb_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); + + sdhci_and_cqhci_reset(host, mask); + + /* Reset will clear this, so re-enable it */ + if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK) + enable_clock_gating(host); +} + static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdhci_host *host = mmc_priv(mmc); @@ -129,22 +156,23 @@ static struct sdhci_ops sdhci_brcmstb_ops = { static struct sdhci_ops sdhci_brcmstb_ops_7216 = { .set_clock = sdhci_brcmstb_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = brcmstb_reset, .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, }; static struct brcmstb_match_priv match_priv_7425 = { - .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT | - BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT | + BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; static struct brcmstb_match_priv 
match_priv_7445 = { - .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; static const struct brcmstb_match_priv match_priv_7216 = { + .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE, .hs400es = sdhci_brcmstb_hs400es, .ops = &sdhci_brcmstb_ops_7216, }; @@ -176,7 +204,7 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host, bool dma64; int ret; - if (!priv->has_cqe) + if ((priv->flags & BRCMSTB_PRIV_FLAGS_HAS_CQE) == 0) return sdhci_add_host(host); dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n"); @@ -225,7 +253,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) struct sdhci_brcmstb_priv *priv; struct sdhci_host *host; struct resource *iomem; - bool has_cqe = false; struct clk *clk; int res; @@ -244,10 +271,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) return res; memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata)); - if (device_property_read_bool(&pdev->dev, "supports-cqe")) { - has_cqe = true; - match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; - } brcmstb_pdata.ops = match_priv->ops; host = sdhci_pltfm_init(pdev, &brcmstb_pdata, sizeof(struct sdhci_brcmstb_priv)); @@ -258,7 +281,10 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); priv = sdhci_pltfm_priv(pltfm_host); - priv->has_cqe = has_cqe; + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { + priv->flags |= BRCMSTB_PRIV_FLAGS_HAS_CQE; + match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; + } /* Map in the non-standard CFG registers */ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -273,6 +299,14 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) if (res) goto err; + /* + * Automatic clock gating does not work for SD cards that may + * voltage switch so only enable it for non-removable devices. + */ + if ((match_priv->flags & BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE) && + (host->mmc->caps & MMC_CAP_NONREMOVABLE)) + priv->flags |= BRCMSTB_PRIV_FLAGS_GATE_CLOCK; + /* * If the chip has enhanced strobe and it's enabled, add * callback @@ -287,14 +321,14 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) * properties through mmc_of_parse(). */ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); - if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT) + if (match_priv->flags & BRCMSTB_MATCH_FLAGS_NO_64BIT) host->caps &= ~SDHCI_CAN_64BIT; host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_DDR50); host->quirks |= SDHCI_QUIRK_MISSING_CAPS; - if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT) + if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT) host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; res = sdhci_brcmstb_add_host(host, priv); diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h new file mode 100644 index 0000000000000..cf8e7ba71bbd7 --- /dev/null +++ b/drivers/mmc/host/sdhci-cqhci.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2022 The Chromium OS Authors + * + * Support that applies to the combination of SDHCI and CQHCI, while not + * expressing a dependency between the two modules. 
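A hedged usage sketch for the helper this new header introduces (it mirrors the sdhci_am654 and sdhci-tegra conversions later in this diff): a driver whose controller may host a CQE points its reset op at the combined helper instead of plain sdhci_reset().

static struct sdhci_ops example_ops = {
	.set_clock	= sdhci_set_clock,
	.set_bus_width	= sdhci_set_bus_width,
	.reset		= sdhci_and_cqhci_reset,	/* deactivates CQHCI on SDHCI_RESET_ALL */
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};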
+ */ + +#ifndef __MMC_HOST_SDHCI_CQHCI_H__ +#define __MMC_HOST_SDHCI_CQHCI_H__ + +#include "cqhci.h" +#include "sdhci.h" + +static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + + sdhci_reset(host, mask); +} + +#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */ diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index a4bd85b200a3e..70f388f83485c 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -26,6 +26,7 @@ #include #include #include +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" #include "cqhci.h" @@ -102,6 +103,7 @@ #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 #define ESDHC_TUNING_START_TAP_MASK 0x7f #define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7) +#define ESDHC_TUNING_STEP_DEFAULT 0x1 #define ESDHC_TUNING_STEP_MASK 0x00070000 #define ESDHC_TUNING_STEP_SHIFT 16 @@ -294,22 +296,6 @@ struct pltfm_imx_data { struct pm_qos_request pm_qos_req; }; -static const struct platform_device_id imx_esdhc_devtype[] = { - { - .name = "sdhci-esdhc-imx25", - .driver_data = (kernel_ulong_t) &esdhc_imx25_data, - }, { - .name = "sdhci-esdhc-imx35", - .driver_data = (kernel_ulong_t) &esdhc_imx35_data, - }, { - .name = "sdhci-esdhc-imx51", - .driver_data = (kernel_ulong_t) &esdhc_imx51_data, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype); - static const struct of_device_id imx_esdhc_dt_ids[] = { { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, }, { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, @@ -1243,7 +1229,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) static void esdhc_reset(struct sdhci_host *host, u8 mask) { - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); @@ -1315,7 +1301,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); struct cqhci_host *cq_host = host->mmc->cqe_private; - int tmp; + u32 tmp; if (esdhc_is_usdhc(imx_data)) { /* @@ -1368,17 +1354,24 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); - tmp |= ESDHC_STD_TUNING_EN | - ESDHC_TUNING_START_TAP_DEFAULT; - if (imx_data->boarddata.tuning_start_tap) { - tmp &= ~ESDHC_TUNING_START_TAP_MASK; + tmp |= ESDHC_STD_TUNING_EN; + + /* + * ROM code or bootloader may config the start tap + * and step, unmask them first. + */ + tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK); + if (imx_data->boarddata.tuning_start_tap) tmp |= imx_data->boarddata.tuning_start_tap; - } + else + tmp |= ESDHC_TUNING_START_TAP_DEFAULT; if (imx_data->boarddata.tuning_step) { - tmp &= ~ESDHC_TUNING_STEP_MASK; tmp |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT; + } else { + tmp |= ESDHC_TUNING_STEP_DEFAULT + << ESDHC_TUNING_STEP_SHIFT; } /* Disable the CMD CRC check for tuning, if not, need to @@ -1464,7 +1457,7 @@ static void esdhc_cqe_enable(struct mmc_host *mmc) * system resume back. 
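The one-character fix just below swaps a logical AND for the intended bitwise test; with illustrative values (CQHCI_HALT is bit 0 of CQHCI_CTL):

/* u32 reg = 0x100;
 * reg && CQHCI_HALT -> 1 (logical AND: any non-zero register "passes",
 *                         so the halt state was never actually checked)
 * reg & CQHCI_HALT  -> 0 (bitwise AND: tests the HALT bit itself)
 */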
*/ cqhci_writel(cq_host, 0, CQHCI_CTL); - if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) + if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) dev_err(mmc_dev(host->mmc), "failed to exit halt state when enable CQE\n"); @@ -1545,72 +1538,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, } #endif -static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, - struct sdhci_host *host, - struct pltfm_imx_data *imx_data) -{ - struct esdhc_platform_data *boarddata = &imx_data->boarddata; - int err; - - if (!host->mmc->parent->platform_data) { - dev_err(mmc_dev(host->mmc), "no board data!\n"); - return -EINVAL; - } - - imx_data->boarddata = *((struct esdhc_platform_data *) - host->mmc->parent->platform_data); - /* write_protect */ - if (boarddata->wp_type == ESDHC_WP_GPIO) { - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request write-protect gpio!\n"); - return err; - } - } - - /* card_detect */ - switch (boarddata->cd_type) { - case ESDHC_CD_GPIO: - err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request card-detect gpio!\n"); - return err; - } - fallthrough; - - case ESDHC_CD_CONTROLLER: - /* we have a working card_detect back */ - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; - break; - - case ESDHC_CD_PERMANENT: - host->mmc->caps |= MMC_CAP_NONREMOVABLE; - break; - - case ESDHC_CD_NONE: - break; - } - - switch (boarddata->max_bus_width) { - case 8: - host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; - break; - case 4: - host->mmc->caps |= MMC_CAP_4_BIT_DATA; - break; - case 1: - default: - host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; - break; - } - - return 0; -} - static int sdhci_esdhc_imx_probe(struct platform_device *pdev) { const struct of_device_id *of_id = @@ -1630,8 +1557,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) imx_data = sdhci_pltfm_priv(pltfm_host); - imx_data->socdata = of_id ? 
of_id->data : (struct esdhc_soc_data *) - pdev->id_entry->driver_data; + imx_data->socdata = of_id->data; if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0); @@ -1692,6 +1618,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = usdhc_execute_tuning; } + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + if (err) + goto disable_ahb_clk; + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) sdhci_esdhc_ops.platform_execute_tuning = esdhc_executing_tuning; @@ -1699,13 +1629,15 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - if (imx_data->socdata->flags & ESDHC_FLAG_HS400) + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400) host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400; if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23) host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN; - if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { host->mmc->caps2 |= MMC_CAP2_HS400_ES; host->mmc_host_ops.hs400_enhanced_strobe = esdhc_hs400_enhanced_strobe; @@ -1727,13 +1659,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } - if (of_id) - err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); - else - err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data); - if (err) - goto disable_ahb_clk; - sdhci_esdhc_imx_hwinit(host); err = sdhci_add_host(host); @@ -1944,7 +1869,6 @@ static struct platform_driver sdhci_esdhc_imx_driver = { .of_match_table = imx_esdhc_dt_ids, .pm = &sdhci_esdhc_pmops, }, - .id_table = imx_esdhc_devtype, .probe = sdhci_esdhc_imx_probe, .remove = sdhci_esdhc_imx_remove, }; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 192cb8b20b472..ad2e73f9a58f4 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2182,6 +2182,7 @@ static const struct sdhci_msm_variant_info sm8250_sdhci_var = { static const struct of_device_id sdhci_msm_dt_match[] = { {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var}, {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var}, + {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var}, {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var}, {.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var}, {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var}, diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index fc38db64a6b48..9da49dc152489 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -25,6 +25,7 @@ #include #include "cqhci.h" +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #define SDHCI_ARASAN_VENDOR_REGISTER 0x78 @@ -359,7 +360,7 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) { ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index a78b060ce8471..8b02fe3916d12 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ 
b/drivers/mmc/host/sdhci-pci-core.c @@ -967,6 +967,12 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) dmi_match(DMI_SYS_VENDOR, "IRBIS")); } +static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC && + dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC."); +} + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); @@ -975,9 +981,11 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->mmc->caps2 |= MMC_CAP2_CQE; if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { - slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, - slot->host->mmc_host_ops.hs400_enhanced_strobe = - intel_hs400_enhanced_strobe; + if (!jsl_broken_hs400es(slot)) { + slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES; + slot->host->mmc_host_ops.hs400_enhanced_strobe = + intel_hs400_enhanced_strobe; + } slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; } @@ -1791,6 +1799,8 @@ static int amd_probe(struct sdhci_pci_chip *chip) } } + pci_dev_put(smbus_dev); + if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ) chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 8c357e3b78d7c..72234790a310b 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -31,6 +31,7 @@ #define O2_SD_CAPS 0xE0 #define O2_SD_ADMA1 0xE2 #define O2_SD_ADMA2 0xE7 +#define O2_SD_MISC_CTRL2 0xF0 #define O2_SD_INF_MOD 0xF1 #define O2_SD_MISC_CTRL4 0xFC #define O2_SD_MISC_CTRL 0x1C0 @@ -822,6 +823,12 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) /* Set Tuning Windows to 5 */ pci_write_config_byte(chip->pdev, O2_SD_TUNING_CTRL, 0x55); + //Adjust 1st and 2nd CD debounce time + pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32); + scratch_32 &= 0xFFE7FFFF; + scratch_32 |= 0x00180000; + pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32); + pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1); /* Lock WP */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 9cd8862e6cbd0..540ebccaa9a35 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -224,13 +224,15 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); sdhci_enable_clk(host, div); - /* enable auto gate sdhc_enable_auto_gate */ - val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); - mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | - SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; - if (mask != (val & mask)) { - val |= mask; - sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + /* Enable CLK_AUTO when the clock is greater than 400K. 
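Put concretely, and assuming the rates below, the gating hunk that follows behaves like this (note the same patch also lowers sdhci_sprd_get_min_clock() to 100 kHz further down):

/* clk = 100000   (new minimum, card identification) -> auto-gate stays off
 * clk = 400000   (legacy init ceiling)              -> auto-gate stays off
 * clk = 50000000 (data transfer)                    -> OUTR/INNR auto-gate enabled
 */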
*/ + if (clk > 400000) { + val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); + mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | + SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; + if (mask != (val & mask)) { + val |= mask; + sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + } } } @@ -296,7 +298,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host) static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host) { - return 400000; + return 100000; } static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host, @@ -457,7 +459,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) } if (IS_ERR(sprd_host->pinctrl)) - return 0; + goto reset; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_180: @@ -485,6 +487,8 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) /* Wait for 300 ~ 500 us for pin state stable */ usleep_range(300, 500); + +reset: sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); return 0; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index d50b691f6c44e..d8fd2b5efd387 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -24,6 +24,7 @@ #include #include +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "cqhci.h" @@ -361,7 +362,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; u32 misc_ctrl, clk_ctrl, pad_ctrl; - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (!(mask & SDHCI_RESET_ALL)) return; @@ -760,7 +761,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) */ host_clk = tegra_host->ddr_signaling ? clock * 2 : clock; clk_set_rate(pltfm_host->clk, host_clk); - tegra_host->curr_clk_rate = host_clk; + tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk); if (tegra_host->ddr_signaling) host->max_clk = host_clk; else diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index d42e86cdff12e..133f0d3764804 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -339,6 +339,7 @@ static void sdhci_init(struct sdhci_host *host, int soft) if (soft) { /* force clock reconfiguration */ host->clock = 0; + host->reinit_uhs = true; mmc->ops->set_ios(mmc, &mmc->ios); } } @@ -2258,11 +2259,46 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) } EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); +static bool sdhci_timing_has_preset(unsigned char timing) +{ + switch (timing) { + case MMC_TIMING_UHS_SDR12: + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_SDR50: + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + return true; + }; + return false; +} + +static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) +{ + return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && + sdhci_timing_has_preset(timing); +} + +static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) +{ + /* + * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK + * Frequency. Check if preset values need to be enabled, or the Driver + * Strength needs updating. Note, clock changes are handled separately. 
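The early return added further down in sdhci_set_ios() targets this sequence; a hedged sketch of the flow it short-circuits:

/* set_ios(clock = 0)                    -> clock gated for the switch
 * start_signal_voltage_switch()
 * set_ios(clock = f, timing unchanged)  -> turning_on_clk == true and
 *   sdhci_presetable_values_change() == false, so the function returns
 *   before rewriting HISPD/UHS bits and toggling the clock yet again.
 */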
+ */ + return !host->preset_enabled && + (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); +} + void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdhci_host *host = mmc_priv(mmc); + bool reinit_uhs = host->reinit_uhs; + bool turning_on_clk = false; u8 ctrl; + host->reinit_uhs = false; + if (ios->power_mode == MMC_POWER_UNDEFINED) return; @@ -2288,6 +2324,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sdhci_enable_preset_value(host, false); if (!ios->clock || ios->clock != host->clock) { + turning_on_clk = ios->clock && !host->clock; + host->ops->set_clock(host, ios->clock); host->clock = ios->clock; @@ -2314,6 +2352,17 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->ops->set_bus_width(host, ios->bus_width); + /* + * Special case to avoid multiple clock changes during voltage + * switching. + */ + if (!reinit_uhs && + turning_on_clk && + host->timing == ios->timing && + host->version >= SDHCI_SPEC_300 && + !sdhci_presetable_values_change(host, ios)) + return; + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { @@ -2357,6 +2406,7 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + host->drv_type = ios->drv_type; } else { /* * According to SDHC Spec v3.00, if the Preset Value @@ -2384,19 +2434,14 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->ops->set_uhs_signaling(host, ios->timing); host->timing = ios->timing; - if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && - ((ios->timing == MMC_TIMING_UHS_SDR12) || - (ios->timing == MMC_TIMING_UHS_SDR25) || - (ios->timing == MMC_TIMING_UHS_SDR50) || - (ios->timing == MMC_TIMING_UHS_SDR104) || - (ios->timing == MMC_TIMING_UHS_DDR50) || - (ios->timing == MMC_TIMING_MMC_DDR52))) { + if (sdhci_preset_needed(host, ios->timing)) { u16 preset; sdhci_enable_preset_value(host, true); preset = sdhci_get_preset_value(host); ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, preset); + host->drv_type = ios->drv_type; } /* Re-enable SD Clock */ @@ -3707,6 +3752,7 @@ int sdhci_resume_host(struct sdhci_host *host) sdhci_init(host, 0); host->pwr = 0; host->clock = 0; + host->reinit_uhs = true; mmc->ops->set_ios(mmc, &mmc->ios); } else { sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); @@ -3769,6 +3815,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) /* Force clock and power re-program */ host->pwr = 0; host->clock = 0; + host->reinit_uhs = true; mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); mmc->ops->set_ios(mmc, &mmc->ios); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 8b1650f37fbba..4db57c3a8cd4b 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -520,6 +520,8 @@ struct sdhci_host { unsigned int clock; /* Current clock (MHz) */ u8 pwr; /* Current voltage */ + u8 drv_type; /* Current UHS-I driver type */ + bool reinit_uhs; /* Force UHS-related re-initialization */ bool runtime_suspended; /* Host is runtime suspended */ bool bus_on; /* Bus power prevents runtime suspend */ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 7cab9d831afb3..24cd6d3dc6477 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -15,6 +15,7 @@ #include #include "cqhci.h" +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" /* CTL_CFG Registers */ @@ -378,7 +379,7 @@ static void sdhci_am654_reset(struct 
sdhci_host *host, u8 mask) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) { ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); @@ -464,7 +465,7 @@ static struct sdhci_ops sdhci_am654_ops = { .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, .irq = sdhci_am654_cqhci_irq, - .reset = sdhci_reset, + .reset = sdhci_and_cqhci_reset, }; static const struct sdhci_pltfm_data sdhci_am654_pdata = { @@ -494,7 +495,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = { .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, .irq = sdhci_am654_cqhci_irq, - .reset = sdhci_reset, + .reset = sdhci_and_cqhci_reset, }; static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 3f5977979cf25..6c4f43e112826 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -168,6 +168,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) if (reg & SDHCI_CAN_DO_8BIT) priv->vendor_hs200 = F_SDH30_EMMC_HS200; + if (!(reg & SDHCI_TIMEOUT_CLK_MASK)) + host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; + ret = sdhci_add_host(host); if (ret) goto err_add_host; diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index fc62773602ec8..9215069c61560 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1459,9 +1459,11 @@ static int sunxi_mmc_remove(struct platform_device *pdev) struct sunxi_mmc_host *host = mmc_priv(mmc); mmc_remove_host(mmc); - pm_runtime_force_suspend(&pdev->dev); - disable_irq(host->irq); - sunxi_mmc_disable(host); + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) { + disable_irq(host->irq); + sunxi_mmc_disable(host); + } dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); mmc_free_host(mmc); diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c index 8d037c2071abc..497791ffada6d 100644 --- a/drivers/mmc/host/toshsd.c +++ b/drivers/mmc/host/toshsd.c @@ -651,7 +651,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto unmap; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto free_irq; base = pci_resource_start(pdev, 0); dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq); @@ -660,6 +662,8 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +free_irq: + free_irq(pdev->irq, host); unmap: pci_iounmap(pdev, host->ioaddr); release: diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index f07c71db3cafe..f6b525fb5c0ed 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1154,7 +1154,9 @@ static int via_sd_probe(struct pci_dev *pcidev, pcidev->subsystem_device == 0x3891) sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto unmap; return 0; diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index 97beece62fec4..72f65f32abbc7 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -2049,6 +2049,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) return; kref_get(&vub300->kref); if (enable) { + set_current_state(TASK_RUNNING); mutex_lock(&vub300->irq_mutex); if (vub300->irqs_queued) { 
vub300->irqs_queued -= 1; @@ -2064,6 +2065,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) vub300_queue_poll_work(vub300, 0); } mutex_unlock(&vub300->irq_mutex); + set_current_state(TASK_INTERRUPTIBLE); } else { vub300->irq_enabled = 0; } @@ -2299,14 +2301,14 @@ static int vub300_probe(struct usb_interface *interface, 0x0000, 0x0000, &vub300->system_port_status, sizeof(vub300->system_port_status), 1000); if (retval < 0) { - goto error4; + goto error5; } else if (sizeof(vub300->system_port_status) == retval) { vub300->card_present = (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; } else { - goto error4; + goto error5; } usb_set_intfdata(interface, vub300); INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); @@ -2329,8 +2331,13 @@ static int vub300_probe(struct usb_interface *interface, "USB vub300 remote SDIO host controller[%d]" "connected with no SD/SDIO card inserted\n", interface_to_InterfaceNumber(interface)); - mmc_add_host(mmc); + retval = mmc_add_host(mmc); + if (retval) + goto error6; + return 0; +error6: + del_timer_sync(&vub300->inactivity_timer); error5: mmc_free_host(mmc); /* diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index cd63ea865b77f..f3090216e0dcc 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1703,7 +1703,17 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, */ wbsd_init_device(host); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { + if (!pnp) + wbsd_chip_poweroff(host); + + wbsd_release_resources(host); + wbsd_free_mmc(dev); + + mmc_free_host(mmc); + return ret; + } pr_info("%s: W83L51xD", mmc_hostname(mmc)); if (host->chip_id != 0) diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index cf10949fb0acc..3933195488575 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -849,7 +849,7 @@ static int wmt_mci_probe(struct platform_device *pdev) if (IS_ERR(priv->clk_sdmmc)) { dev_err(&pdev->dev, "Error getting clock\n"); ret = PTR_ERR(priv->clk_sdmmc); - goto fail5; + goto fail5_and_a_half; } ret = clk_prepare_enable(priv->clk_sdmmc); @@ -859,13 +859,20 @@ static int wmt_mci_probe(struct platform_device *pdev) /* configure the controller to a known 'ready' state */ wmt_reset_hardware(mmc); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto fail7; dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); return 0; +fail7: + clk_disable_unprepare(priv->clk_sdmmc); fail6: clk_put(priv->clk_sdmmc); +fail5_and_a_half: + dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16, + priv->dma_desc_buffer, priv->dma_desc_device_addr); fail5: free_irq(dma_irq, priv); fail4: diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index a030792115bc2..fa42473d04c1b 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1975,9 +1975,14 @@ static int __init docg3_probe(struct platform_device *pdev) dev_err(dev, "No I/O memory resource defined\n"); return ret; } - base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE); ret = -ENOMEM; + base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE); + if (!base) { + dev_err(dev, "devm_ioremap dev failed\n"); + return ret; + } + cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade), GFP_KERNEL); if (!cascade) diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c index 72f5c7b300790..add4386f99f00 100644 --- 
a/drivers/mtd/lpddr/lpddr2_nvm.c +++ b/drivers/mtd/lpddr/lpddr2_nvm.c @@ -433,6 +433,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) /* lpddr2_nvm address range */ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!add_range) + return -ENODEV; /* Populate map_info data structure */ *map = (struct map_info) { diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index 7d96758a8f04e..6e5e557559704 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -66,6 +66,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) if (!info->map.virt) { printk(KERN_WARNING "Failed to ioremap %s\n", info->map.name); + kfree(info); return -ENOMEM; } info->map.cached = ioremap_cache(info->map.phys, info->map.size); @@ -87,6 +88,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) iounmap((void *)info->map.virt); if (info->map.cached) iounmap(info->map.cached); + kfree(info); return -EIO; } info->mtd->dev.parent = &pdev->dev; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index a5197a4819025..b2d88ff90e936 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -667,8 +667,10 @@ int add_mtd_device(struct mtd_info *mtd) dev_set_drvdata(&mtd->dev, mtd); of_node_get(mtd_get_of_node(mtd)); error = device_register(&mtd->dev); - if (error) + if (error) { + put_device(&mtd->dev); goto fail_added; + } /* Add the nvmem provider */ error = mtd_nvmem_add(mtd); diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 2228c34f3deab..0d84f8156d8e4 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -405,6 +405,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc, dma_async_issue_pending(nc->dmac); wait_for_completion(&finished); + dma_unmap_single(nc->dev, buf_dma, len, dir); return 0; diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index b2af7f81fdf8f..c174b6dc3c6ba 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -727,36 +727,40 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip) struct fsl_lbc_regs __iomem *lbc = ctrl->regs; unsigned int al; - switch (chip->ecc.engine_type) { /* * if ECC was not chosen in DT, decide whether to use HW or SW ECC from * CS Base Register */ - case NAND_ECC_ENGINE_TYPE_NONE: + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) { /* If CS Base Register selects full hardware ECC then use it */ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == BR_DECC_CHK_GEN) { - chip->ecc.read_page = fsl_elbc_read_page; - chip->ecc.write_page = fsl_elbc_write_page; - chip->ecc.write_subpage = fsl_elbc_write_subpage; - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; - mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops); - chip->ecc.size = 512; - chip->ecc.bytes = 3; - chip->ecc.strength = 1; } else { /* otherwise fall back to default software ECC */ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; chip->ecc.algo = NAND_ECC_ALGO_HAMMING; } + } + + switch (chip->ecc.engine_type) { + /* if HW ECC was chosen, setup ecc and oob layout */ + case NAND_ECC_ENGINE_TYPE_ON_HOST: + chip->ecc.read_page = fsl_elbc_read_page; + chip->ecc.write_page = fsl_elbc_write_page; + chip->ecc.write_subpage = fsl_elbc_write_subpage; + mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops); + chip->ecc.size = 512; + chip->ecc.bytes = 3; + chip->ecc.strength = 1; break; - /* 
if SW ECC was chosen in DT, we do not need to set anything here */ + /* if none or SW ECC was chosen, we do not need to set anything here */ + case NAND_ECC_ENGINE_TYPE_NONE: case NAND_ECC_ENGINE_TYPE_SOFT: + case NAND_ECC_ENGINE_TYPE_ON_DIE: break; - /* should we also implement *_ECC_ENGINE_CONTROLLER to do as above? */ default: return -EINVAL; } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 92e8ca56f5665..200d3ab343b00 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -653,8 +653,9 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, unsigned int tRP_ps; bool use_half_period; int sample_delay_ps, sample_delay_factor; - u16 busy_timeout_cycles; + unsigned int busy_timeout_cycles; u8 wrn_dly_sel; + u64 busy_timeout_ps; if (sdr->tRC_min >= 30000) { /* ONFI non-EDO modes [0-3] */ @@ -678,7 +679,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); - busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); + busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max); + busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps); hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index d00c916f133bd..dce35f81e0a55 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2672,7 +2672,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, chip->controller = &nfc->controller; nand_set_flash_node(chip, np); - if (!of_property_read_bool(np, "marvell,nand-keep-config")) + if (of_property_read_bool(np, "marvell,nand-keep-config")) chip->options |= NAND_KEEP_TIMINGS; mtd = nand_to_mtd(chip); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 327a2257ec26d..38f490088d764 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -454,7 +454,7 @@ static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips, if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) { mtd->ecc_stats.corrected += ECC_ERR_CNT(*info); *bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info)); - *correct_bitmap |= 1 >> i; + *correct_bitmap |= BIT_ULL(i); continue; } if ((nand->options & NAND_NEED_SCRAMBLING) && @@ -800,7 +800,7 @@ static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf, u8 *data = buf + i * ecc->size; u8 *oob = nand->oob_poi + i * (ecc->bytes + 2); - if (correct_bitmap & (1 << i)) + if (correct_bitmap & BIT_ULL(i)) continue; ret = nand_check_erased_ecc_chunk(data, ecc->size, oob, ecc->bytes + 2, diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c index 6012a10f10c83..13daf9bffd081 100644 --- a/drivers/mtd/parsers/bcm47xxpart.c +++ b/drivers/mtd/parsers/bcm47xxpart.c @@ -233,11 +233,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read middle of the block */ - err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read, + err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read, (uint8_t *)buf); if (err && !mtd_is_bitflip(err)) { pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", - offset, err); + offset + (blocksize / 2), err); continue; } 
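[Editor's note, not part of the patch above] The meson_nand hunks fix a classic bitmap bug worth spelling out: the old code built a 64-bit "corrected" bitmap with `1 >> i`, which evaluates to 1 only for i == 0 and to 0 for every other chunk, and tested it with `1 << i`, a 32-bit int shift that is undefined for i >= 32 even though the bitmap has 64 slots. A minimal userspace sketch of the difference, using a local stand-in for the kernel's BIT_ULL() macro (illustration only):

/* Userspace stand-in for the kernel's BIT_ULL() macro. */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	uint64_t correct_bitmap = 0;
	int i;

	/* Mark chunks 0, 1 and 40 as corrected, as the driver would. */
	for (i = 0; i < 64; i++) {
		if (i == 0 || i == 1 || i == 40)
			correct_bitmap |= BIT_ULL(i);
	}

	/*
	 * The buggy form (1 >> i) would have set only bit 0, and the
	 * buggy test (1 << i) could never see bits 32..63 at all.
	 */
	printf("bit 1 set?  %d\n", !!(correct_bitmap & BIT_ULL(1)));  /* prints 1 */
	printf("bit 40 set? %d\n", !!(correct_bitmap & BIT_ULL(40))); /* prints 1 */
	return 0;
}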
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c index 555fe55d14aee..8a3c1f3c2d2e8 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c @@ -16,12 +16,30 @@ #define BCR 0xdc #define BCR_WPD BIT(0) +static bool intel_spi_pci_set_writeable(void __iomem *base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + /* Try to make the chip read/write */ + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + static const struct intel_spi_boardinfo bxt_info = { .type = INTEL_SPI_BXT, + .set_writeable = intel_spi_pci_set_writeable, }; static const struct intel_spi_boardinfo cnl_info = { .type = INTEL_SPI_CNL, + .set_writeable = intel_spi_pci_set_writeable, }; static int intel_spi_pci_probe(struct pci_dev *pdev, @@ -29,7 +47,6 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, { struct intel_spi_boardinfo *info; struct intel_spi *ispi; - u32 bcr; int ret; ret = pcim_enable_device(pdev); @@ -41,15 +58,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, if (!info) return -ENOMEM; - /* Try to make the chip read/write */ - pci_read_config_dword(pdev, BCR, &bcr); - if (!(bcr & BCR_WPD)) { - bcr |= BCR_WPD; - pci_write_config_dword(pdev, BCR, bcr); - pci_read_config_dword(pdev, BCR, &bcr); - } - info->writeable = !!(bcr & BCR_WPD); - + info->data = pdev; ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); if (IS_ERR(ispi)) return PTR_ERR(ispi); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c index b54a56a68100e..6c802db6b4af0 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi.c @@ -53,17 +53,17 @@ #define FRACC 0x50 #define FREG(n) (0x54 + ((n) * 4)) -#define FREG_BASE_MASK 0x3fff +#define FREG_BASE_MASK GENMASK(14, 0) #define FREG_LIMIT_SHIFT 16 -#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) +#define FREG_LIMIT_MASK GENMASK(30, 16) /* Offset is from @ispi->pregs */ #define PR(n) ((n) * 4) #define PR_WPE BIT(31) #define PR_LIMIT_SHIFT 16 -#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) +#define PR_LIMIT_MASK GENMASK(30, 16) #define PR_RPE BIT(15) -#define PR_BASE_MASK 0x3fff +#define PR_BASE_MASK GENMASK(14, 0) /* Offsets are from @ispi->sregs */ #define SSFSTS_CTL 0x00 @@ -117,7 +117,7 @@ #define ERASE_OPCODE_SHIFT 8 #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) #define ERASE_64K_OPCODE_SHIFT 16 -#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) +#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT) #define INTEL_SPI_TIMEOUT 5000 /* ms */ #define INTEL_SPI_FIFO_SZ 64 @@ -132,7 +132,6 @@ * @sregs: Start of software sequencer registers * @nregions: Maximum number of regions * @pr_num: Maximum number of protected range registers - * @writeable: Is the chip writeable * @locked: Is SPI setting locked * @swseq_reg: Use SW sequencer in register reads/writes * @swseq_erase: Use SW sequencer in erase operation @@ -150,7 +149,6 @@ struct intel_spi { void __iomem *sregs; size_t nregions; size_t pr_num; - bool writeable; bool locked; bool swseq_reg; bool swseq_erase; @@ -305,6 +303,14 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi) INTEL_SPI_TIMEOUT * 1000); } +static bool intel_spi_set_writeable(struct intel_spi *ispi) +{ + if (!ispi->info->set_writeable) + return 
false; + + return ispi->info->set_writeable(ispi->base, ispi->info->data); +} + static int intel_spi_init(struct intel_spi *ispi) { u32 opmenu0, opmenu1, lvscc, uvscc, val; @@ -317,19 +323,6 @@ static int intel_spi_init(struct intel_spi *ispi) ispi->nregions = BYT_FREG_NUM; ispi->pr_num = BYT_PR_NUM; ispi->swseq_reg = true; - - if (writeable) { - /* Disable write protection */ - val = readl(ispi->base + BYT_BCR); - if (!(val & BYT_BCR_WPD)) { - val |= BYT_BCR_WPD; - writel(val, ispi->base + BYT_BCR); - val = readl(ispi->base + BYT_BCR); - } - - ispi->writeable = !!(val & BYT_BCR_WPD); - } - break; case INTEL_SPI_LPT: @@ -359,6 +352,12 @@ static int intel_spi_init(struct intel_spi *ispi) return -EINVAL; } + /* Try to disable write protection if user asked to do so */ + if (writeable && !intel_spi_set_writeable(ispi)) { + dev_warn(ispi->dev, "can't disable chip write protection\n"); + writeable = false; + } + /* Disable #SMI generation from HW sequencer */ val = readl(ispi->base + HSFSTS_CTL); val &= ~HSFSTS_CTL_FSMIE; @@ -885,9 +884,12 @@ static void intel_spi_fill_partition(struct intel_spi *ispi, /* * If any of the regions have protection bits set, make the * whole partition read-only to be on the safe side. + * + * Also if the user did not ask the chip to be writeable + * mask the bit too. */ - if (intel_spi_is_protected(ispi, base, limit)) - ispi->writeable = false; + if (!writeable || intel_spi_is_protected(ispi, base, limit)) + part->mask_flags |= MTD_WRITEABLE; end = (limit << 12) + 4096; if (end > part->size) @@ -928,7 +930,6 @@ struct intel_spi *intel_spi_probe(struct device *dev, ispi->dev = dev; ispi->info = info; - ispi->writeable = info->writeable; ret = intel_spi_init(ispi); if (ret) @@ -946,10 +947,6 @@ struct intel_spi *intel_spi_probe(struct device *dev, intel_spi_fill_partition(ispi, &part); - /* Prevent writes if not explicitly enabled */ - if (!ispi->writeable || !writeable) - ispi->nor.mtd.flags &= ~MTD_WRITEABLE; - ret = mtd_device_register(&ispi->nor.mtd, &part, 1); if (ret) return ERR_PTR(ret); diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index e8146a47da123..2c256d455c9f3 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -1220,6 +1220,8 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, continue; erase = &map->erase_type[i]; + if (!erase->size) + continue; /* Alignment is not mandatory for overlaid regions */ if (region->offset & SNOR_OVERLAID_REGION && diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 98df38fe553ce..12d085405bd05 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -332,7 +332,7 @@ static int __init arc_rimi_init(void) dev->irq = 9; if (arcrimi_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void) iounmap(lp->mem_start); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index 22a49c6d7ae6e..5d4a4c7efbbff 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -298,6 +298,10 @@ struct arcnet_local { int excnak_pending; /* We just got an excesive nak interrupt */ + /* RESET flag handling */ + int reset_in_progress; + struct work_struct reset_work; + struct { uint16_t sequence; /* sequence number (incs with each packet) */ __be16 aborted_seq; @@ 
-350,7 +354,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc) void arcnet_unregister_proto(struct ArcProto *proto); irqreturn_t arcnet_interrupt(int irq, void *dev_id); + struct net_device *alloc_arcdev(const char *name); +void free_arcdev(struct net_device *dev); int arcnet_open(struct net_device *dev); int arcnet_close(struct net_device *dev); diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index e04efc0a5c977..d76dd7d14299e 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t) struct arcnet_local *lp = from_timer(lp, t, timer); struct net_device *dev = lp->dev; - if (!netif_carrier_ok(dev)) { + spin_lock_irq(&lp->lock); + + if (!lp->reset_in_progress && !netif_carrier_ok(dev)) { netif_carrier_on(dev); netdev_info(dev, "link up\n"); } + + spin_unlock_irq(&lp->lock); +} + +static void reset_device_work(struct work_struct *work) +{ + struct arcnet_local *lp; + struct net_device *dev; + + lp = container_of(work, struct arcnet_local, reset_work); + dev = lp->dev; + + /* Do not bring the network interface back up if an ifdown + * was already done. + */ + if (!netif_running(dev) || !lp->reset_in_progress) + return; + + rtnl_lock(); + + /* Do another check, in case of an ifdown that was triggered in + * the small race window between the exit condition above and + * acquiring RTNL. + */ + if (!netif_running(dev) || !lp->reset_in_progress) + goto out; + + dev_close(dev); + dev_open(dev, NULL); + +out: + rtnl_unlock(); } static void arcnet_reply_tasklet(unsigned long data) @@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name) lp->dev = dev; spin_lock_init(&lp->lock); timer_setup(&lp->timer, arcnet_timer, 0); + INIT_WORK(&lp->reset_work, reset_device_work); } return dev; } EXPORT_SYMBOL(alloc_arcdev); +void free_arcdev(struct net_device *dev) +{ + struct arcnet_local *lp = netdev_priv(dev); + + /* Do not cancel this at ->ndo_close(), as the workqueue itself + * indirectly calls the ifdown path through dev_close(). + */ + cancel_work_sync(&lp->reset_work); + free_netdev(dev); +} +EXPORT_SYMBOL(free_arcdev); + /* Open/initialize the board. This is called sometime after booting when * the 'ifconfig' program is run. * @@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev) /* shut down the card */ lp->hw.close(dev); + + /* reset counters */ + lp->reset_in_progress = 0; + module_put(lp->hw.owner); return 0; } @@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) spin_lock_irqsave(&lp->lock, flags); + if (lp->reset_in_progress) + goto out; + /* RESET flag was enabled - if device is not running, we must * clear it right away (but nothing else). */ @@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) if (status & RESETflag) { arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n", status); - arcnet_close(dev); - arcnet_open(dev); + + lp->reset_in_progress = 1; + netif_stop_queue(dev); + netif_carrier_off(dev); + schedule_work(&lp->reset_work); /* get out of the interrupt handler! */ - break; + goto out; } /* RX is inhibited - we must have received something. * Prepare to receive into the next buffer. 
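[Editor's note, not part of the patch above] The arcnet.c hunks replace a direct arcnet_close()/arcnet_open() call from the interrupt handler with a deferred reset: the IRQ path only records reset_in_progress, quiesces the queue and carrier, and schedules a work item; the worker then re-checks both the flag and netif_running() under RTNL before cycling the interface, closing the race against a concurrent ifdown. A condensed sketch of that pattern with hypothetical names (foo_priv, foo_request_reset) and error handling trimmed:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *dev;
	int reset_in_progress;
	struct work_struct reset_work;
};

/* Runs in process context, so blocking calls and RTNL are allowed. */
static void foo_reset_work(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv, reset_work);
	struct net_device *dev = priv->dev;

	if (!netif_running(dev) || !priv->reset_in_progress)
		return;

	rtnl_lock();
	/* Re-check under RTNL: an ifdown may have raced with us. */
	if (netif_running(dev) && priv->reset_in_progress) {
		dev_close(dev);
		dev_open(dev, NULL);
	}
	rtnl_unlock();
}

/* Called from the hard-IRQ handler when a reset condition is seen. */
static void foo_request_reset(struct foo_priv *priv)
{
	priv->reset_in_progress = 1;
	netif_stop_queue(priv->dev);
	netif_carrier_off(priv->dev);
	schedule_work(&priv->reset_work);
}

The matching teardown requirement also shows in the patch: the work item must be cancelled with cancel_work_sync() before free_netdev() (hence the new free_arcdev() helper), and it must not be cancelled from ->ndo_close(), since the worker itself takes the ifdown path through dev_close().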
@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) udelay(1); lp->hw.intmask(dev, lp->intmask); +out: spin_unlock_irqrestore(&lp->lock, flags); return retval; } diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index f983c4ce6b07f..be618e4b9ed5e 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -169,7 +169,7 @@ static int __init com20020_init(void) dev->irq = 9; if (com20020isa_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -182,7 +182,7 @@ static void __exit com20020_exit(void) unregister_netdev(my_dev); free_irq(my_dev->irq, my_dev); release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(my_dev); + free_arcdev(my_dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 9f44e2e458df1..b4f8798d8c509 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -294,7 +294,7 @@ static void com20020pci_remove(struct pci_dev *pdev) unregister_netdev(dev); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } } diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index cf607ffcf358e..e0c7720bd5da9 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -113,6 +113,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) struct com20020_dev *info; struct net_device *dev; struct arcnet_local *lp; + int ret = -ENOMEM; dev_dbg(&p_dev->dev, "com20020_attach()\n"); @@ -142,12 +143,18 @@ static int com20020_probe(struct pcmcia_device *p_dev) info->dev = dev; p_dev->priv = info; - return com20020_config(p_dev); + ret = com20020_config(p_dev); + if (ret) + goto fail_config; + + return 0; +fail_config: + free_arcdev(dev); fail_alloc_dev: kfree(info); fail_alloc_info: - return -ENOMEM; + return ret; } /* com20020_attach */ static void com20020_detach(struct pcmcia_device *link) @@ -177,7 +184,7 @@ static void com20020_detach(struct pcmcia_device *link) dev = info->dev; if (dev) { dev_dbg(&link->dev, "kfree...\n"); - free_netdev(dev); + free_arcdev(dev); } dev_dbg(&link->dev, "kfree2...\n"); kfree(info); diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index cf214b7306715..3856b447d38ed 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -396,7 +396,7 @@ static int __init com90io_init(void) err = com90io_probe(dev); if (err) { - free_netdev(dev); + free_arcdev(dev); return err; } @@ -419,7 +419,7 @@ static void __exit com90io_exit(void) free_irq(dev->irq, dev); release_region(dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(dev); + free_arcdev(dev); } module_init(com90io_init) diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 3dc3d533cb19a..d8dfb9ea0de89 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -554,7 +554,7 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem, err_release_mem: release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); err_free_dev: - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -672,7 +672,7 @@ static void __exit com90xx_exit(void) release_region(dev->base_addr, ARCNET_TOTAL_SIZE); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); - free_netdev(dev); + free_arcdev(dev); } } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index b0f8d551b61db..320e5461853f7 100644 --- 
a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -85,8 +85,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { static u16 ad_ticks_per_sec; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; -static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = - MULTICAST_LACPDU_ADDR; +const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = { + 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 +}; /* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); @@ -1519,6 +1520,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) slave_err(bond->dev, port->slave->dev, "Port %d did not find a suitable aggregator\n", port->actor_port_number); + return; } } /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index f3f86ef68ae0c..8b6cf2bf9025a 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond) d = debugfs_rename(bonding_debug_root, bond->debug_dir, bonding_debug_root, bond->dev->name); - if (d) { + if (!IS_ERR(d)) { bond->debug_dir = d; } else { netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n"); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9c4b45341fd28..c40b92f8d16bf 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -827,12 +827,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, dev_uc_unsync(slave_dev, bond_dev); dev_mc_unsync(slave_dev, bond_dev); - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* del lacpdu mc addr from mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_del(slave_dev, lacpdu_multicast); - } + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_del(slave_dev, lacpdu_mcast_addr); } /*--------------------------- Active slave change ---------------------------*/ @@ -852,7 +848,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); - bond_hw_addr_flush(bond->dev, old_active->dev); + if (bond->dev->flags & IFF_UP) + bond_hw_addr_flush(bond->dev, old_active->dev); } if (new_active) { @@ -863,10 +860,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); - netif_addr_lock_bh(bond->dev); - dev_uc_sync(new_active->dev, bond->dev); - dev_mc_sync(new_active->dev, bond->dev); - netif_addr_unlock_bh(bond->dev); + if (bond->dev->flags & IFF_UP) { + netif_addr_lock_bh(bond->dev); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); + netif_addr_unlock_bh(bond->dev); + } } } @@ -2073,16 +2072,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } } - netif_addr_lock_bh(bond_dev); - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - netif_addr_unlock_bh(bond_dev); - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; + if (bond_dev->flags & IFF_UP) { + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); - dev_mc_add(slave_dev, 
lacpdu_multicast); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_add(slave_dev, lacpdu_mcast_addr); } } @@ -2310,7 +2307,8 @@ static int __bond_release_one(struct net_device *bond_dev, if (old_flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1); - bond_hw_addr_flush(bond_dev, slave_dev); + if (old_flags & IFF_UP) + bond_hw_addr_flush(bond_dev, slave_dev); } slave_disable_netpoll(slave); @@ -2395,12 +2393,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in /* called with rcu_read_lock() */ static int bond_miimon_inspect(struct bonding *bond) { + bool ignore_updelay = false; int link_state, commit = 0; struct list_head *iter; struct slave *slave; - bool ignore_updelay; - ignore_updelay = !rcu_dereference(bond->curr_active_slave); + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { + ignore_updelay = !rcu_dereference(bond->curr_active_slave); + } else { + struct bond_up_slave *usable_slaves; + + usable_slaves = rcu_dereference(bond->usable_slaves); + + if (usable_slaves && usable_slaves->count == 0) + ignore_updelay = true; + } bond_for_each_slave_rcu(bond, slave, iter) { bond_propose_link_state(slave, BOND_LINK_NOCHANGE); @@ -3772,6 +3779,9 @@ static int bond_open(struct net_device *bond_dev) /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; bond_3ad_initiate_agg_selection(bond, 1); + + bond_for_each_slave(bond, slave, iter) + dev_mc_add(slave->dev, lacpdu_mcast_addr); } if (bond_mode_can_use_xmit_hash(bond)) @@ -3783,6 +3793,7 @@ static int bond_open(struct net_device *bond_dev) static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave; bond_work_cancel_all(bond); bond->send_peer_notif = 0; @@ -3790,6 +3801,19 @@ static int bond_close(struct net_device *bond_dev) bond_alb_deinitialize(bond); bond->recv_probe = NULL; + if (bond_uses_primary(bond)) { + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + bond_hw_addr_flush(bond_dev, slave->dev); + rcu_read_unlock(); + } else { + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) + bond_hw_addr_flush(bond_dev, slave->dev); + } + return 0; } diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index 194c86e0f340f..8f6dccd5a5879 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -264,22 +264,24 @@ static int cc770_isa_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "couldn't register device (err=%d)\n", err); - goto exit_unmap; + goto exit_free; } dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; - exit_unmap: +exit_free: + free_cc770dev(dev); +exit_unmap: if (mem[idx]) iounmap(base); - exit_release: +exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); - exit: +exit: return err; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 7cbaac238ff62..429950241de32 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -954,11 +954,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, u32 reg_ctrl, reg_id, reg_iflag1; int i; - if (unlikely(drop)) { - skb = ERR_PTR(-ENOBUFS); - goto mark_as_read; - } - mb = flexcan_get_mb(priv, n); if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { @@ -987,6 +982,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, reg_ctrl = 
priv->read(&mb->can_ctrl); } + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) skb = alloc_canfd_skb(offload->dev, &cfd); else diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index f169d9090e52f..f903f78af087a 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -294,11 +294,6 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) if (ret) return ret; - ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG, - TCAN4X5X_ENABLE_MCAN_INT); - if (ret) - return ret; - ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, TCAN4X5X_CLEAR_ALL_INT); if (ret) diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index e254e04ae257f..ef649764f9b4e 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -325,14 +325,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) &mscan_clksrc); if (!priv->can.clock.freq) { dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n"); - goto exit_free_mscan; + goto exit_put_clock; } err = register_mscandev(dev, mscan_clksrc); if (err) { dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); - goto exit_free_mscan; + goto exit_put_clock; } dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", @@ -340,7 +340,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) return 0; -exit_free_mscan: +exit_put_clock: + if (data->put_clock) + data->put_clock(ofdev); free_candev(dev); exit_dispose_irq: irq_dispose_mapping(irq); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 67f0f14e2bf4e..c61534a2a2d3a 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1075,7 +1075,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) struct rcar_canfd_global *gpriv = dev_id; struct net_device *ndev; struct rcar_canfd_channel *priv; - u32 sts, gerfl; + u32 sts, cc, gerfl; u32 ch, ridx; /* Global error interrupts still indicate a condition specific @@ -1093,7 +1093,9 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) /* Handle Rx interrupts */ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); - if (likely(sts & RCANFD_RFSTS_RFIF)) { + cc = rcar_canfd_read(priv->base, RCANFD_RFCC(ridx)); + if (likely(sts & RCANFD_RFSTS_RFIF && + cc & RCANFD_RFCC_RFIE)) { if (napi_schedule_prep(&priv->napi)) { /* Disable Rx FIFO interrupts */ rcar_canfd_clear_bit(priv->base, diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index d513fac507185..db3e767d5320f 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -202,22 +202,24 @@ static int sja1000_isa_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); - goto exit_unmap; + goto exit_free; } dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; - exit_unmap: +exit_free: + free_sja1000dev(dev); +exit_unmap: if (mem[idx]) iounmap(base); - exit_release: +exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); - exit: +exit: return err; } diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 5dde3c42d241b..ffcb04aac9729 100644 --- a/drivers/net/can/spi/mcp251x.c +++ 
b/drivers/net/can/spi/mcp251x.c @@ -1419,11 +1419,14 @@ static int mcp251x_can_probe(struct spi_device *spi) ret = mcp251x_gpio_setup(priv); if (ret) - goto error_probe; + goto out_unregister_candev; netdev_info(net, "MCP%x successfully initialized.\n", priv->model); return 0; +out_unregister_candev: + unregister_candev(net); + error_probe: destroy_workqueue(priv->wq); priv->wq = NULL; diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 8847942a8d97e..73c5343e609bc 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -227,6 +227,10 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, u8 rxerr = msg->msg.rx.data[2]; u8 txerr = msg->msg.rx.data[3]; + netdev_dbg(priv->netdev, + "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n", + msg->msg.rx.dlc, state, ecc, rxerr, txerr); + skb = alloc_can_err_skb(priv->netdev, &cf); if (skb == NULL) { stats->rx_dropped++; @@ -253,6 +257,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, break; default: priv->can.state = CAN_STATE_ERROR_ACTIVE; + txerr = 0; + rxerr = 0; break; } } else { diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 1bfc497da9ac8..a879200eaab02 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -678,6 +678,7 @@ static int gs_can_open(struct net_device *netdev) flags |= GS_CAN_MODE_TRIPLE_SAMPLE; /* finally start device */ + dev->can.state = CAN_STATE_ERROR_ACTIVE; dm->mode = cpu_to_le32(GS_CAN_MODE_START); dm->flags = cpu_to_le32(flags); rc = usb_control_msg(interface_to_usbdev(dev->iface), @@ -694,13 +695,12 @@ static int gs_can_open(struct net_device *netdev) if (rc < 0) { netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); kfree(dm); + dev->can.state = CAN_STATE_STOPPED; return rc; } kfree(dm); - dev->can.state = CAN_STATE_ERROR_ACTIVE; - parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 61e67986b625e..5699531f87873 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context { int dlc; }; +struct kvaser_usb_busparams { + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 nsamples; +} __packed; + struct kvaser_usb { struct usb_device *udev; struct usb_interface *intf; @@ -104,13 +112,19 @@ struct kvaser_usb_net_priv { struct can_priv can; struct can_berr_counter bec; + /* subdriver-specific data */ + void *sub_priv; + struct kvaser_usb *dev; struct net_device *netdev; int channel; - struct completion start_comp, stop_comp, flush_comp; + struct completion start_comp, stop_comp, flush_comp, + get_busparams_comp; struct usb_anchor tx_submitted; + struct kvaser_usb_busparams busparams_nominal, busparams_data; + spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */ int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[]; @@ -120,11 +134,15 @@ struct kvaser_usb_net_priv { * struct kvaser_usb_dev_ops - Device specific functions * @dev_set_mode: used for can.do_set_mode * @dev_set_bittiming: used for can.do_set_bittiming + * @dev_get_busparams: readback arbitration busparams * @dev_set_data_bittiming: used for can.do_set_data_bittiming + * @dev_get_data_busparams: readback data busparams * @dev_get_berr_counter: used for can.do_get_berr_counter * * @dev_setup_endpoints: setup USB in and 
out endpoints * @dev_init_card: initialize card + * @dev_init_channel: initialize channel + * @dev_remove_channel: uninitialize channel * @dev_get_software_info: get software info * @dev_get_software_details: get software details * @dev_get_card_info: get card info @@ -140,12 +158,18 @@ struct kvaser_usb_net_priv { */ struct kvaser_usb_dev_ops { int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode); - int (*dev_set_bittiming)(struct net_device *netdev); - int (*dev_set_data_bittiming)(struct net_device *netdev); + int (*dev_set_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv); + int (*dev_set_data_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv); int (*dev_get_berr_counter)(const struct net_device *netdev, struct can_berr_counter *bec); int (*dev_setup_endpoints)(struct kvaser_usb *dev); int (*dev_init_card)(struct kvaser_usb *dev); + int (*dev_init_channel)(struct kvaser_usb_net_priv *priv); + void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv); int (*dev_get_software_info)(struct kvaser_usb *dev); int (*dev_get_software_details)(struct kvaser_usb *dev); int (*dev_get_card_info)(struct kvaser_usb *dev); @@ -178,6 +202,8 @@ struct kvaser_usb_dev_cfg { extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops; extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops; +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv); + int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, int *actual_len); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 416763fd1f11c..1f015b496a472 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -416,10 +416,6 @@ static int kvaser_usb_open(struct net_device *netdev) if (err) return err; - err = kvaser_usb_setup_rx_urbs(dev); - if (err) - goto error; - err = ops->dev_set_opt_mode(priv); if (err) goto error; @@ -453,7 +449,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) /* This method might sleep. Do not call it in the atomic context * of URB completions. 
*/ -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) { usb_kill_anchored_urbs(&priv->tx_submitted); kvaser_usb_reset_tx_urb_contexts(priv); @@ -510,6 +506,93 @@ static int kvaser_usb_close(struct net_device *netdev) return 0; } +static int kvaser_usb_set_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *bt = &priv->can.bittiming; + + struct kvaser_usb_busparams busparams; + int tseg1 = bt->prop_seg + bt->phase_seg1; + int tseg2 = bt->phase_seg2; + int sjw = bt->sjw; + int err = -EOPNOTSUPP; + + busparams.bitrate = cpu_to_le32(bt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + busparams.nsamples = 3; + else + busparams.nsamples = 1; + + err = ops->dev_set_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_busparams(priv); + if (err) { + /* Treat EOPNOTSUPP as success */ + if (err == -EOPNOTSUPP) + err = 0; + return err; + } + + if (memcmp(&busparams, &priv->busparams_nominal, + sizeof(priv->busparams_nominal)) != 0) + err = -EINVAL; + + return err; +} + +static int kvaser_usb_set_data_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *dbt = &priv->can.data_bittiming; + + struct kvaser_usb_busparams busparams; + int tseg1 = dbt->prop_seg + dbt->phase_seg1; + int tseg2 = dbt->phase_seg2; + int sjw = dbt->sjw; + int err; + + if (!ops->dev_set_data_bittiming || + !ops->dev_get_data_busparams) + return -EOPNOTSUPP; + + busparams.bitrate = cpu_to_le32(dbt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + busparams.nsamples = 1; + + err = ops->dev_set_data_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_data_busparams(priv); + if (err) + return err; + + if (memcmp(&busparams, &priv->busparams_data, + sizeof(priv->busparams_data)) != 0) + err = -EINVAL; + + return err; +} + static void kvaser_usb_write_bulk_callback(struct urb *urb) { struct kvaser_usb_tx_urb_context *context = urb->context; @@ -645,6 +728,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = { static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) { + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int i; for (i = 0; i < dev->nchannels; i++) { @@ -660,6 +744,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) if (!dev->nets[i]) continue; + if (ops->dev_remove_channel) + ops->dev_remove_channel(dev->nets[i]); + free_candev(dev->nets[i]->netdev); } } @@ -690,6 +777,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); + init_completion(&priv->flush_comp); + init_completion(&priv->get_busparams_comp); priv->can.ctrlmode_supported = 0; priv->dev = dev; @@ -702,7 +791,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) priv->can.state = CAN_STATE_STOPPED; 
priv->can.clock.freq = dev->cfg->clock.freq; priv->can.bittiming_const = dev->cfg->bittiming_const; - priv->can.do_set_bittiming = ops->dev_set_bittiming; + priv->can.do_set_bittiming = kvaser_usb_set_bittiming; priv->can.do_set_mode = ops->dev_set_mode; if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) @@ -714,7 +803,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; - priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming; + priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming; } netdev->flags |= IFF_ECHO; @@ -726,17 +815,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) dev->nets[channel] = priv; + if (ops->dev_init_channel) { + err = ops->dev_init_channel(priv); + if (err) + goto err; + } + err = register_candev(netdev); if (err) { dev_err(&dev->intf->dev, "Failed to register CAN device\n"); - free_candev(netdev); - dev->nets[channel] = NULL; - return err; + goto err; } netdev_dbg(netdev, "device registered\n"); return 0; + +err: + free_candev(netdev); + dev->nets[channel] = NULL; + return err; } static int kvaser_usb_probe(struct usb_interface *intf, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 01d4a731b579c..233bbfeaa771e 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -43,6 +43,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; /* Minihydra command IDs */ #define CMD_SET_BUSPARAMS_REQ 16 +#define CMD_GET_BUSPARAMS_REQ 17 +#define CMD_GET_BUSPARAMS_RESP 18 #define CMD_GET_CHIP_STATE_REQ 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_DRIVERMODE_REQ 21 @@ -193,21 +195,26 @@ struct kvaser_cmd_chip_state_event { #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 struct kvaser_cmd_set_busparams { - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 nsamples; + struct kvaser_usb_busparams busparams_nominal; u8 reserved0[4]; - __le32 bitrate_d; - u8 tseg1_d; - u8 tseg2_d; - u8 sjw_d; - u8 nsamples_d; + struct kvaser_usb_busparams busparams_data; u8 canfd_mode; u8 reserved1[7]; } __packed; +/* Busparam type */ +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00 +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01 +struct kvaser_cmd_get_busparams_req { + u8 type; + u8 reserved[27]; +} __packed; + +struct kvaser_cmd_get_busparams_res { + struct kvaser_usb_busparams busparams; + u8 reserved[20]; +} __packed; + /* Ctrl modes */ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 @@ -278,6 +285,8 @@ struct kvaser_cmd { struct kvaser_cmd_error_event error_event; struct kvaser_cmd_set_busparams set_busparams_req; + struct kvaser_cmd_get_busparams_req get_busparams_req; + struct kvaser_cmd_get_busparams_res get_busparams_res; struct kvaser_cmd_chip_state_event chip_state_event; @@ -293,6 +302,7 @@ struct kvaser_cmd { #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) +#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6) /* CAN frame flags. 
Used in ext_rx_can and ext_tx_can */ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) @@ -359,6 +369,10 @@ struct kvaser_cmd_ext { } __packed; } __packed; +struct kvaser_usb_net_hydra_priv { + int pending_get_busparams_type; +}; + static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .name = "kvaser_usb_kcan", .tseg1_min = 1, @@ -504,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, u8 cmd_no, int channel) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -511,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); if (channel < 0) { kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -527,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -543,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, { struct kvaser_cmd *cmd; struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); @@ -550,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd_async(priv, cmd, - kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len); if (err) kfree(cmd); @@ -701,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; + size_t cmd_len; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; @@ -712,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -812,6 +831,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev, complete(&priv->flush_comp); } +static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct kvaser_usb_net_hydra_priv *hydra; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + hydra = priv->sub_priv; + if (!hydra) + return; + + switch (hydra->pending_get_busparams_type) { + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN: + memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD: + memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + 
default: + dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n", + hydra->pending_get_busparams_type); + break; + } + hydra->pending_get_busparams_type = -1; + + complete(&priv->get_busparams_comp); +} + static void kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, u8 bus_status, @@ -1099,6 +1151,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; unsigned long irq_flags; bool one_shot_fail = false; + bool is_err_frame = false; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); @@ -1117,10 +1170,13 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); one_shot_fail = true; } + + is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK && + flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; } context = &priv->tx_contexts[transid % dev->max_tx_urbs]; - if (!one_shot_fail) { + if (!one_shot_fail && !is_err_frame) { struct net_device_stats *stats = &priv->netdev->stats; stats->tx_packets++; @@ -1294,6 +1350,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, kvaser_usb_hydra_state_event(dev, cmd); break; + case CMD_GET_BUSPARAMS_RESP: + kvaser_usb_hydra_get_busparams_reply(dev, cmd); + break; + case CMD_ERROR_EVENT: kvaser_usb_hydra_error_event(dev, cmd); break; @@ -1494,15 +1554,61 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev, return err; } -static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, + int busparams_type) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; + struct kvaser_cmd *cmd; + size_t cmd_len; + int err; + + if (!hydra) + return -EINVAL; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + cmd->get_busparams_req.type = busparams_type; + hydra->pending_get_busparams_type = busparams_type; + + reinit_completion(&priv->get_busparams_comp); + + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->get_busparams_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return err; +} + +static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN); +} + +static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD); +} + +static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = bt->prop_seg + bt->phase_seg1; - int tseg2 = bt->phase_seg2; - int sjw = bt->sjw; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1510,33 +1616,29 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) return -ENOMEM; 
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; - cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate); - cmd->set_busparams_req.sjw = (u8)sjw; - cmd->set_busparams_req.tseg1 = (u8)tseg1; - cmd->set_busparams_req.tseg2 = (u8)tseg2; - cmd->set_busparams_req.nsamples = 1; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, + sizeof(cmd->set_busparams_req.busparams_nominal)); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; } -static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *dbt = &priv->can.data_bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = dbt->prop_seg + dbt->phase_seg1; - int tseg2 = dbt->phase_seg2; - int sjw = dbt->sjw; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1544,11 +1646,9 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; - cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate); - cmd->set_busparams_req.sjw_d = (u8)sjw; - cmd->set_busparams_req.tseg1_d = (u8)tseg1; - cmd->set_busparams_req.tseg2_d = (u8)tseg2; - cmd->set_busparams_req.nsamples_d = 1; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_data, busparams, + sizeof(cmd->set_busparams_req.busparams_data)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) @@ -1564,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); @@ -1655,6 +1755,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) return 0; } +static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_hydra_priv *hydra; + + hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL); + if (!hydra) + return -ENOMEM; + + priv->sub_priv = hydra; + + return 0; +} + static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; @@ -1679,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; u32 flags; struct kvaser_usb_dev_card_data *card_data = &dev->card_data; @@ -1688,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) return -ENOMEM; cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->sw_detail_req.use_ext_cmd = 1; kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -1695,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, 
cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -1811,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; + size_t cmd_len; int err; if ((priv->can.ctrlmode & @@ -1826,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) return -ENOMEM; cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid @@ -1835,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) else cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; @@ -1845,7 +1962,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->start_comp); + reinit_completion(&priv->start_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ, priv->channel); @@ -1863,7 +1980,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->stop_comp); + reinit_completion(&priv->stop_comp); /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT * see comment in kvaser_usb_hydra_update_state() @@ -1886,7 +2003,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->flush_comp); + reinit_completion(&priv->flush_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE, priv->channel); @@ -1997,10 +2114,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { .dev_set_mode = kvaser_usb_hydra_set_mode, .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, + .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams, .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, + .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams, .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, .dev_init_card = kvaser_usb_hydra_init_card, + .dev_init_channel = kvaser_usb_hydra_init_channel, .dev_get_software_info = kvaser_usb_hydra_get_software_info, .dev_get_software_details = kvaser_usb_hydra_get_software_details, .dev_get_card_info = kvaser_usb_hydra_get_card_info, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 5e281249ad5fe..f06d63db9077b 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -55,6 +56,9 @@ #define CMD_RX_EXT_MESSAGE 14 #define CMD_TX_EXT_MESSAGE 15 #define CMD_SET_BUS_PARAMS 16 +#define CMD_GET_BUS_PARAMS 17 +#define CMD_GET_BUS_PARAMS_REPLY 18 +#define CMD_GET_CHIP_STATE 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_CTRL_MODE 21 #define CMD_RESET_CHIP 24 @@ -69,10 +73,13 @@ #define CMD_GET_CARD_INFO_REPLY 35 #define CMD_GET_SOFTWARE_INFO 38 #define CMD_GET_SOFTWARE_INFO_REPLY 39 +#define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_CAN_ERROR_EVENT 51 #define CMD_FLUSH_QUEUE_REPLY 68 
+#define CMD_GET_CAPABILITIES_REQ 95 +#define CMD_GET_CAPABILITIES_RESP 96 #define CMD_LEAF_LOG_MESSAGE 106 @@ -82,6 +89,8 @@ #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) +#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12) + /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -156,11 +165,7 @@ struct usbcan_cmd_softinfo { struct kvaser_cmd_busparams { u8 tid; u8 channel; - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 no_samp; + struct kvaser_usb_busparams busparams; } __packed; struct kvaser_cmd_tx_can { @@ -229,7 +234,7 @@ struct kvaser_cmd_tx_acknowledge_header { u8 tid; } __packed; -struct leaf_cmd_error_event { +struct leaf_cmd_can_error_event { u8 tid; u8 flags; __le16 time[3]; @@ -241,7 +246,7 @@ struct leaf_cmd_error_event { u8 error_factor; } __packed; -struct usbcan_cmd_error_event { +struct usbcan_cmd_can_error_event { u8 tid; u8 padding; u8 tx_errors_count_ch0; @@ -253,6 +258,28 @@ struct usbcan_cmd_error_event { __le16 time; } __packed; +/* CMD_ERROR_EVENT error codes */ +#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8 +#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9 + +struct leaf_cmd_error_event { + u8 tid; + u8 error_code; + __le16 timestamp[3]; + __le16 padding; + __le16 info1; + __le16 info2; +} __packed; + +struct usbcan_cmd_error_event { + u8 tid; + u8 error_code; + __le16 info1; + __le16 info2; + __le16 timestamp; + __le16 padding; +} __packed; + struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; @@ -277,6 +304,28 @@ struct leaf_cmd_log_message { u8 data[8]; } __packed; +/* Sub commands for cap_req and cap_res */ +#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02 +#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05 +struct kvaser_cmd_cap_req { + __le16 padding0; + __le16 cap_cmd; + __le16 padding1; + __le16 channel; +} __packed; + +/* Status codes for cap_res */ +#define KVASER_USB_LEAF_CAP_STAT_OK 0x00 +#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01 +#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02 +struct kvaser_cmd_cap_res { + __le16 padding; + __le16 cap_cmd; + __le16 status; + __le32 mask; + __le32 value; +} __packed; + struct kvaser_cmd { u8 len; u8 id; @@ -292,14 +341,18 @@ struct kvaser_cmd { struct leaf_cmd_softinfo softinfo; struct leaf_cmd_rx_can rx_can; struct leaf_cmd_chip_state_event chip_state_event; - struct leaf_cmd_error_event error_event; + struct leaf_cmd_can_error_event can_error_event; struct leaf_cmd_log_message log_message; + struct leaf_cmd_error_event error_event; + struct kvaser_cmd_cap_req cap_req; + struct kvaser_cmd_cap_res cap_res; } __packed leaf; union { struct usbcan_cmd_softinfo softinfo; struct usbcan_cmd_rx_can rx_can; struct usbcan_cmd_chip_state_event chip_state_event; + struct usbcan_cmd_can_error_event can_error_event; struct usbcan_cmd_error_event error_event; } __packed usbcan; @@ -309,6 +362,42 @@ struct kvaser_cmd { } u; } __packed; +#define CMD_SIZE_ANY 0xff +#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field) + +static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { + [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), + [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo), + [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can), + [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), + [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), + 
+	[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+	[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event),
+	[CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res),
+	[CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams),
+	[CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+	/* ignored events: */
+	[CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+	[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+	[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+	[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+	[CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+	[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+	[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+	[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+	[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+	[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
+	[CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+	/* ignored events: */
+	[CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+};
+
 /* Summary of a kvaser error event, for a unified Leaf/Usbcan error
  * handling. Some discrepancies between the two families exist:
  *
@@ -332,6 +421,12 @@ struct kvaser_usb_err_summary {
 	};
 };
 
+struct kvaser_usb_net_leaf_priv {
+	struct kvaser_usb_net_priv *net;
+
+	struct delayed_work chip_state_req_work;
+};
+
 static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
 	.name = "kvaser_usb_ucii",
 	.tseg1_min = 4,
@@ -396,6 +491,43 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
 	.bittiming_const = &kvaser_usb_flexc_bittiming_const,
 };
 
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+				       const struct kvaser_cmd *cmd)
+{
+	/* buffer size >= cmd->len ensured by caller */
+	u8 min_size = 0;
+
+	switch (dev->driver_info->family) {
+	case KVASER_LEAF:
+		if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+			min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+		break;
+	case KVASER_USBCAN:
+		if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+			min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+		break;
+	}
+
+	if (min_size == CMD_SIZE_ANY)
+		return 0;
+
+	if (min_size) {
+		min_size += CMD_HEADER_LEN;
+		if (cmd->len >= min_size)
+			return 0;
+
+		dev_err_ratelimited(&dev->intf->dev,
+				    "Received command %u too short (size %u, needed %u)",
+				    cmd->id, cmd->len, min_size);
+		return -EIO;
+	}
+
+	dev_warn_ratelimited(&dev->intf->dev,
+			     "Unhandled command (%d, size %d)\n",
+			     cmd->id, cmd->len);
+	return -EINVAL;
+}
+
 static void *
 kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
			     const struct sk_buff *skb, int *frame_len,
@@ -503,6 +635,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
 end:
 	kfree(buf);
 
+	if (err == 0)
+		err = kvaser_usb_leaf_verify_size(dev, cmd);
+
 	return err;
 }
 
@@ -535,6 +670,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
 	dev->fw_version = le32_to_cpu(softinfo->fw_version);
 	dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
 
+	if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
+		dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
+
 	if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
 		/* Firmware expects bittiming parameters calculated for 16MHz
 		 * clock, regardless of the actual clock
@@ -622,6 +760,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
 	return 0;
 }
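The kvaser_usb_leaf_cmd_sizes_* tables added above pair designated array initializers with sizeof_field() so that every incoming command is length-checked against the union member it will be parsed as, before any field is read. A reduced sketch of the same pattern; struct msg and the command IDs are illustrative, not the driver's real layout:

/* Reduced sketch of the per-command minimum-length check. */
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/stddef.h>	/* sizeof_field() */
#include <linux/types.h>
#include <linux/errno.h>

struct msg {
	u8 len;			/* total length, header included */
	u8 id;
	union {
		struct { u8 tid; u8 channel; } simple;
		struct { u8 tid; u8 data[8]; } payload;
	} u;
};

#define MSG_HDR_LEN	2
#define MSG_SIMPLE	1
#define MSG_PAYLOAD	2

static const u8 msg_min_sizes[] = {
	[MSG_SIMPLE]  = sizeof_field(struct msg, u.simple),
	[MSG_PAYLOAD] = sizeof_field(struct msg, u.payload),
};

static int msg_check_len(const struct msg *m)
{
	u8 min;

	/* unknown IDs get no table entry and are rejected */
	if (m->id >= ARRAY_SIZE(msg_min_sizes) || !msg_min_sizes[m->id])
		return -EINVAL;

	min = msg_min_sizes[m->id] + MSG_HDR_LEN;
	return m->len >= min ? 0 : -EIO;	/* runt command */
}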
+static int
+kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
+				      u16 cap_cmd_req, u16 *status)
+{
+	struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+	struct kvaser_cmd *cmd;
+	u32 value = 0;
+	u32 mask = 0;
+	u16 cap_cmd_res;
+	int err;
+	int i;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->id = CMD_GET_CAPABILITIES_REQ;
+	cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+	cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
+
+	err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+	if (err)
+		goto end;
+
+	err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+	if (err)
+		goto end;
+
+	*status = le16_to_cpu(cmd->u.leaf.cap_res.status);
+
+	if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
+		goto end;
+
+	cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
+	switch (cap_cmd_res) {
+	case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+	case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+		value = le32_to_cpu(cmd->u.leaf.cap_res.value);
+		mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
+		break;
+	default:
+		dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+			 cap_cmd_res);
+		break;
+	}
+
+	for (i = 0; i < dev->nchannels; i++) {
+		if (BIT(i) & (value & mask)) {
+			switch (cap_cmd_res) {
+			case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+				card_data->ctrlmode_supported |=
+						CAN_CTRLMODE_LISTENONLY;
+				break;
+			case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+				card_data->capabilities |=
+						KVASER_USB_CAP_BERR_CAP;
+				break;
+			}
+		}
+	}
+
+end:
+	kfree(cmd);
+
+	return err;
+}
+
+static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
+{
+	int err;
+	u16 status;
+
+	if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+		dev_info(&dev->intf->dev,
+			 "No extended capability support. Upgrade device firmware.\n");
+		return 0;
+	}
+
+	err = kvaser_usb_leaf_get_single_capability(dev,
+						    KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
+						    &status);
+	if (err)
+		return err;
+	if (status)
+		dev_info(&dev->intf->dev,
+			 "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
+			 status);
+
+	err = kvaser_usb_leaf_get_single_capability(dev,
+						    KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
+						    &status);
+	if (err)
+		return err;
+	if (status)
+		dev_info(&dev->intf->dev,
+			 "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
+			 status);
+
+	return 0;
+}
+
+static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
+{
+	int err = 0;
+
+	if (dev->driver_info->family == KVASER_LEAF)
+		err = kvaser_usb_leaf_get_capabilities_leaf(dev);
+
+	return err;
+}
+
 static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
					    const struct kvaser_cmd *cmd)
 {
@@ -650,7 +898,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
 	context = &priv->tx_contexts[tid % dev->max_tx_urbs];
 
 	/* Sometimes the state change doesn't come after a bus-off event */
-	if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
+	if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
 		struct sk_buff *skb;
 		struct can_frame *cf;
@@ -706,6 +954,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
 	return err;
 }
 
+static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
+{
+	struct kvaser_usb_net_leaf_priv *leaf =
+		container_of(work, struct kvaser_usb_net_leaf_priv,
+			     chip_state_req_work.work);
+	struct kvaser_usb_net_priv *priv = leaf->net;
+
+	kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
+}
+
 static void
 kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
					  const struct kvaser_usb_err_summary *es,
@@ -724,20 +982,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
 		new_state = CAN_STATE_BUS_OFF;
 	} else if (es->status & M16C_STATE_BUS_PASSIVE) {
 		new_state = CAN_STATE_ERROR_PASSIVE;
-	} else if (es->status & M16C_STATE_BUS_ERROR) {
+	} else if ((es->status & M16C_STATE_BUS_ERROR) &&
+		   cur_state >= CAN_STATE_BUS_OFF) {
 		/* Guard against spurious error events after a busoff */
-		if (cur_state < CAN_STATE_BUS_OFF) {
-			if (es->txerr >= 128 || es->rxerr >= 128)
-				new_state = CAN_STATE_ERROR_PASSIVE;
-			else if (es->txerr >= 96 || es->rxerr >= 96)
-				new_state = CAN_STATE_ERROR_WARNING;
-			else if (cur_state > CAN_STATE_ERROR_ACTIVE)
-				new_state = CAN_STATE_ERROR_ACTIVE;
-		}
-	}
-
-	if (!es->status)
+	} else if (es->txerr >= 128 || es->rxerr >= 128) {
+		new_state = CAN_STATE_ERROR_PASSIVE;
+	} else if (es->txerr >= 96 || es->rxerr >= 96) {
+		new_state = CAN_STATE_ERROR_WARNING;
+	} else {
 		new_state = CAN_STATE_ERROR_ACTIVE;
+	}
 
 	if (new_state != cur_state) {
 		tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
@@ -747,7 +1001,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
 	}
 
 	if (priv->can.restart_ms &&
-	    cur_state >= CAN_STATE_BUS_OFF &&
+	    cur_state == CAN_STATE_BUS_OFF &&
 	    new_state < CAN_STATE_BUS_OFF)
 		priv->can.can_stats.restarts++;
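The rewritten ladder above maps the M16C status bits and the raw error counters onto CAN states, and the 96/128 thresholds come from the CAN specification: a node is error-warning once either counter reaches 96 and error-passive at 128. A standalone restatement of just the counter-to-state mapping, as a sketch:

/* Standalone restatement of the counter mapping used above;
 * thresholds follow the CAN spec (warning >= 96, passive >= 128).
 */
#include <linux/types.h>
#include <linux/can/dev.h>

static enum can_state counters_to_state(u8 txerr, u8 rxerr)
{
	if (txerr >= 128 || rxerr >= 128)
		return CAN_STATE_ERROR_PASSIVE;
	if (txerr >= 96 || rxerr >= 96)
		return CAN_STATE_ERROR_WARNING;
	return CAN_STATE_ERROR_ACTIVE;
}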
@@ -781,6 +1035,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
 	struct sk_buff *skb;
 	struct net_device_stats *stats;
 	struct kvaser_usb_net_priv *priv;
+	struct kvaser_usb_net_leaf_priv *leaf;
 	enum can_state old_state, new_state;
 
 	if (es->channel >= dev->nchannels) {
@@ -790,8 +1045,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
 	}
 
 	priv = dev->nets[es->channel];
+	leaf = priv->sub_priv;
 	stats = &priv->netdev->stats;
 
+	/* Ignore e.g. state change to bus-off reported just after stopping */
+	if (!netif_running(priv->netdev))
+		return;
+
 	/* Update all of the CAN interface's state and error counters before
	 * trying any memory allocation that can actually fail with -ENOMEM.
	 *
@@ -806,6 +1066,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
 	kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
 	new_state = priv->can.state;
 
+	/* If there are errors, request status updates periodically as we do
+	 * not get automatic notifications of improved state.
+	 */
+	if (new_state < CAN_STATE_BUS_OFF &&
+	    (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE))
+		schedule_delayed_work(&leaf->chip_state_req_work,
+				      msecs_to_jiffies(500));
+
 	skb = alloc_can_err_skb(priv->netdev, &cf);
 	if (!skb) {
 		stats->rx_dropped++;
@@ -823,7 +1091,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
 	}
 
 	if (priv->can.restart_ms &&
-	    old_state >= CAN_STATE_BUS_OFF &&
+	    old_state == CAN_STATE_BUS_OFF &&
 	    new_state < CAN_STATE_BUS_OFF) {
 		cf->can_id |= CAN_ERR_RESTARTED;
 		netif_carrier_on(priv->netdev);
@@ -923,11 +1191,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
 
 	case CMD_CAN_ERROR_EVENT:
 		es.channel = 0;
-		es.status = cmd->u.usbcan.error_event.status_ch0;
-		es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
-		es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+		es.status = cmd->u.usbcan.can_error_event.status_ch0;
+		es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
+		es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
 		es.usbcan.other_ch_status =
-			cmd->u.usbcan.error_event.status_ch1;
+			cmd->u.usbcan.can_error_event.status_ch1;
 		kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
 
 		/* The USBCAN firmware supports up to 2 channels.
@@ -935,13 +1203,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
 		 */
 		if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
 			es.channel = 1;
-			es.status = cmd->u.usbcan.error_event.status_ch1;
+			es.status = cmd->u.usbcan.can_error_event.status_ch1;
 			es.txerr =
-				cmd->u.usbcan.error_event.tx_errors_count_ch1;
+				cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
 			es.rxerr =
-				cmd->u.usbcan.error_event.rx_errors_count_ch1;
+				cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
 			es.usbcan.other_ch_status =
-				cmd->u.usbcan.error_event.status_ch0;
+				cmd->u.usbcan.can_error_event.status_ch0;
 			kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
 		}
 		break;
@@ -958,11 +1226,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
 
 	switch (cmd->id) {
 	case CMD_CAN_ERROR_EVENT:
-		es.channel = cmd->u.leaf.error_event.channel;
-		es.status = cmd->u.leaf.error_event.status;
-		es.txerr = cmd->u.leaf.error_event.tx_errors_count;
-		es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
-		es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+		es.channel = cmd->u.leaf.can_error_event.channel;
+		es.status = cmd->u.leaf.can_error_event.status;
+		es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
+		es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
+		es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
 		break;
 	case CMD_LEAF_LOG_MESSAGE:
 		es.channel = cmd->u.leaf.log_message.channel;
@@ -1094,6 +1362,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
 	netif_rx(skb);
 }
 
+static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
+						  const struct kvaser_cmd *cmd)
+{
+	u16 info1 = 0;
+
+	switch (dev->driver_info->family) {
+	case KVASER_LEAF:
+		info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
+		break;
+	case KVASER_USBCAN:
+		info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
+		break;
+	}
+
+	/* info1 will contain the offending cmd_no */
+	switch (info1) {
+	case CMD_SET_CTRL_MODE:
+		dev_warn(&dev->intf->dev,
+			 "CMD_SET_CTRL_MODE error in parameter\n");
+		break;
+
+	case CMD_SET_BUS_PARAMS:
+		dev_warn(&dev->intf->dev,
+			 "CMD_SET_BUS_PARAMS error in parameter\n");
+		break;
+
+	default:
+		dev_warn(&dev->intf->dev,
+			 "Unhandled parameter error event cmd_no (%u)\n",
+			 info1);
+		break;
+	}
+}
+
+static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
+					const struct kvaser_cmd *cmd)
+{
+	u8 error_code = 0;
+
+	switch (dev->driver_info->family) {
+	case KVASER_LEAF:
+		error_code = cmd->u.leaf.error_event.error_code;
+		break;
+	case KVASER_USBCAN:
+		error_code = cmd->u.usbcan.error_event.error_code;
+		break;
+	}
+
+	switch (error_code) {
+	case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
+		/* Received additional CAN message, when firmware TX queue is
+		 * already full. Something is wrong with the driver.
+		 * This should never happen!
+		 */
+		dev_err(&dev->intf->dev,
+			"Received error event TX_QUEUE_FULL\n");
+		break;
+	case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
+		kvaser_usb_leaf_error_event_parameter(dev, cmd);
+		break;
+
+	default:
+		dev_warn(&dev->intf->dev,
+			 "Unhandled error event (%d)\n", error_code);
+		break;
+	}
+}
+
 static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
					      const struct kvaser_cmd *cmd)
 {
@@ -1134,9 +1470,31 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
 	complete(&priv->stop_comp);
 }
 
+static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
+						const struct kvaser_cmd *cmd)
+{
+	struct kvaser_usb_net_priv *priv;
+	u8 channel = cmd->u.busparams.channel;
+
+	if (channel >= dev->nchannels) {
+		dev_err(&dev->intf->dev,
+			"Invalid channel number (%d)\n", channel);
+		return;
+	}
+
+	priv = dev->nets[channel];
+	memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
+	       sizeof(priv->busparams_nominal));
+
+	complete(&priv->get_busparams_comp);
+}
+
 static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
					    const struct kvaser_cmd *cmd)
 {
+	if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+		return;
+
 	switch (cmd->id) {
 	case CMD_START_CHIP_REPLY:
 		kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1169,6 +1527,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
 		kvaser_usb_leaf_tx_acknowledge(dev, cmd);
 		break;
 
+	case CMD_ERROR_EVENT:
+		kvaser_usb_leaf_error_event(dev, cmd);
+		break;
+
+	case CMD_GET_BUS_PARAMS_REPLY:
+		kvaser_usb_leaf_get_busparams_reply(dev, cmd);
+		break;
+
 	/* Ignored commands */
 	case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
 		if (dev->driver_info->family != KVASER_USBCAN)
@@ -1249,7 +1615,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
 {
 	int err;
 
-	init_completion(&priv->start_comp);
+	reinit_completion(&priv->start_comp);
 
 	err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
					      priv->channel);
@@ -1265,9 +1631,12 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
 
 static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
 {
+	struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
 	int err;
 
-	init_completion(&priv->stop_comp);
+	reinit_completion(&priv->stop_comp);
+
+	cancel_delayed_work(&leaf->chip_state_req_work);
 
 	err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
					      priv->channel);
@@ -1315,10 +1684,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
 	return 0;
 }
 
-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
+{
+	struct kvaser_usb_net_leaf_priv *leaf;
+
+	leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
+	if (!leaf)
+		return -ENOMEM;
+
+	leaf->net = priv;
+	INIT_DELAYED_WORK(&leaf->chip_state_req_work,
+			  kvaser_usb_leaf_chip_state_req_work);
+
+	priv->sub_priv = leaf;
+
+	return 0;
+}
+
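The delayed work wired up in init_channel above exists because this firmware only reports worsening bus state on its own; to notice recovery, the driver polls CMD_GET_CHIP_STATE, re-arming a 500 ms delayed work from the error path while errors persist. A minimal shape of that pattern; poll_priv and the function names are illustrative stand-ins:

/* Poll-for-improvement pattern: arm from the error handler,
 * cancel synchronously on stop.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poll_priv {
	struct delayed_work poll_work;
};

static void poll_fn(struct work_struct *work)
{
	struct poll_priv *p = container_of(work, struct poll_priv,
					   poll_work.work);

	/* send an async status request here; the reply handler decides
	 * whether to re-arm via poll_kick()
	 */
	(void)p;
}

static void poll_init(struct poll_priv *p)
{
	INIT_DELAYED_WORK(&p->poll_work, poll_fn);
}

static void poll_kick(struct poll_priv *p)
{
	/* called from the error handler while errors persist */
	schedule_delayed_work(&p->poll_work, msecs_to_jiffies(500));
}

static void poll_stop(struct poll_priv *p)
{
	/* waits for a running instance, so safe before freeing p */
	cancel_delayed_work_sync(&p->poll_work);
}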
+static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
+{
+	struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+
+	if (leaf)
+		cancel_delayed_work_sync(&leaf->chip_state_req_work);
+}
+
+static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
+					 const struct kvaser_usb_busparams *busparams)
 {
 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
-	struct can_bittiming *bt = &priv->can.bittiming;
 	struct kvaser_usb *dev = priv->dev;
 	struct kvaser_cmd *cmd;
 	int rc;
@@ -1331,15 +1725,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
 	cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
 	cmd->u.busparams.channel = priv->channel;
 	cmd->u.busparams.tid = 0xff;
-	cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
-	cmd->u.busparams.sjw = bt->sjw;
-	cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
-	cmd->u.busparams.tseg2 = bt->phase_seg2;
-
-	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
-		cmd->u.busparams.no_samp = 3;
-	else
-		cmd->u.busparams.no_samp = 1;
+	memcpy(&cmd->u.busparams.busparams, busparams,
+	       sizeof(cmd->u.busparams.busparams));
 
 	rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
 
@@ -1347,6 +1734,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
 	return rc;
 }
 
+static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
+{
+	int err;
+
+	if (priv->dev->driver_info->family == KVASER_USBCAN)
+		return -EOPNOTSUPP;
+
+	reinit_completion(&priv->get_busparams_comp);
+
+	err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
+					      priv->channel);
+	if (err)
+		return err;
+
+	if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+					 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
 static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
				    enum can_mode mode)
 {
@@ -1355,9 +1763,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
 
 	switch (mode) {
 	case CAN_MODE_START:
+		kvaser_usb_unlink_tx_urbs(priv);
+
 		err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
 		if (err)
 			return err;
+
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -1404,14 +1816,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
 const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
 	.dev_set_mode = kvaser_usb_leaf_set_mode,
 	.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+	.dev_get_busparams = kvaser_usb_leaf_get_busparams,
 	.dev_set_data_bittiming = NULL,
+	.dev_get_data_busparams = NULL,
 	.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
 	.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
 	.dev_init_card = kvaser_usb_leaf_init_card,
+	.dev_init_channel = kvaser_usb_leaf_init_channel,
+	.dev_remove_channel = kvaser_usb_leaf_remove_channel,
 	.dev_get_software_info = kvaser_usb_leaf_get_software_info,
 	.dev_get_software_details = NULL,
 	.dev_get_card_info = kvaser_usb_leaf_get_card_info,
-	.dev_get_capabilities = NULL,
+	.dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
 	.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
 	.dev_start_chip = kvaser_usb_leaf_start_chip,
 	.dev_stop_chip = kvaser_usb_leaf_stop_chip,
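The ops table above leaves hooks the Leaf/USBCAN hardware cannot service (.dev_set_data_bittiming, .dev_get_data_busparams) as NULL rather than stubbing them out, which pushes the "is this supported?" decision to the dispatching caller. A sketch of that convention with illustrative types; dev_ops here is not the driver's real struct:

/* Optional hooks stay NULL; callers check before dispatching. */
#include <linux/errno.h>
#include <linux/netdevice.h>

struct dev_ops {
	int (*set_bittiming)(struct net_device *ndev);
	int (*set_data_bittiming)(struct net_device *ndev); /* optional */
};

static int apply_data_bittiming(const struct dev_ops *ops,
				struct net_device *ndev)
{
	if (!ops->set_data_bittiming)
		return -EOPNOTSUPP;	/* classic-CAN-only device */
	return ops->set_data_bittiming(ndev);
}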
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 21063335ab599..c07e327929ba5 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -47,6 +47,10 @@
 #define MCBA_VER_REQ_USB 1
 #define MCBA_VER_REQ_CAN 2
 
+/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */
+#define MCBA_VER_TERMINATION_ON 0
+#define MCBA_VER_TERMINATION_OFF 1
+
 #define MCBA_SIDL_EXID_MASK 0x8
 #define MCBA_DLC_MASK 0xf
 #define MCBA_DLC_RTR_MASK 0x40
@@ -469,7 +473,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
 		priv->usb_ka_first_pass = false;
 	}
 
-	if (msg->termination_state)
+	if (msg->termination_state == MCBA_VER_TERMINATION_ON)
 		priv->can.termination = MCBA_TERMINATION_ENABLED;
 	else
 		priv->can.termination = MCBA_TERMINATION_DISABLED;
@@ -789,9 +793,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term)
 	};
 
 	if (term == MCBA_TERMINATION_ENABLED)
-		usb_msg.termination = 1;
+		usb_msg.termination = MCBA_VER_TERMINATION_ON;
 	else
-		usb_msg.termination = 0;
+		usb_msg.termination = MCBA_VER_TERMINATION_OFF;
 
 	mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
 
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e38906ae8f235..fbeb99ab9e4dd 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -376,6 +376,17 @@ static struct mdio_driver dsa_loop_drv = {
 
 #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
 
+static void dsa_loop_phydevs_unregister(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_FIXED_PHYS; i++)
+		if (!IS_ERR(phydevs[i])) {
+			fixed_phy_unregister(phydevs[i]);
+			phy_device_free(phydevs[i]);
+		}
+}
+
 static int __init dsa_loop_init(void)
 {
 	struct fixed_phy_status status = {
@@ -383,23 +394,23 @@ static int __init dsa_loop_init(void)
 		.speed = SPEED_100,
 		.duplex = DUPLEX_FULL,
 	};
-	unsigned int i;
+	unsigned int i, ret;
 
 	for (i = 0; i < NUM_FIXED_PHYS; i++)
 		phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
 
-	return mdio_driver_register(&dsa_loop_drv);
+	ret = mdio_driver_register(&dsa_loop_drv);
+	if (ret)
+		dsa_loop_phydevs_unregister();
+
+	return ret;
 }
 module_init(dsa_loop_init);
 
 static void __exit dsa_loop_exit(void)
 {
-	unsigned int i;
-
 	mdio_driver_unregister(&dsa_loop_drv);
-	for (i = 0; i < NUM_FIXED_PHYS; i++)
-		if (!IS_ERR(phydevs[i]))
-			fixed_phy_unregister(phydevs[i]);
+	dsa_loop_phydevs_unregister();
 }
 module_exit(dsa_loop_exit);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 2044d440d7de4..deeed50a42c05 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -958,7 +958,7 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
 	{ .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
 	{ .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
 	{ .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
-	{ .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+	{ .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", },
 	{ .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
 	{ .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
 	{ .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
@@ -1002,9 +1002,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
 		ret = lan9303_read_switch_port(
			chip, port, lan9303_mib[u].offset, &reg);
 
-		if (ret)
+		if (ret) {
 			dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
				 port, lan9303_mib[u].offset);
+			reg = 0;
+		}
 		data[u] = reg;
 	}
 }
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index ece4c0512ee2d..f42f2f4e4b60e 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -678,10 +678,10 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
 	ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
 
 	/* clear forwarding port */
-	alu_table[2] &= ~BIT(port);
+	alu_table[1] &= ~BIT(port);
 
 	/* if there is no port to forward, clear table */
-	if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+	if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
 		alu_table[0] = 0;
 		alu_table[1] = 0;
 		alu_table[2] = 0;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 265620a81f9f6..70155e996f7d7 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -502,14 +502,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
 static int
 mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
 {
-	struct mt7530_priv *priv = ds->priv;
+	return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
 	u32 top_sig;
 	u32 hwstrap;
 	u32 xtal;
 	u32 val;
 
 	if (mt7531_dual_sgmii_supported(priv))
-		return 0;
+		return;
 
 	val = mt7530_read(priv, MT7531_CREV);
 	top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -588,8 +593,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
 	val |= EN_COREPLL;
 	mt7530_write(priv, MT7531_PLLGP_EN, val);
 	usleep_range(25, 35);
-
-	return 0;
 }
 
 static void
@@ -1731,6 +1734,8 @@ mt7531_setup(struct dsa_switch *ds)
 		   SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
		   SYS_CTRL_REG_RST);
 
+	mt7531_pll_setup(priv);
+
 	if (mt7531_dual_sgmii_supported(priv)) {
 		priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
 
@@ -2281,8 +2286,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
 	case 6:
 		interface = PHY_INTERFACE_MODE_2500BASEX;
 
-		mt7531_pad_setup(ds, interface);
-
 		priv->p6_interface = interface;
 		break;
 	default:
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 7b7a8a74405df..371b345635e62 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -666,44 +666,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	struct mv88e6xxx_port *p;
-	int err;
+	int err = 0;
 
 	p = &chip->ports[port];
 
-	/* FIXME: is this the correct test? If we're in fixed mode on an
-	 * internal port, why should we process this any different from
-	 * PHY mode? On the other hand, the port may be automedia between
-	 * an internal PHY and the serdes...
-	 */
-	if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
-		return;
-
 	mv88e6xxx_reg_lock(chip);
-	/* In inband mode, the link may come up at any time while the link
-	 * is not forced down. Force the link down while we reconfigure the
-	 * interface mode.
-	 */
-	if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-	    chip->info->ops->port_set_link)
-		chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
-
-	err = mv88e6xxx_port_config_interface(chip, port, state->interface);
-	if (err && err != -EOPNOTSUPP)
-		goto err_unlock;
-
-	err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
-					  state->advertising);
-	/* FIXME: we should restart negotiation if something changed - which
-	 * is something we get if we convert to using phylinks PCS operations.
-	 */
-	if (err > 0)
-		err = 0;
+	if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+		/* In inband mode, the link may come up at any time while the
+		 * link is not forced down. Force the link down while we
+		 * reconfigure the interface mode.
+		 */
+		if (mode == MLO_AN_INBAND &&
+		    p->interface != state->interface &&
+		    chip->info->ops->port_set_link)
+			chip->info->ops->port_set_link(chip, port,
+						       LINK_FORCED_DOWN);
+
+		err = mv88e6xxx_port_config_interface(chip, port,
+						      state->interface);
+		if (err && err != -EOPNOTSUPP)
+			goto err_unlock;
+
+		err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
						  state->interface,
						  state->advertising);
+		/* FIXME: we should restart negotiation if something changed -
+		 * which is something we get if we convert to using phylinks
+		 * PCS operations.
+		 */
+		if (err > 0)
+			err = 0;
+	}
 
 	/* Undo the forced down state above after completing configuration
-	 * irrespective of its state on entry, which allows the link to come up.
+	 * irrespective of its state on entry, which allows the link to come
+	 * up in the in-band case where there is no separate SERDES. Also
+	 * ensure that the link can come up if the PPU is in use and we are
+	 * in PHY mode (we treat the PPU as an effective in-band mechanism.)
 	 */
-	if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-	    chip->info->ops->port_set_link)
+	if (chip->info->ops->port_set_link &&
+	    ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+	     (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
 		chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
 
 	p->interface = state->interface;
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index ec2ac91abcfa4..8e3d185c84601 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -95,6 +95,8 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
 		if (IS_ERR(region)) {
 			while (--i >= 0)
				dsa_devlink_region_destroy(priv->regions[i]);
+
+			kfree(priv->regions);
 			return PTR_ERR(region);
 		}
 
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index f4f50b3a472e1..0d56cb4f5dd9b 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
 			if (dma_mapping_error(greth->dev, dma_addr)) {
 				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
+				dev_kfree_skb(skb);
 				goto cleanup;
 			}
 			greth->rx_skbuff[i] = skb;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 52414ac2c901a..1722d4091ea3f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4488,13 +4488,19 @@ static struct pci_driver ena_pci_driver = {
 
 static int __init ena_init(void)
 {
+	int ret;
+
 	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
 	if (!ena_wq) {
 		pr_err("Failed to create workqueue\n");
 		return -ENOMEM;
 	}
 
-	return pci_register_driver(&ena_pci_driver);
+	ret = pci_register_driver(&ena_pci_driver);
+	if (ret)
+		destroy_workqueue(ena_wq);
+
+	return ret;
 }
 
 static void __exit ena_cleanup(void)
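The dsa_loop and ena changes above are the same fix in two drivers: anything created in module init before a failing registration must be unwound on the error path, or it leaks until reboot. A generic sketch of the shape; my_pci_driver and the workqueue name are illustrative:

/* Unwind earlier allocations when a later registration fails. */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct pci_driver my_pci_driver = {
	.name = "my_drv",	/* hypothetical driver */
};

static int __init my_init(void)
{
	int ret;

	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;

	ret = pci_register_driver(&my_pci_driver);
	if (ret)
		destroy_workqueue(my_wq);	/* don't leak the wq */

	return ret;
}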
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 961796abab352..5e8b72db88739 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -825,7 +825,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
 	head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
 	dev->stats.tx_bytes += skb->len;
-	dev_kfree_skb( skb );
+	dev_consume_skb_irq(skb);
 	lp->cur_tx++;
 	while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
 		lp->cur_tx -= TX_RING_SIZE;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index aff44241988c4..9dae225b7fd5d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -997,7 +997,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
 		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
-		dev_kfree_skb(skb);
+		dev_consume_skb_irq(skb);
 	} else {
 		lp->tx_skbuff[entry] = skb;
 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index d5fd49dd25f33..decc1c09a031b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
 	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
 }
 
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+	unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
+	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+		return max_q_count;
+	else
+		return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
-	unsigned int max_q_count, q_count;
 	unsigned int reg, reg_val;
-	unsigned int i;
+	unsigned int i, q_count;
 
 	/* Clear MTL flow control */
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
 
 	/* Clear MAC flow control */
-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+	q_count = xgbe_get_fc_queue_count(pdata);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
 	struct ieee_pfc *pfc = pdata->pfc;
 	struct ieee_ets *ets = pdata->ets;
-	unsigned int max_q_count, q_count;
 	unsigned int reg, reg_val;
-	unsigned int i;
+	unsigned int i, q_count;
 
 	/* Set MTL flow control */
 	for (i = 0; i < pdata->rx_q_count; i++) {
@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 	}
 
 	/* Set MAC flow control */
-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+	q_count = xgbe_get_fc_queue_count(pdata);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a816b30bca04c..a5d6faf7b89e1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1064,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
 
 	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
 
+	tasklet_kill(&pdata->tasklet_dev);
+	tasklet_kill(&pdata->tasklet_ecc);
+
 	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
 		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
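The tasklet_kill() calls added here (and in the xgbe i2c/an stop paths below) close a use-after-free window: the IRQ handler schedules a tasklet, so after the IRQ is freed a still-queued tasklet must be killed before its data can be torn down. A sketch of the ordering, with my_pdata and the aux naming as illustrative stand-ins for xgbe's per-device data:

/* Free the IRQ first so nothing can schedule the tasklet anymore,
 * then kill any instance already queued or running.
 */
#include <linux/device.h>
#include <linux/interrupt.h>

struct my_pdata {
	struct device *dev;
	unsigned int dev_irq;
	unsigned int aux_irq;
	struct tasklet_struct tasklet_aux;
};

static void my_aux_stop(struct my_pdata *pdata)
{
	/* only when the auxiliary IRQ is actually separate */
	if (pdata->dev_irq != pdata->aux_irq) {
		devm_free_irq(pdata->dev, pdata->aux_irq, pdata);
		tasklet_kill(&pdata->tasklet_aux);
	}
}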
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 22d4fc547a0a3..a9ccc4258ee50 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
 	xgbe_i2c_disable(pdata);
 	xgbe_i2c_clear_all_interrupts(pdata);
 
-	if (pdata->dev_irq != pdata->i2c_irq)
+	if (pdata->dev_irq != pdata->i2c_irq) {
 		devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+		tasklet_kill(&pdata->tasklet_i2c);
+	}
 }
 
 static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4e97b48695220..43fdd111235a6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
 	reg |= XGBE_KR_TRAINING_ENABLE;
 	reg |= XGBE_KR_TRAINING_START;
 	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+	pdata->kr_start_time = jiffies;
 
 	netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n");
 
@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
 
 	xgbe_switch_mode(pdata);
 
+	pdata->an_result = XGBE_AN_READY;
+
 	xgbe_an_restart(pdata);
 
 	return XGBE_AN_INCOMPAT_LINK;
@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
 static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
 {
 	unsigned long link_timeout;
+	unsigned long kr_time;
+	int wait;
 
 	link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
 	if (time_after(jiffies, link_timeout)) {
+		if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+		    pdata->phy.autoneg == AUTONEG_ENABLE) {
+			/* AN restart should not happen while KR training is
+			 * in progress. The while loop ensures no AN restart
+			 * during KR training, waits up to 500ms and AN
+			 * restart is triggered only if KR training failed.
+			 */
+			wait = XGBE_KR_TRAINING_WAIT_ITER;
+			while (wait--) {
+				kr_time = pdata->kr_start_time +
					  msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+				if (time_after(jiffies, kr_time))
+					break;
+				/* AN restart is not required, if AN result is COMPLETE */
+				if (pdata->an_result == XGBE_AN_COMPLETE)
+					return;
+				usleep_range(10000, 11000);
+			}
+		}
 		netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
 		xgbe_phy_config_aneg(pdata);
 	}
@@ -1390,8 +1414,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
 	/* Disable auto-negotiation */
 	xgbe_an_disable_all(pdata);
 
-	if (pdata->dev_irq != pdata->an_irq)
+	if (pdata->dev_irq != pdata->an_irq) {
 		devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+		tasklet_kill(&pdata->tasklet_an);
+	}
 
 	pdata->phy_if.phy_impl.stop(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 213769054391c..97e32c0490f8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -189,6 +189,7 @@ enum xgbe_sfp_cable {
 	XGBE_SFP_CABLE_UNKNOWN = 0,
 	XGBE_SFP_CABLE_ACTIVE,
 	XGBE_SFP_CABLE_PASSIVE,
+	XGBE_SFP_CABLE_FIBER,
 };
 
 enum xgbe_sfp_base {
@@ -236,9 +237,7 @@ enum xgbe_sfp_speed {
 #define XGBE_SFP_BASE_BR 12
 #define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
 #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
 
 #define XGBE_SFP_BASE_CU_CABLE_LEN 18
 
@@ -284,6 +283,8 @@ struct xgbe_sfp_eeprom {
 #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
 #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
 
+#define XGBE_MOLEX_VENDOR "Molex Inc. "
+
 struct xgbe_sfp_ascii {
 	union {
 		char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@@ -823,25 +824,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
 static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
				  enum xgbe_sfp_speed sfp_speed)
 {
-	u8 *sfp_base, min, max;
+	u8 *sfp_base, min;
 
 	sfp_base = sfp_eeprom->base;
 
 	switch (sfp_speed) {
 	case XGBE_SFP_SPEED_1000:
 		min = XGBE_SFP_BASE_BR_1GBE_MIN;
-		max = XGBE_SFP_BASE_BR_1GBE_MAX;
 		break;
 	case XGBE_SFP_SPEED_10000:
 		min = XGBE_SFP_BASE_BR_10GBE_MIN;
-		max = XGBE_SFP_BASE_BR_10GBE_MAX;
 		break;
 	default:
 		return false;
 	}
 
-	return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
-		(sfp_base[XGBE_SFP_BASE_BR] <= max));
+	return sfp_base[XGBE_SFP_BASE_BR] >= min;
 }
 
 static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
@@ -1142,16 +1140,21 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
 	phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
 	phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
 
-	/* Assume ACTIVE cable unless told it is PASSIVE */
+	/* Assume FIBER cable unless told otherwise */
 	if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
 		phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
 		phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
-	} else {
+	} else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
 		phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+	} else {
+		phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
 	}
 
 	/* Determine the type of SFP */
-	if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+	if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+	    xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+		phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+	else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
 		phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
 	else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
 		phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@@ -1167,9 +1170,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
 		phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
 	else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
 		phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
-	else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
-		 xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
-		phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
 
 	switch (phy_data->sfp_base) {
 	case XGBE_SFP_BASE_1000_T:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 3305979a9f7c1..e0b8f3c4cc0b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -289,6 +289,7 @@
 /* Auto-negotiation */
 #define XGBE_AN_MS_TIMEOUT 500
 #define XGBE_LINK_TIMEOUT 5
+#define XGBE_KR_TRAINING_WAIT_ITER 50
 
 #define XGBE_SGMII_AN_LINK_STATUS BIT(1)
 #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
@@ -1253,6 +1254,7 @@ struct xgbe_prv_data {
 	unsigned int parallel_detect;
 	unsigned int fec_ability;
 	unsigned long an_start;
+	unsigned long kr_start_time;
 	enum xgbe_an_mode an_mode;
 
 	/* I2C support */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 78c7cbc372b05..71151f675a498 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1004,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)
 	xgene_enet_napi_enable(pdata);
 	ret = xgene_enet_register_irq(ndev);
-	if (ret)
+	if (ret) {
+		xgene_enet_napi_disable(pdata);
 		return ret;
+	}
 
 	if (ndev->phydev) {
 		phy_start(ndev->phydev);
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 1e4e402f07d76..dd6c44f5f9256 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1511,7 +1511,7 @@ static void bmac_tx_timeout(struct timer_list *t)
 	i = bp->tx_empty;
 	++dev->stats.tx_errors;
 	if (i != bp->tx_fill) {
-		dev_kfree_skb(bp->tx_bufs[i]);
+		dev_kfree_skb_irq(bp->tx_bufs[i]);
 		bp->tx_bufs[i] = NULL;
 		if (++i >= N_TX_RING) i = 0;
 		bp->tx_empty = i;
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 9e5006e592155..6f6530c291666 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -841,7 +841,7 @@ static void mace_tx_timeout(struct timer_list *t)
 	if (mp->tx_bad_runt) {
 		mp->tx_bad_runt = 0;
 	} else if (i != mp->tx_fill) {
-		dev_kfree_skb(mp->tx_bufs[i]);
+		dev_kfree_skb_irq(mp->tx_bufs[i]);
 		if (++i >= N_TX_RING)
			i = 0;
 		mp->tx_empty = i;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index de2a9348bc3f8..1d512e6a89f5c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -13,6 +13,7 @@
 #include "aq_ptp.h"
 #include "aq_filters.h"
 #include "aq_macsec.h"
+#include "aq_main.h"
 
 #include <linux/ptp_clock_kernel.h>
 
@@ -841,7 +842,7 @@ static int aq_set_ringparam(struct net_device *ndev,
 	if (netif_running(ndev)) {
 		ndev_running = true;
-		dev_close(ndev);
+		aq_ndev_close(ndev);
 	}
 
 	cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
@@ -857,7 +858,7 @@ static int aq_set_ringparam(struct net_device *ndev,
 		goto err_exit;
 
 	if (ndev_running)
-		err = dev_open(ndev, NULL);
+		err = aq_ndev_open(ndev);
 
 err_exit:
 	return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 4a6dfac857ca9..ee823a18294cd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -585,6 +585,7 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
 
 	ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
 
+	memzero_explicit(&key_rec, sizeof(key_rec));
 	return ret;
 }
 
@@ -932,6 +933,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
 
 	ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
 
+	memzero_explicit(&sa_key_record, sizeof(sa_key_record));
 	return ret;
 }
 
@@ -1451,26 +1453,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic)
			egress_sa_threshold_expired);
 }
 
+#define AQ_LOCKED_MDO_DEF(mdo)					\
+static int aq_locked_mdo_##mdo(struct macsec_context *ctx)	\
+{								\
+	struct aq_nic_s *nic = netdev_priv(ctx->netdev);	\
+	int ret;						\
+	mutex_lock(&nic->macsec_mutex);				\
+	ret = aq_mdo_##mdo(ctx);				\
+	mutex_unlock(&nic->macsec_mutex);			\
+	return ret;						\
+}
+
+AQ_LOCKED_MDO_DEF(dev_open)
+AQ_LOCKED_MDO_DEF(dev_stop)
+AQ_LOCKED_MDO_DEF(add_secy)
+AQ_LOCKED_MDO_DEF(upd_secy)
+AQ_LOCKED_MDO_DEF(del_secy)
+AQ_LOCKED_MDO_DEF(add_rxsc)
+AQ_LOCKED_MDO_DEF(upd_rxsc)
+AQ_LOCKED_MDO_DEF(del_rxsc)
+AQ_LOCKED_MDO_DEF(add_rxsa)
+AQ_LOCKED_MDO_DEF(upd_rxsa)
+AQ_LOCKED_MDO_DEF(del_rxsa)
+AQ_LOCKED_MDO_DEF(add_txsa)
+AQ_LOCKED_MDO_DEF(upd_txsa)
+AQ_LOCKED_MDO_DEF(del_txsa)
+AQ_LOCKED_MDO_DEF(get_dev_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
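AQ_LOCKED_MDO_DEF above stamps out one locked shim per MACsec operation, so every entry point serializes on the driver's private mutex instead of relying on the RTNL lock. A reduced sketch of the wrapper-generation idea; struct nic and the op names are illustrative, not the driver's:

/* One macro generates a locked wrapper per operation. */
#include <linux/mutex.h>

struct nic {
	struct mutex lock;
};

static int op_add_thing(struct nic *n) { return 0; }
static int op_del_thing(struct nic *n) { return 0; }

#define LOCKED_OP_DEF(op)				\
static int locked_op_##op(struct nic *n)		\
{							\
	int ret;					\
	mutex_lock(&n->lock);				\
	ret = op_##op(n);				\
	mutex_unlock(&n->lock);				\
	return ret;					\
}

LOCKED_OP_DEF(add_thing)
LOCKED_OP_DEF(del_thing)

The ops table is then filled with the locked_op_* symbols, exactly as aq_macsec_ops is filled with aq_locked_mdo_* below, so no caller can reach the unlocked implementation by accident.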
+
 const struct macsec_ops aq_macsec_ops = {
-	.mdo_dev_open = aq_mdo_dev_open,
-	.mdo_dev_stop = aq_mdo_dev_stop,
-	.mdo_add_secy = aq_mdo_add_secy,
-	.mdo_upd_secy = aq_mdo_upd_secy,
-	.mdo_del_secy = aq_mdo_del_secy,
-	.mdo_add_rxsc = aq_mdo_add_rxsc,
-	.mdo_upd_rxsc = aq_mdo_upd_rxsc,
-	.mdo_del_rxsc = aq_mdo_del_rxsc,
-	.mdo_add_rxsa = aq_mdo_add_rxsa,
-	.mdo_upd_rxsa = aq_mdo_upd_rxsa,
-	.mdo_del_rxsa = aq_mdo_del_rxsa,
-	.mdo_add_txsa = aq_mdo_add_txsa,
-	.mdo_upd_txsa = aq_mdo_upd_txsa,
-	.mdo_del_txsa = aq_mdo_del_txsa,
-	.mdo_get_dev_stats = aq_mdo_get_dev_stats,
-	.mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
-	.mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
-	.mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
-	.mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+	.mdo_dev_open = aq_locked_mdo_dev_open,
+	.mdo_dev_stop = aq_locked_mdo_dev_stop,
+	.mdo_add_secy = aq_locked_mdo_add_secy,
+	.mdo_upd_secy = aq_locked_mdo_upd_secy,
+	.mdo_del_secy = aq_locked_mdo_del_secy,
+	.mdo_add_rxsc = aq_locked_mdo_add_rxsc,
+	.mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
+	.mdo_del_rxsc = aq_locked_mdo_del_rxsc,
+	.mdo_add_rxsa = aq_locked_mdo_add_rxsa,
+	.mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
+	.mdo_del_rxsa = aq_locked_mdo_del_rxsa,
+	.mdo_add_txsa = aq_locked_mdo_add_txsa,
+	.mdo_upd_txsa = aq_locked_mdo_upd_txsa,
+	.mdo_del_txsa = aq_locked_mdo_del_txsa,
+	.mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
+	.mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
+	.mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
+	.mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
+	.mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
 };
 
 int aq_macsec_init(struct aq_nic_s *nic)
@@ -1492,6 +1525,7 @@ int aq_macsec_init(struct aq_nic_s *nic)
 
 	nic->ndev->features |= NETIF_F_HW_MACSEC;
 	nic->ndev->macsec_ops = &aq_macsec_ops;
+	mutex_init(&nic->macsec_mutex);
 
 	return 0;
 }
@@ -1515,7 +1549,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
 	if (!nic->macsec_cfg)
 		return 0;
 
-	rtnl_lock();
+	mutex_lock(&nic->macsec_mutex);
 
 	if (nic->aq_fw_ops->send_macsec_req) {
 		struct macsec_cfg_request cfg = { 0 };
@@ -1564,7 +1598,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
 	ret = aq_apply_macsec_cfg(nic);
 
 unlock:
-	rtnl_unlock();
+	mutex_unlock(&nic->macsec_mutex);
 	return ret;
 }
 
@@ -1576,9 +1610,9 @@ void aq_macsec_work(struct aq_nic_s *nic)
 	if (!netif_carrier_ok(nic->ndev))
 		return;
 
-	rtnl_lock();
+	mutex_lock(&nic->macsec_mutex);
 	aq_check_txsa_expiration(nic);
-	rtnl_unlock();
+	mutex_unlock(&nic->macsec_mutex);
 }
 
 int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
@@ -1589,21 +1623,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
 	if (!cfg)
 		return 0;
 
+	mutex_lock(&nic->macsec_mutex);
+
 	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
 		if (!test_bit(i, &cfg->rxsc_idx_busy))
			continue;
 		cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
 	}
 
+	mutex_unlock(&nic->macsec_mutex);
 	return cnt;
 }
 
 int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
 {
+	int cnt;
+
 	if (!nic->macsec_cfg)
 		return 0;
 
-	return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+	mutex_lock(&nic->macsec_mutex);
+	cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
+	mutex_unlock(&nic->macsec_mutex);
+
+	return cnt;
 }
 
 int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
@@ -1614,12 +1657,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
 	if (!cfg)
 		return 0;
 
+	mutex_lock(&nic->macsec_mutex);
+
 	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
 		if (!test_bit(i, &cfg->txsc_idx_busy))
			continue;
 		cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
 	}
 
+	mutex_unlock(&nic->macsec_mutex);
 	return cnt;
 }
@@ -1691,6 +1737,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
 	if (!cfg)
 		return data;
 
+	mutex_lock(&nic->macsec_mutex);
+
 	aq_macsec_update_stats(nic);
 
 	common_stats = &cfg->stats;
@@ -1773,5 +1821,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
 
 	data += i;
 
+	mutex_unlock(&nic->macsec_mutex);
+
 	return data;
 }
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 4af0cd9530de6..1401fc4632b51 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -53,7 +53,7 @@ struct net_device *aq_ndev_alloc(void)
 	return ndev;
 }
 
-static int aq_ndev_open(struct net_device *ndev)
+int aq_ndev_open(struct net_device *ndev)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
 	int err = 0;
@@ -83,17 +83,14 @@ static int aq_ndev_open(struct net_device *ndev)
 	return err;
 }
 
-static int aq_ndev_close(struct net_device *ndev)
+int aq_ndev_close(struct net_device *ndev)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
 	int err = 0;
 
 	err = aq_nic_stop(aq_nic);
-	if (err < 0)
-		goto err_exit;
 	aq_nic_deinit(aq_nic, true);
 
-err_exit:
 	return err;
 }
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce733..2a562ab7a5afd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -14,5 +14,7 @@ void aq_ndev_schedule_work(struct work_struct *work);
 struct net_device *aq_ndev_alloc(void);
+int aq_ndev_open(struct net_device *ndev);
+int aq_ndev_close(struct net_device *ndev);
 
 #endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 926cca9a0c837..6da3efa289a3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -152,6 +152,8 @@ struct aq_nic_s {
 	struct mutex fwreq_mutex;
 #if IS_ENABLED(CONFIG_MACSEC)
 	struct aq_macsec_cfg *macsec_cfg;
+	/* mutex to protect data in macsec_cfg */
+	struct mutex macsec_mutex;
 #endif
 	/* PTP support */
 	struct aq_ptp_s *aq_ptp;
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 36c7cf05630a1..4319249595207 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
			 u16 table_index)
 {
 	u16 packed_record[18];
+	int ret;
 
 	if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
 		return -EINVAL;
@@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
 
 	packed_record[16] = rec->key_len & 0x3;
 
-	return set_raw_ingress_record(hw, packed_record, 18, 2,
-				      ROWOFFSET_INGRESSSAKEYRECORD +
-				      table_index);
+	ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+				     ROWOFFSET_INGRESSSAKEYRECORD +
+				     table_index);
+
+	memzero_explicit(packed_record, sizeof(packed_record));
+	return ret;
 }
 
 int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
@@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw,
 	ret = set_raw_egress_record(hw, packed_record, 8, 2,
				    ROWOFFSET_EGRESSSAKEYRECORD + table_index);
 	if (unlikely(ret))
-		return ret;
+		goto clear_key;
 	ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
				    ROWOFFSET_EGRESSSAKEYRECORD + table_index - 32);
-	if (unlikely(ret))
-		return ret;
-
-	return 0;
+clear_key:
+	memzero_explicit(packed_record, sizeof(packed_record));
+	return ret;
 }
 
 int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
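The memzero_explicit() calls added above scrub stack copies of MACsec key material on every exit path. A plain memset() on a buffer that is about to go out of scope can legally be elided by the compiler as a dead store; memzero_explicit() uses a barrier so the clear always happens. A sketch with an illustrative key struct (key_rec and program_key are not the driver's names):

/* Always scrub stack copies of secrets, success or failure. */
#include <linux/string.h>
#include <linux/types.h>

struct key_rec {
	u8 key[32];
};

static int program_key(const struct key_rec *rec)
{
	struct key_rec tmp = *rec;	/* transient stack copy */
	int ret = 0;

	/* ...pack tmp and write it to hardware here... */

	/* memset() here could be optimized away; this cannot */
	memzero_explicit(&tmp, sizeof(tmp));
	return ret;
}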
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index c26c9b0c00d8f..fe3ca3af431a4 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1468,7 +1468,7 @@ static int ag71xx_open(struct net_device *ndev)
 	if (ret) {
 		netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",
			  ret);
-		goto err;
+		return ret;
 	}
 
 	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
@@ -1489,6 +1489,7 @@ static int ag71xx_open(struct net_device *ndev)
 
 err:
 	ag71xx_rings_cleanup(ag);
+	phylink_disconnect_phy(ag->phylink);
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 26746197515fc..022aebb68f462 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -228,12 +228,12 @@ static int bgmac_probe(struct bcma_device *core)
 		bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
 		bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
 		bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
-		if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
-		    ci->pkg == BCMA_PKG_ID_BCM47186) {
+		if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+		    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
 			bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
 			bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
 		}
-		if (ci->pkg == BCMA_PKG_ID_BCM5358)
+		if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
 			bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
 		break;
 	case BCMA_CHIP_ID_BCM53573:
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6290d8bedc92e..9960127f612ea 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1568,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
 	phy_disconnect(bgmac->net_dev->phydev);
 	netif_napi_del(&bgmac->napi);
 	bgmac_dma_free(bgmac);
-	free_netdev(bgmac->net_dev);
 }
 EXPORT_SYMBOL_GPL(bgmac_enet_remove);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 198e041d84109..4f669e7c75587 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -788,6 +788,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
			  pad, len, fp->rx_buf_size);
 		bnx2x_panic();
+		bnx2x_frag_free(fp, new_data);
 		return;
 	}
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 08437eaacbb96..ac327839eed90 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -795,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
 {
-	struct pci_dev *dev;
 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+	struct pci_dev *dev;
+	bool pending;
 
 	if (!vf)
 		return false;
 
 	dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
-	if (dev)
-		return bnx2x_is_pcie_pending(dev);
-	return false;
+	if (!dev)
+		return false;
+	pending = bnx2x_is_pcie_pending(dev);
+	pci_dev_put(dev);
+
+	return pending;
 }
 
 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b818d5f342d53..c4a768ce8c99d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8761,10 +8761,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
 		return rc;
 	}
-	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
 		netdev_err(bp->dev, "tx ring reservation failure\n");
 		netdev_reset_tc(bp->dev);
-		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		if (bp->tx_nr_rings_xdp)
+			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+		else
+			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 		return -ENOMEM;
 	}
 	return 0;
@@ -12008,8 +12012,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(fltr, head, hash) {
 		if (bnxt_fltr_match(fltr, new_fltr)) {
+			rc = fltr->sw_id;
 			rcu_read_unlock();
-			rc = 0;
 			goto err_free;
 		}
 	}
@@ -13111,8 +13115,16 @@ static struct pci_driver bnxt_pci_driver = {
 
 static int __init bnxt_init(void)
 {
+	int err;
+
 	bnxt_debug_init();
-	return pci_register_driver(&bnxt_pci_driver);
+	err = pci_register_driver(&bnxt_pci_driver);
+	if (err) {
+		bnxt_debug_exit();
+		return err;
+	}
+
+	return 0;
 }
 
 static void __exit bnxt_exit(void)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f8f7756195205..81b63d1c2391f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -125,7 +125,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
 	}
 
reset_coalesce:
-	if (netif_running(dev)) {
+	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
 		if (update_stats) {
 			rc = bnxt_close_nic(bp, true, false);
 			if (!rc)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5143cdd0eecad..613ca6124e3ce 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11185,7 +11185,7 @@ static void tg3_reset_task(struct work_struct *work)
 	rtnl_lock();
 	tg3_full_lock(tp, 0);
 
-	if (!netif_running(tp->dev)) {
+	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
 		tg3_flag_clear(tp, RESET_TASK_PENDING);
 		tg3_full_unlock(tp);
 		rtnl_unlock();
@@ -18146,16 +18146,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); + tg3_reset_task_cancel(tp); + rtnl_lock(); + netif_device_detach(dev); if (netif_running(dev)) dev_close(dev); - if (system_state == SYSTEM_POWER_OFF) - tg3_power_down(tp); + tg3_power_down(tp); rtnl_unlock(); + + pci_disable_device(pdev); } /** @@ -18175,6 +18179,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, netdev_info(netdev, "PCI I/O error detected\n"); + /* Want to make sure that the reset task doesn't run */ + tg3_reset_task_cancel(tp); + rtnl_lock(); /* Could be second call or maybe we don't have netdev yet */ @@ -18191,9 +18198,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, tg3_timer_stop(tp); - /* Want to make sure that the reset task doesn't run */ - tg3_reset_task_cancel(tp); - netif_device_detach(netdev); /* Clean up software state, even if MMIO is blocked */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 792c8147c2c4c..e0d62e2513879 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1963,7 +1963,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || skb_is_nonlinear(*skb); int padlen = ETH_ZLEN - (*skb)->len; - int headroom = skb_headroom(*skb); int tailroom = skb_tailroom(*skb); struct sk_buff *nskb; u32 fcs; @@ -1977,9 +1976,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) /* FCS could be appeded to tailroom. */ if (tailroom >= ETH_FCS_LEN) goto add_fcs; - /* FCS could be appeded by moving data to headroom. */ - else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) - padlen = 0; /* No room for FCS, need to reallocate skb. 
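The tg3 hunks above are an ordering fix for .shutdown and .error_detected: the asynchronous reset worker is cancelled first, before rtnl and the device locks are taken, because the worker itself acquires those locks. A sketch of that ordering, assuming a reset_task worker in the private struct:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct work_struct reset_task;
	struct net_device *ndev;
};

static void foo_shutdown(struct foo_priv *priv)
{
	/* May sleep, and must run with no locks held: the worker
	 * takes rtnl and the device locks itself. */
	cancel_work_sync(&priv->reset_task);

	rtnl_lock();
	netif_device_detach(priv->ndev);
	if (netif_running(priv->ndev))
		dev_close(priv->ndev);
	rtnl_unlock();
}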
*/ else padlen = ETH_FCS_LEN; @@ -1988,10 +1984,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) padlen += ETH_FCS_LEN; } - if (!cloned && headroom + tailroom >= padlen) { - (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); - skb_set_tail_pointer(*skb, (*skb)->len); - } else { + if (cloned || tailroom < padlen) { nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); if (!nskb) return -ENOMEM; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e0d18e9171080..eefb25bcf57ff 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1798,13 +1798,10 @@ static int liquidio_open(struct net_device *netdev) ifstate_set(lio, LIO_IFSTATE_RUNNING); - if (OCTEON_CN23XX_PF(oct)) { - if (!oct->msix_on) - if (setup_tx_poll_fn(netdev)) - return -1; - } else { - if (setup_tx_poll_fn(netdev)) - return -1; + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { + ret = setup_tx_poll_fn(netdev); + if (ret) + goto err_poll; } netif_tx_start_all_queues(netdev); @@ -1817,7 +1814,7 @@ static int liquidio_open(struct net_device *netdev) /* tell Octeon to start forwarding packets to host */ ret = send_rx_ctrl_cmd(lio, 1); if (ret) - return ret; + goto err_rx_ctrl; /* start periodical statistics fetch */ INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); @@ -1828,6 +1825,27 @@ static int liquidio_open(struct net_device *netdev) dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); + return 0; + +err_rx_ctrl: + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) + cleanup_tx_poll_fn(netdev); +err_poll: + if (lio->ptp_clock) { + ptp_clock_unregister(lio->ptp_clock); + lio->ptp_clock = NULL; + } + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } + return ret; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c00f1a7ffc15f..488da767cfdf3 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2258,7 +2258,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_unregister_interrupts; + goto err_destroy_workqueue; } nic->msg_enable = debug; @@ -2267,6 +2267,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_destroy_workqueue: + destroy_workqueue(nic->nicvf_rx_mode_wq); err_unregister_interrupts: nicvf_unregister_interrupts(nic); err_free_netdev: diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 8ff28ed04b7fc..f0e48b9373d6d 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1438,8 +1438,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, return AE_OK; } - if (strncmp(string.pointer, bgx_sel, 4)) + if (strncmp(string.pointer, bgx_sel, 4)) { + kfree(string.pointer); return AE_OK; + } acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, bgx_acpi_register_phy, NULL, bgx, NULL); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 
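The thunder_bgx fix above closes a memory leak in an ACPI namespace walk: a struct acpi_buffer filled with ACPI_ALLOCATE_BUFFER owns kmalloc'd memory, so the no-match early return must free string.pointer just like the match path. The pattern in isolation:

#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/string.h>

static acpi_status foo_match_id(acpi_handle handle, u32 lvl,
				void *context, void **ret)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	const char *wanted = context;

	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string)))
		return AE_OK;

	if (strncmp(string.pointer, wanted, 4)) {
		kfree(string.pointer);	/* the leak fixed above */
		return AE_OK;
	}

	/* ... register the matching device ... */
	kfree(string.pointer);
	return AE_OK;
}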
index 84ad7261e243a..8a167eea288c3 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1302,6 +1302,7 @@ static int cxgb_up(struct adapter *adap) if (ret < 0) { CH_ERR(adap, "failed to bind qsets, err %d\n", ret); t3_intr_disable(adap); + quiesce_rx(adap); free_irq_resources(adap); err = ret; goto out; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index c5b0e725b2382..2169351b6afc3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -14,6 +14,7 @@ #include "cudbg_entity.h" #include "cudbg_lib.h" #include "cudbg_zlib.h" +#include "cxgb4_tc_mqprio.h" static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ @@ -3476,7 +3477,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < utxq->ntxq; i++) QDESC_GET_TXQ(&utxq->uldtxq[i].q, cudbg_uld_txq_to_qtype(j), - out_unlock); + out_unlock_uld); } } @@ -3493,7 +3494,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < urxq->nrxq; i++) QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, cudbg_uld_rxq_to_qtype(j), - out_unlock); + out_unlock_uld); } /* ULD FLQ */ @@ -3505,7 +3506,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < urxq->nrxq; i++) QDESC_GET_FLQ(&urxq->uldrxq[i].fl, cudbg_uld_flq_to_qtype(j), - out_unlock); + out_unlock_uld); } /* ULD CIQ */ @@ -3518,29 +3519,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < urxq->nciq; i++) QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, cudbg_uld_ciq_to_qtype(j), - out_unlock); + out_unlock_uld); } } + mutex_unlock(&uld_mutex); + + if (!padap->tc_mqprio) + goto out; + mutex_lock(&padap->tc_mqprio->mqprio_mutex); /* ETHOFLD TXQ */ if (s->eohw_txq) for (i = 0; i < s->eoqsets; i++) QDESC_GET_TXQ(&s->eohw_txq[i].q, - CUDBG_QTYPE_ETHOFLD_TXQ, out); + CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio); /* ETHOFLD RXQ and FLQ */ if (s->eohw_rxq) { for (i = 0; i < s->eoqsets; i++) QDESC_GET_RXQ(&s->eohw_rxq[i].rspq, - CUDBG_QTYPE_ETHOFLD_RXQ, out); + CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio); for (i = 0; i < s->eoqsets; i++) QDESC_GET_FLQ(&s->eohw_rxq[i].fl, - CUDBG_QTYPE_ETHOFLD_FLQ, out); + CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio); } -out_unlock: - mutex_unlock(&uld_mutex); +out_unlock_mqprio: + mutex_unlock(&padap->tc_mqprio->mqprio_mutex); out: qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); @@ -3578,6 +3584,10 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, #undef QDESC_GET return rc; + +out_unlock_uld: + mutex_unlock(&uld_mutex); + goto out; } int cudbg_collect_flash(struct cudbg_init *pdbg_init, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2820a0bb971bf..5e1e46425014c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev) */ err = t4vf_update_port_info(pi); if (err < 0) - return err; + goto err_unwind; /* * Note that this interface is up and start everything up ... 
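The cudbg_collect_qdesc() rework above is label discipline for a function that takes two mutexes over disjoint regions: each region gets its own unwind label so exactly the lock that is held gets dropped, and the tc_mqprio pointer is checked before its mutex is used. Reduced to a sketch:

#include <linux/mutex.h>

static DEFINE_MUTEX(lock_a);
static DEFINE_MUTEX(lock_b);

static int foo_step_a(void);	/* hypothetical */
static int foo_step_b(void);	/* hypothetical */

static int foo_collect(void)
{
	int rc;

	mutex_lock(&lock_a);
	rc = foo_step_a();
	if (rc)
		goto out_unlock_a;
	mutex_unlock(&lock_a);

	mutex_lock(&lock_b);
	rc = foo_step_b();
	if (rc)
		goto out_unlock_b;
	mutex_unlock(&lock_b);
out:
	return rc;

out_unlock_a:
	mutex_unlock(&lock_a);
	goto out;
out_unlock_b:
	mutex_unlock(&lock_b);
	goto out;
}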
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 48c6eb142dcc0..05a0cc583f8a4 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); + spin_unlock_irqrestore(&bp->lock, flags); + /* free the buffer */ dev_kfree_skb(skb); - spin_unlock_irqrestore(&bp->lock, flags); - return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 15aa3b3c0089f..5f9603d4c0493 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -5,6 +5,7 @@ #include <linux/tcp.h> #include <linux/udp.h> #include <linux/vmalloc.h> +#include <net/pkt_sched.h> /* ENETC overhead: optional extension BD + 1 BD gap */ #define ENETC_TXBDS_NEEDED(val) ((val) + 2) @@ -384,12 +385,7 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(tstamp); - /* Ensure skb_mstamp_ns, which might have been populated with - * the txtime, is not mistaken for a software timestamp, - * because this will prevent the dispatch of our hardware - * timestamp to the socket. - */ - skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(skb); skb_tstamp_tx(skb, &shhwtstamps); } } @@ -1212,7 +1208,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) /* enable Tx ints by setting pkt thr to 1 */ enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); - tbmr = ENETC_TBMR_EN; + tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio); if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) tbmr |= ENETC_TBMR_VIH; @@ -1241,7 +1237,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + /* Also prepare the consumer index in case page allocation never + * succeeds. In that case, hardware will never advance producer index + * to match consumer index, and will drop all frames. 
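The dnet change above shrinks an IRQ-disabled critical section: freeing the transmitted skb touches no driver state, so it belongs after the unlock, keeping interrupt-off time minimal. The resulting shape of a minimal start_xmit:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct foo_priv {
	spinlock_t lock;
};

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	/* ... push skb->data into the hardware FIFO ... */
	skb_tx_timestamp(skb);
	spin_unlock_irqrestore(&bp->lock, flags);

	dev_kfree_skb(skb);	/* needs no lock */
	return NETDEV_TX_OK;
}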
+ */ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); + enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); /* enable Rx ints by setting pkt thr to 1 */ enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); @@ -1267,13 +1268,14 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); + enetc_setup_txbdr(hw, priv->tx_ring[i]); for (i = 0; i < priv->num_rx_rings; i++) - enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); + enetc_setup_rxbdr(hw, priv->rx_ring[i]); } static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) @@ -1306,13 +1308,14 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); + enetc_clear_txbdr(hw, priv->tx_ring[i]); for (i = 0; i < priv->num_rx_rings; i++) - enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); + enetc_clear_rxbdr(hw, priv->rx_ring[i]); udelay(1); } @@ -1320,13 +1323,13 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) static int enetc_setup_irqs(struct enetc_ndev_priv *priv) { struct pci_dev *pdev = priv->si->pdev; + struct enetc_hw *hw = &priv->si->hw; int i, j, err; for (i = 0; i < priv->bdr_int_num; i++) { int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); struct enetc_int_vector *v = priv->int_vector[i]; int entry = ENETC_BDR_INT_BASE_IDX + i; - struct enetc_hw *hw = &priv->si->hw; snprintf(v->name, sizeof(v->name), "%s-rxtx%d", priv->ndev->name, i); @@ -1414,13 +1417,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0); + enetc_txbdr_wr(hw, i, ENETC_TBIER, 0); for (i = 0; i < priv->num_rx_rings; i++) - enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0); + enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); } static int enetc_phylink_connect(struct net_device *ndev) @@ -1560,6 +1564,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_mqprio_qopt *mqprio = type_data; + struct enetc_hw *hw = &priv->si->hw; struct enetc_bdr *tx_ring; u8 num_tc; int i; @@ -1574,7 +1579,8 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) /* Reset all ring priorities to 0 */ for (i = 0; i < priv->num_tx_rings; i++) { tx_ring = priv->tx_ring[i]; - enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0); + tx_ring->prio = 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); } return 0; @@ -1593,7 +1599,8 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) */ for (i = 0; i < num_tc; i++) { tx_ring = priv->tx_ring[i]; - enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i); + tx_ring->prio = i; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); } /* Reset the number of netdev queues based on the TC count */ @@ -1671,52 +1678,29 @@ static int enetc_set_rss(struct net_device *ndev, int en) return 0; } -static int enetc_set_psfp(struct net_device *ndev, int en) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - int err; - - if (en) { - err = 
enetc_psfp_enable(priv); - if (err) - return err; - - priv->active_offloads |= ENETC_F_QCI; - return 0; - } - - err = enetc_psfp_disable(priv); - if (err) - return err; - - priv->active_offloads &= ~ENETC_F_QCI; - - return 0; -} - static void enetc_enable_rxvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_rx_rings; i++) - enetc_bdr_enable_rxvlan(&priv->si->hw, i, en); + enetc_bdr_enable_rxvlan(hw, i, en); } static void enetc_enable_txvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_bdr_enable_txvlan(&priv->si->hw, i, en); + enetc_bdr_enable_txvlan(hw, i, en); } -int enetc_set_features(struct net_device *ndev, - netdev_features_t features) +void enetc_set_features(struct net_device *ndev, netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; - int err = 0; if (changed & NETIF_F_RXHASH) enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); @@ -1728,11 +1712,6 @@ int enetc_set_features(struct net_device *ndev, if (changed & NETIF_F_HW_VLAN_CTAG_TX) enetc_enable_txvlan(ndev, !!(features & NETIF_F_HW_VLAN_CTAG_TX)); - - if (changed & NETIF_F_HW_TC) - err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); - - return err; } #ifdef CONFIG_FSL_ENETC_PTP_CLOCK diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 15d19cbd5a954..725c3d1cbb198 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -58,6 +58,7 @@ struct enetc_bdr { void __iomem *rcir; }; u16 index; + u16 prio; int bd_count; /* # of BDs */ int next_to_use; int next_to_clean; @@ -301,8 +302,7 @@ void enetc_start(struct net_device *ndev); void enetc_stop(struct net_device *ndev); netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); struct net_device_stats *enetc_get_stats(struct net_device *ndev); -int enetc_set_features(struct net_device *ndev, - netdev_features_t features); +void enetc_set_features(struct net_device *ndev, netdev_features_t features); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data); @@ -335,22 +335,24 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data); int enetc_psfp_init(struct enetc_ndev_priv *priv); int enetc_psfp_clean(struct enetc_ndev_priv *priv); +int enetc_set_psfp(struct net_device *ndev, bool en); static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; u32 reg; - reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR); + reg = enetc_port_rd(hw, ENETC_PSIDCAPR); priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK; /* Port stream filter capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR); + reg = enetc_port_rd(hw, ENETC_PSFCAPR); priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK; /* Port stream gate capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR); + reg = enetc_port_rd(hw, ENETC_PSGCAPR); priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK); priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16; /* Port flow meter capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR); + reg = 
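Much of the enetc churn in this region is one mechanical cleanup repeated: hoist the recurring &priv->si->hw dereference into a local at the top of each function, so register accesses read shorter and the path is computed once. Reduced to a sketch with hypothetical foo_* types:

#include <linux/types.h>

struct foo_hw { void __iomem *regs; };
struct foo_si { struct foo_hw hw; };
struct foo_priv { struct foo_si *si; int num_rx_rings; };

static void foo_rxbdr_wr(struct foo_hw *hw, int idx, u32 val);	/* hypothetical */

static void foo_disable_rx_ints(struct foo_priv *priv)
{
	struct foo_hw *hw = &priv->si->hw;	/* cached once */
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		foo_rxbdr_wr(hw, i, 0);
}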
enetc_port_rd(hw, ENETC_PFMCAPR); priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK; } @@ -410,4 +412,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) { return 0; } + +static inline int enetc_set_psfp(struct net_device *ndev, bool en) +{ + return 0; +} #endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 716b396bf0947..515db7e6e6497 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -671,6 +671,13 @@ static int enetc_pf_set_features(struct net_device *ndev, { netdev_features_t changed = ndev->features ^ features; struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (changed & NETIF_F_HW_TC) { + err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); + if (err) + return err; + } if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { struct enetc_pf *pf = enetc_si_priv(priv->si); @@ -684,7 +691,9 @@ static int enetc_pf_set_features(struct net_device *ndev, if (changed & NETIF_F_LOOPBACK) enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); - return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; } static const struct net_device_ops enetc_ndev_ops = { @@ -739,9 +748,6 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->priv_flags |= IFF_UNICAST_FLT; - if (si->hw_features & ENETC_SI_F_QBV) - priv->active_offloads |= ENETC_F_QBV; - if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) { priv->active_offloads |= ENETC_F_QCI; ndev->features |= NETIF_F_HW_TC; @@ -987,7 +993,8 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, struct enetc_ndev_priv *priv; priv = netdev_priv(pf->si->ndev); - if (priv->active_offloads & ENETC_F_QBV) + + if (pf->si->hw_features & ENETC_SI_F_QBV) enetc_sched_speed_set(priv, speed); if (!phylink_autoneg_inband(mode) && diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 9e6988fd3787a..5841721c81190 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -17,8 +17,9 @@ static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) { + struct enetc_hw *hw = &priv->si->hw; u32 old_speed = priv->speed; - u32 pspeed; + u32 pspeed, tmp; if (speed == old_speed) return; @@ -39,16 +40,15 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) } priv->speed = speed; - enetc_port_wr(&priv->si->hw, ENETC_PMR, - (enetc_port_rd(&priv->si->hw, ENETC_PMR) - & (~ENETC_PMR_PSPEED_MASK)) - | pspeed); + tmp = enetc_port_rd(hw, ENETC_PMR); + enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed); } static int enetc_setup_taprio(struct net_device *ndev, struct tc_taprio_qopt_offload *admin_conf) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; struct enetc_cbd cbd = {.cmd = 0}; struct tgs_gcl_conf *gcl_config; struct tgs_gcl_data *gcl_data; @@ -60,15 +60,16 @@ static int enetc_setup_taprio(struct net_device *ndev, int err; int i; - if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw)) + if (admin_conf->num_entries > enetc_get_max_gcl_len(hw)) return -EINVAL; gcl_len = admin_conf->num_entries; - tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET); + tge = enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET); if (!admin_conf->enable) { - 
enetc_wr(&priv->si->hw, - ENETC_QBV_PTGCR_OFFSET, - tge & (~ENETC_QBV_TGE)); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE); + + priv->active_offloads &= ~ENETC_F_QBV; + return 0; } @@ -123,18 +124,18 @@ static int enetc_setup_taprio(struct net_device *ndev, cbd.cls = BDCR_CMD_PORT_GCL; cbd.status_flags = 0; - enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, - tge | ENETC_QBV_TGE); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge | ENETC_QBV_TGE); err = enetc_send_cmd(priv->si, &cbd); if (err) - enetc_wr(&priv->si->hw, - ENETC_QBV_PTGCR_OFFSET, - tge & (~ENETC_QBV_TGE)); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE); dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); kfree(gcl_data); + if (!err) + priv->active_offloads |= ENETC_F_QBV; + return err; } @@ -142,6 +143,8 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) { struct tc_taprio_qopt_offload *taprio = type_data; struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_bdr *tx_ring; int err; int i; @@ -150,18 +153,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) if (priv->tx_ring[i]->tsd_enable) return -EBUSY; - for (i = 0; i < priv->num_tx_rings; i++) - enetc_set_bdr_prio(&priv->si->hw, - priv->tx_ring[i]->index, - taprio->enable ? i : 0); + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = taprio->enable ? i : 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } err = enetc_setup_taprio(ndev, taprio); - - if (err) - for (i = 0; i < priv->num_tx_rings; i++) - enetc_set_bdr_prio(&priv->si->hw, - priv->tx_ring[i]->index, - taprio->enable ? 0 : i); + if (err) { + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = taprio->enable ? 0 : i; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } + } return err; } @@ -182,7 +187,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) struct tc_cbs_qopt_offload *cbs = type_data; u32 port_transmit_rate = priv->speed; u8 tc_nums = netdev_get_num_tc(ndev); - struct enetc_si *si = priv->si; + struct enetc_hw *hw = &priv->si->hw; u32 hi_credit_bit, hi_credit_reg; u32 max_interference_size; u32 port_frame_max_size; @@ -203,15 +208,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) * lower than this TC have been disabled. */ if (tc == prio_top && - enetc_get_cbs_enable(&si->hw, prio_next)) { + enetc_get_cbs_enable(hw, prio_next)) { dev_err(&ndev->dev, "Disable TC%d before disable TC%d\n", prio_next, tc); return -EINVAL; } - enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0); - enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0); + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0); + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0); return 0; } @@ -228,13 +233,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) * higher than this TC have been enabled. */ if (tc == prio_next) { - if (!enetc_get_cbs_enable(&si->hw, prio_top)) { + if (!enetc_get_cbs_enable(hw, prio_top)) { dev_err(&ndev->dev, "Enable TC%d first before enable TC%d\n", prio_top, prio_next); return -EINVAL; } - bw_sum += enetc_get_cbs_bw(&si->hw, prio_top); + bw_sum += enetc_get_cbs_bw(hw, prio_top); } if (bw_sum + bw >= 100) { @@ -243,7 +248,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) return -EINVAL; } - enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc)); + enetc_port_rd(hw, ENETC_PTCMSDUR(tc)); /* For top prio TC, the max_interfrence_size is maxSizedFrame. 
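The taprio rework above also tightens feature-flag bookkeeping: the QBV bit in active_offloads is now set only after the hardware command succeeds and cleared on the disable path, so the software view cannot drift from hardware state. The pattern, with a hypothetical flag:

#include <linux/bits.h>
#include <linux/types.h>

#define FOO_F_QBV	BIT(0)	/* hypothetical offload flag */

struct foo_priv { u32 active_offloads; };

static int foo_hw_enable(struct foo_priv *priv);	/* hypothetical */
static void foo_hw_disable(struct foo_priv *priv);	/* hypothetical */

static int foo_setup_offload(struct foo_priv *priv, bool enable)
{
	int err;

	if (!enable) {
		foo_hw_disable(priv);
		priv->active_offloads &= ~FOO_F_QBV;
		return 0;
	}

	err = foo_hw_enable(priv);
	if (err)
		return err;	/* flag untouched on failure */

	priv->active_offloads |= FOO_F_QBV;
	return 0;
}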
* @@ -263,8 +268,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) u32 m0, ma, r0, ra; m0 = port_frame_max_size * 8; - ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8; - ra = enetc_get_cbs_bw(&si->hw, prio_top) * + ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8; + ra = enetc_get_cbs_bw(hw, prio_top) * port_transmit_rate * 10000ULL; r0 = port_transmit_rate * 1000000ULL; max_interference_size = m0 + ma + @@ -284,10 +289,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, port_transmit_rate * 1000000ULL); - enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg); + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg); /* Set bw register and enable this traffic class */ - enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); return 0; } @@ -297,6 +302,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_etf_qopt_offload *qopt = type_data; u8 tc_nums = netdev_get_num_tc(ndev); + struct enetc_hw *hw = &priv->si->hw; int tc; if (!tc_nums) @@ -312,12 +318,11 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) return -EBUSY; /* TSD and Qbv are mutually exclusive in hardware */ - if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) + if (enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) return -EBUSY; priv->tx_ring[tc]->tsd_enable = qopt->enable; - enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc), - qopt->enable ? ENETC_TSDE : 0); + enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0); return 0; } @@ -1525,6 +1530,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } +int enetc_set_psfp(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (en) { + err = enetc_psfp_enable(priv); + if (err) + return err; + + priv->active_offloads |= ENETC_F_QCI; + return 0; + } + + err = enetc_psfp_disable(priv); + if (err) + return err; + + priv->active_offloads &= ~ENETC_F_QCI; + + return 0; +} + int enetc_psfp_init(struct enetc_ndev_priv *priv) { if (epsfp.psfp_sfi_bitmap) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 33c125735db7e..5ce3e2593bdde 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -88,7 +88,9 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) static int enetc_vf_set_features(struct net_device *ndev, netdev_features_t features) { - return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; } /* Probing/ Init */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d8bdaf2e5365c..686bb873125cc 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -623,7 +623,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } bdp->cbd_datlen = cpu_to_fec16(size); @@ -685,7 +685,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + 
return NETDEV_TX_OK; } } @@ -2251,6 +2251,31 @@ static u32 fec_enet_register_offset[] = { IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; +/* for i.MX6ul */ +static u32 fec_enet_register_offset_6ul[] = { + FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, + FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, + FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, + FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, + FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, + FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, + FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, + RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, + RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, + RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, + RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, + RMON_T_P_GTE2048, RMON_T_OCTETS, + IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, + IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, + IEEE_T_FDXFC, IEEE_T_OCTETS_OK, + RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, + RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, + RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, + RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, + RMON_R_P_GTE2048, RMON_R_OCTETS, + IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, + IEEE_R_FDXFC, IEEE_R_OCTETS_OK +}; #else static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { @@ -2275,7 +2300,24 @@ static void fec_enet_get_regs(struct net_device *ndev, u32 *buf = (u32 *)regbuf; u32 i, off; int ret; +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) + u32 *reg_list; + u32 reg_cnt; + if (!of_machine_is_compatible("fsl,imx6ul")) { + reg_list = fec_enet_register_offset; + reg_cnt = ARRAY_SIZE(fec_enet_register_offset); + } else { + reg_list = fec_enet_register_offset_6ul; + reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); + } +#else + /* coldfire */ + static u32 *reg_list = fec_enet_register_offset; + static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); +#endif ret = pm_runtime_resume_and_get(dev); if (ret < 0) return; @@ -2284,8 +2326,8 @@ static void fec_enet_get_regs(struct net_device *ndev, memset(buf, 0, regs->len); - for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { - off = fec_enet_register_offset[i]; + for (i = 0; i < reg_cnt; i++) { + off = reg_list[i]; if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && !(fep->quirks & FEC_QUIRK_HAS_FRREG)) diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 6eeccc11b76ef..3312dc4083a08 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -884,12 +884,21 @@ static int mac_probe(struct platform_device *_of_dev) return err; } +static int mac_remove(struct platform_device *pdev) +{ + struct mac_device *mac_dev = platform_get_drvdata(pdev); + + platform_device_unregister(mac_dev->priv->eth_dev); + return 0; +} + static struct platform_driver mac_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = mac_match, }, .probe = mac_probe, + .remove = mac_remove, }; 
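The fec TSO error paths above encode a core xmit contract: NETDEV_TX_BUSY asks the stack to requeue the same skb, so it is only legal while the skb is still owned by the caller. Once the error path has freed the skb, the driver has consumed it and must report NETDEV_TX_OK. A sketch:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct device *dma_dev = ndev->dev.parent;
	dma_addr_t addr;

	addr = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr)) {
		dev_kfree_skb_any(skb);	/* skb is gone ... */
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;	/* ... so never TX_BUSY here */
	}

	/* ... queue the descriptor and ring the doorbell ... */
	return NETDEV_TX_OK;
}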
builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 99fe2c210d0f6..61f4b6e50d29b 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep) return -EINVAL; fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); - if (!fep->fcc.fccp) + if (!fep->fec.fecp) return -EINVAL; return 0; diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 57c3bc4f70895..c16dfd8693639 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&priv->napi, skb); dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += len; next: pos = (pos + 1) % rxq->num; if (rx_pkts_num >= limit) diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 8b2bf85039f16..43f3146caf07e 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&priv->napi, skb); dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += len; next: pos = dma_ring_incr(pos, RX_DESC_NUM); } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 00fafc0f85121..430eccea8e5e9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner) hdev->cls_dev.release = hnae_release; (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id); ret = device_register(&hdev->cls_dev); - if (ret) + if (ret) { + put_device(&hdev->cls_dev); return ret; + } __module_get(THIS_MODULE); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index d6580e942724d..f7f3e4bbc4770 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -3089,7 +3089,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev) struct pci_dev *pdev = hdev->pdev; int ret = 0; - if (hdev->reset_type == HNAE3_VF_FULL_RESET && + if ((hdev->reset_type == HNAE3_VF_FULL_RESET || + hdev->reset_type == HNAE3_FLR_RESET) && test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { hclgevf_misc_irq_uninit(hdev); hclgevf_uninit_msi(hdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c index 19eb839177ec2..061952c6c21a4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c @@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx) struct tag_sml_funcfg_tbl *funcfg_table_elem; struct hinic_cmd_lt_rd *read_data; u16 out_size = sizeof(*read_data); + int ret = ~0; int err; read_data = kzalloc(sizeof(*read_data), GFP_KERNEL); @@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx) switch (idx) { case VALID: - return funcfg_table_elem->dw0.bs.valid; 
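The hnae_ae_register() fix above applies the driver-model rule that a failed device_register() still leaves an initialized kobject behind: the error path must drop it with put_device(), which funnels into the ->release callback, rather than returning directly. The ehea hunk just below makes the same fix for of_device_register(). Sketch:

#include <linux/device.h>

static void foo_release(struct device *dev)
{
	/* final teardown, reached via put_device() on every path */
}

static int foo_register(struct device *cls_dev)
{
	int ret;

	cls_dev->release = foo_release;
	dev_set_name(cls_dev, "foo%d", 0);

	ret = device_register(cls_dev);
	if (ret) {
		put_device(cls_dev);	/* not a bare return */
		return ret;
	}
	return 0;
}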
+ ret = funcfg_table_elem->dw0.bs.valid; + break; case RX_MODE: - return funcfg_table_elem->dw0.bs.nic_rx_mode; + ret = funcfg_table_elem->dw0.bs.nic_rx_mode; + break; case MTU: - return funcfg_table_elem->dw1.bs.mtu; + ret = funcfg_table_elem->dw1.bs.mtu; + break; case RQ_DEPTH: - return funcfg_table_elem->dw13.bs.cfg_rq_depth; + ret = funcfg_table_elem->dw13.bs.cfg_rq_depth; + break; case QUEUE_NUM: - return funcfg_table_elem->dw13.bs.cfg_q_num; + ret = funcfg_table_elem->dw13.bs.cfg_q_num; + break; } kfree(read_data); - return ~0; + return ret; } static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 21b8235952d33..dff979f5d08b5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -929,7 +929,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, err_set_cmdq_depth: hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); - + free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]); err_cmdq_ctxt: hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 799b85c88eff8..bcf2476512a5a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -892,7 +892,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, if (err) return -EINVAL; - interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt; + interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt; interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt; err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 4f1d585485d7a..6ec042d48cd1f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1502,8 +1502,15 @@ static struct pci_driver hinic_driver = { static int __init hinic_module_init(void) { + int ret; + hinic_dbg_register_debugfs(HINIC_DRV_NAME); - return pci_register_driver(&hinic_driver); + + ret = pci_register_driver(&hinic_driver); + if (ret) + hinic_dbg_unregister_debugfs(); + + return ret; } static void __exit hinic_module_exit(void) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index f8a26459ff653..4d82ebfe27f93 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -1178,7 +1178,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev) dev_err(&hwdev->hwif->pdev->dev, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", err, register_info.status, out_size); - hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC); return -EIO; } } else { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index f630667364253..28a5f8d73a614 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2897,6 +2897,7 @@ static struct device *ehea_register_port(struct ehea_port *port, ret = of_device_register(&port->ofdev); if (ret) { pr_err("failed to register device. 
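The hinic_module_init() change above is the same module-init rule fixed for bnxt earlier in this series and for fm10k and i40e below: whatever was set up before pci_register_driver() must be torn down if registration fails, or a failed modprobe leaks it. The shape, with hypothetical debugfs helpers:

#include <linux/module.h>
#include <linux/pci.h>

static void foo_debugfs_init(void);	/* hypothetical */
static void foo_debugfs_exit(void);	/* hypothetical */

static struct pci_driver foo_driver = {
	.name = "foo",
};

static int __init foo_init_module(void)
{
	int err;

	foo_debugfs_init();

	err = pci_register_driver(&foo_driver);
	if (err) {
		foo_debugfs_exit();	/* unwind on failure */
		return err;
	}

	return 0;
}
module_init(foo_init_module);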
ret=%d\n", ret); + put_device(&port->ofdev.dev); goto out; } diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 9295a9a1efc73..001850d578e8f 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1739,14 +1739,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb, dma_addr_t dma_addr; cb->command = nic->tx_command; - dma_addr = pci_map_single(nic->pdev, - skb->data, skb->len, PCI_DMA_TODEVICE); + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); /* If we can't map the skb, have the upper layer try later */ - if (pci_dma_mapping_error(nic->pdev, dma_addr)) { - dev_kfree_skb_any(skb); - skb = NULL; + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) return -ENOMEM; - } /* * Use the last 4 bytes of the SKB payload packet as the CRC, used for @@ -1828,10 +1825,10 @@ static int e100_tx_clean(struct nic *nic) dev->stats.tx_packets++; dev->stats.tx_bytes += cb->skb->len; - pci_unmap_single(nic->pdev, - le32_to_cpu(cb->u.tcb.tbd.buf_addr), - le16_to_cpu(cb->u.tcb.tbd.size), - PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); dev_kfree_skb_any(cb->skb); cb->skb = NULL; tx_cleaned = 1; @@ -1855,10 +1852,10 @@ static void e100_clean_cbs(struct nic *nic) while (nic->cbs_avail != nic->params.cbs.count) { struct cb *cb = nic->cb_to_clean; if (cb->skb) { - pci_unmap_single(nic->pdev, - le32_to_cpu(cb->u.tcb.tbd.buf_addr), - le16_to_cpu(cb->u.tcb.tbd.size), - PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); dev_kfree_skb(cb->skb); } nic->cb_to_clean = nic->cb_to_clean->next; @@ -1925,10 +1922,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) /* Init, and map the RFD. 
*/ skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); - rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { dev_kfree_skb_any(rx->skb); rx->skb = NULL; rx->dma_addr = 0; @@ -1941,8 +1938,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) if (rx->prev->skb) { struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; put_unaligned_le32(rx->dma_addr, &prev_rfd->link); - pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); } return 0; @@ -1961,8 +1960,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, return -EAGAIN; /* Need to sync before taking a peek at cb_complete bit */ - pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); rfd_status = le16_to_cpu(rfd->status); netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, @@ -1981,9 +1980,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, if (ioread8(&nic->csr->scb.status) & rus_no_res) nic->ru_running = RU_SUSPENDED; - pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, - sizeof(struct rfd), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); return -ENODATA; } @@ -1995,8 +1994,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, actual_size = RFD_BUF_LEN - sizeof(struct rfd); /* Get data */ - pci_unmap_single(nic->pdev, rx->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); /* If this buffer has the el bit, but we think the receiver * is still running, check to see if it really stopped while @@ -2097,22 +2096,25 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done, (struct rfd *)new_before_last_rx->skb->data; new_before_last_rfd->size = 0; new_before_last_rfd->command |= cpu_to_le16(cb_el); - pci_dma_sync_single_for_device(nic->pdev, - new_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); /* Now that we have a new stopping point, we can clear the old * stopping point. We must sync twice to get the proper * ordering on the hardware side of things. 
*/ old_before_last_rfd->command &= ~cpu_to_le16(cb_el); - pci_dma_sync_single_for_device(nic->pdev, - old_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); - pci_dma_sync_single_for_device(nic->pdev, - old_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); } if (restart_required) { @@ -2134,8 +2136,9 @@ static void e100_rx_clean_list(struct nic *nic) if (nic->rxs) { for (rx = nic->rxs, i = 0; i < count; rx++, i++) { if (rx->skb) { - pci_unmap_single(nic->pdev, rx->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); dev_kfree_skb(rx->skb); } } @@ -2177,8 +2180,8 @@ static int e100_rx_alloc_list(struct nic *nic) before_last = (struct rfd *)rx->skb->data; before_last->command |= cpu_to_le16(cb_el); before_last->size = 0; - pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); nic->rx_to_use = nic->rx_to_clean = nic->rxs; nic->ru_running = RU_SUSPENDED; @@ -2377,8 +2380,8 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) msleep(10); - pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), skb->data, ETH_DATA_LEN)) @@ -2759,16 +2762,16 @@ static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) static int e100_alloc(struct nic *nic) { - nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), - &nic->dma_addr); + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); return nic->mem ? 0 : -ENOMEM; } static void e100_free(struct nic *nic) { if (nic->mem) { - pci_free_consistent(nic->pdev, sizeof(struct mem), - nic->mem, nic->dma_addr); + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); nic->mem = NULL; } } @@ -2861,7 +2864,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_pdev; } - if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); goto err_out_free_res; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d0c4de0231120..ae0c9aaab48db 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5937,9 +5937,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
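The e100 conversion above is the mechanical move off the legacy pci_* DMA wrappers onto the generic DMA API: the device argument becomes &pdev->dev, PCI_DMA_* directions become DMA_*, and the coherent allocator gains an explicit gfp_t. The correspondences in isolation:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *foo_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pdev, size, dma) */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static int foo_map(struct pci_dev *pdev, void *buf, size_t len,
		   dma_addr_t *dma)
{
	/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	/* was: pci_dma_mapping_error(pdev, *dma) */
	return dma_mapping_error(&pdev->dev, *dma);
}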
*/ e1000_maybe_stop_tx(tx_ring, - (MAX_SKB_FRAGS * + ((MAX_SKB_FRAGS + 1) * DIV_ROUND_UP(PAGE_SIZE, - adapter->tx_fifo_limit) + 2)); + adapter->tx_fifo_limit) + 4)); if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 99b8252eb969e..a388a0fcbeed3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -32,6 +32,8 @@ struct workqueue_struct *fm10k_workqueue; **/ static int __init fm10k_init_module(void) { + int ret; + pr_info("%s\n", fm10k_driver_string); pr_info("%s\n", fm10k_copyright); @@ -43,7 +45,13 @@ static int __init fm10k_init_module(void) fm10k_dbg_init(); - return fm10k_register_pci_driver(); + ret = fm10k_register_pci_driver(); + if (ret) { + fm10k_dbg_exit(); + destroy_workqueue(fm10k_workqueue); + } + + return ret; } module_init(fm10k_init_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 32f3facbed1a5..b3cb5d1033260 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -178,6 +178,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) "Cannot locate client instance close routine\n"); return; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n"); + return; + } cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); i40e_client_release_qvlist(&cdev->lan_info); @@ -374,7 +378,6 @@ void i40e_client_subtask(struct i40e_pf *pf) /* Remove failed client instance */ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); - i40e_client_del_instance(pf); return; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 63054061966e6..520929f4d535f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2081,9 +2081,6 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS; err = i40e_setup_rx_descriptors(&rx_rings[i]); - if (err) - goto rx_unwind; - err = i40e_alloc_rx_bi(&rx_rings[i]); if (err) goto rx_unwind; @@ -3086,10 +3083,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) if (cmd->flow_type == TCP_V4_FLOW || cmd->flow_type == UDP_V4_FLOW) { - if (i_set & I40E_L3_SRC_MASK) - cmd->data |= RXH_IP_SRC; - if (i_set & I40E_L3_DST_MASK) - cmd->data |= RXH_IP_DST; + if (hw->mac.type == I40E_MAC_X722) { + if (i_set & I40E_X722_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_X722_L3_DST_MASK) + cmd->data |= RXH_IP_DST; + } else { + if (i_set & I40E_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_L3_DST_MASK) + cmd->data |= RXH_IP_DST; + } } else if (cmd->flow_type == TCP_V6_FLOW || cmd->flow_type == UDP_V6_FLOW) { if (i_set & I40E_L3_V6_SRC_MASK) @@ -3396,12 +3400,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, /** * i40e_get_rss_hash_bits - Read RSS Hash bits from register + * @hw: hw structure * @nfc: pointer to user request * @i_setc: bits currently set * * Returns value of bits to be set per user request **/ -static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) +static u64 i40e_get_rss_hash_bits(struct 
i40e_hw *hw, + struct ethtool_rxnfc *nfc, + u64 i_setc) { u64 i_set = i_setc; u64 src_l3 = 0, dst_l3 = 0; @@ -3420,8 +3427,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) dst_l3 = I40E_L3_V6_DST_MASK; } else if (nfc->flow_type == TCP_V4_FLOW || nfc->flow_type == UDP_V4_FLOW) { - src_l3 = I40E_L3_SRC_MASK; - dst_l3 = I40E_L3_DST_MASK; + if (hw->mac.type == I40E_MAC_X722) { + src_l3 = I40E_X722_L3_SRC_MASK; + dst_l3 = I40E_X722_L3_DST_MASK; + } else { + src_l3 = I40E_L3_SRC_MASK; + dst_l3 = I40E_L3_DST_MASK; + } } else { /* Any other flow type are not supported here */ return i_set; @@ -3439,6 +3451,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) return i_set; } +#define FLOW_PCTYPES_SIZE 64 /** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct @@ -3451,9 +3464,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) struct i40e_hw *hw = &pf->hw; u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); - u8 flow_pctype = 0; + DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE); u64 i_set, i_setc; + bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE); + if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "Change of RSS hash input set is not supported when MFP mode is enabled\n"); @@ -3469,36 +3484,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, + flow_pctypes); break; case TCP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, + flow_pctypes); break; case UDP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, + flow_pctypes); + set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, + flow_pctypes); + } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case UDP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, + flow_pctypes); + set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, + flow_pctypes); + } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; case AH_ESP_V4_FLOW: @@ -3531,17 +3545,20 @@ static int i40e_set_rss_hash_opt(struct 
i40e_pf *pf, struct ethtool_rxnfc *nfc) return -EINVAL; } - if (flow_pctype) { - i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, - flow_pctype)) | - ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, - flow_pctype)) << 32); - i_set = i40e_get_rss_hash_bits(nfc, i_setc); - i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype), - (u32)i_set); - i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype), - (u32)(i_set >> 32)); - hena |= BIT_ULL(flow_pctype); + if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) { + u8 flow_id; + + for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) { + i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) | + ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32); + i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc); + + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id), + (u32)i_set); + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id), + (u32)(i_set >> 32)); + hena |= BIT_ULL(flow_id); + } } i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); @@ -4217,11 +4234,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, return -EOPNOTSUPP; /* First 4 bytes of L4 header */ - if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF)) - new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; - else if (!usr_ip4_spec->l4_4_bytes) - new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); - else + if (usr_ip4_spec->l4_4_bytes) return -EOPNOTSUPP; /* Filtering on Type of Service is not supported. */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 97009cbea7793..9e8a20a94862f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2788,7 +2788,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) struct i40e_pf *pf = vsi->back; if (i40e_enabled_xdp_vsi(vsi)) { - int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int frame_size = new_mtu + I40E_PACKET_HDR_PAD; if (frame_size > i40e_max_xdp_frame_size(vsi)) return -EINVAL; @@ -3409,12 +3409,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - kfree(ring->rx_bi); ring->xsk_pool = i40e_xsk_pool(ring); if (ring->xsk_pool) { - ret = i40e_alloc_rx_bi_zc(ring); - if (ret) - return ret; ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); /* For AF_XDP ZC, we disallow packets to span on @@ -3432,9 +3428,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->queue_index); } else { - ret = i40e_alloc_rx_bi(ring); - if (ret) - return ret; ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -5733,6 +5726,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi) } } +/** + * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits + * @vsi: Pointer to vsi structure + * @max_tx_rate: max TX rate in bytes to be converted into Mbits + * + * Helper function to convert units before send to set BW limit + **/ +static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) +{ + if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { + dev_warn(&vsi->back->pdev->dev, + "Setting max tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = I40E_BW_CREDIT_DIVISOR; + } else { + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); + } + + return max_tx_rate; +} + /** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to 
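The i40e RSS rework above replaces a single flow_pctype scalar, which could describe only one packet-classifier type per request, with a bitmap, so every PCTYPE a flow maps to gets its hash-input registers programmed by one loop. The data-structure change in isolation:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define FOO_PCTYPES 64

static void foo_program_pctype(u8 type);	/* hypothetical */

static void foo_set_hash_types(void)
{
	DECLARE_BITMAP(pctypes, FOO_PCTYPES);
	u8 type;

	bitmap_zero(pctypes, FOO_PCTYPES);
	set_bit(3, pctypes);
	set_bit(31, pctypes);	/* several types can now coexist */

	for_each_set_bit(type, pctypes, FOO_PCTYPES)
		foo_program_pctype(type);
}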
be configured @@ -5755,10 +5768,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) max_tx_rate, seid); return -EINVAL; } - if (max_tx_rate && max_tx_rate < 50) { + if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); - max_tx_rate = 50; + max_tx_rate = I40E_BW_CREDIT_DIVISOR; } /* Tx rate credits are in values of 50Mbps, 0 is disabled */ @@ -7719,9 +7732,9 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { u64 credits = max_tx_rate; @@ -10052,6 +10065,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) return 0; } +/** + * i40e_clean_xps_state - clean xps state for every tx_ring + * @vsi: ptr to the VSI + **/ +static void i40e_clean_xps_state(struct i40e_vsi *vsi) +{ + int i; + + if (vsi->tx_rings) + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->tx_rings[i]) + clear_bit(__I40E_TX_XPS_INIT_DONE, + vsi->tx_rings[i]->state); +} + /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure @@ -10083,8 +10111,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) rtnl_unlock(); for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) + if (pf->vsi[v]) { + i40e_clean_xps_state(pf->vsi[v]); pf->vsi[v]->seid = 0; + } } i40e_shutdown_adminq(&pf->hw); @@ -10366,10 +10396,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); u64 credits = 0; - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; @@ -12490,6 +12520,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; @@ -12664,6 +12696,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, i40e_reset_and_rebuild(pf, true, true); } + if (!i40e_enabled_xdp_vsi(vsi) && prog) { + if (i40e_realloc_rx_bi_zc(vsi, true)) + return -ENOMEM; + } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { + if (i40e_realloc_rx_bi_zc(vsi, false)) + return -ENOMEM; + } + for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); @@ -12896,6 +12936,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) i40e_queue_pair_disable_irq(vsi, queue_pair); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); @@ -15950,6 +15991,8 @@ static struct pci_driver i40e_driver = { **/ static int __init i40e_init_module(void) { + int err; + pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); @@ -15967,7 +16010,14 @@ static int __init i40e_init_module(void) } i40e_dbg_init(); - return pci_register_driver(&i40e_driver); 
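/*
 * The constants above encode two unit conversions: mqprio hands the
 * driver a rate in bytes/s, i40e works in Mbit/s (I40E_BW_MBPS_DIVISOR,
 * 125000 bytes/s per Mbit/s), and the firmware is programmed in
 * 50 Mbit/s credits (I40E_BW_CREDIT_DIVISOR). A worked example under
 * those assumed divisor values:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate_bytes = 62500000;		/* 500 Mbit/s expressed as bytes/s */
	uint64_t mbits = rate_bytes / 125000;	/* the do_div() step -> 500 Mbit/s */

	if (mbits < 50)		/* floor mirrored by the dev_warn path above */
		mbits = 50;

	printf("%llu Mbit/s -> %llu credits\n", (unsigned long long)mbits,
	       (unsigned long long)(mbits / 50));	/* -> 10 credits */
	return 0;
}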
+ err = pci_register_driver(&i40e_driver); + if (err) { + destroy_workqueue(i40e_wq); + i40e_dbg_exit(); + return err; + } + + return 0; } module_init(i40e_init_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5ad28129fab2a..43be33d87e391 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1305,14 +1305,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) return -ENOMEM; } -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; - - rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi ? 0 : -ENOMEM; -} - static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); @@ -1443,6 +1435,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; + rx_ring->rx_bi = + kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); + if (!rx_ring->rx_bi) + return -ENOMEM; + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 93ac201f68b8e..af843e8169f7d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -465,7 +465,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 446672a7e39fb..0872448c0e804 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1404,6 +1404,10 @@ struct i40e_lldp_variables { #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 /* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_X722_L3_SRC_SHIFT 49 +#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT) +#define I40E_X722_L3_DST_SHIFT 41 +#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT) #define I40E_L3_SRC_SHIFT 47 #define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) #define I40E_L3_V6_SRC_SHIFT 43 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1947c5a775505..bb2a79b70c3ae 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1483,10 +1483,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) return true; - /* If the VFs have been disabled, this means something else is - * resetting the VF, so we shouldn't continue. - */ - if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) + /* Bail out if VFs are disabled. */ + if (test_bit(__I40E_VF_DISABLE, pf->state)) + return true; + + /* If VF is being reset already we don't need to continue. 
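/*
 * The i40e_init_module() hunk above follows the usual unwind rule:
 * anything created before pci_register_driver() must be torn down if
 * registration fails, otherwise the workqueue and debugfs entries
 * leak on a failed modprobe. Minimal standalone model of the pattern
 * (all names are placeholders):
 */
#include <stdio.h>

static int register_driver(void) { return -1; /* simulate failure */ }
static void destroy_workqueue(void) { puts("workqueue destroyed"); }
static void dbg_exit(void) { puts("debugfs removed"); }

int main(void)
{
	/* ... workqueue and debugfs were set up successfully here ... */
	int err = register_driver();

	if (err) {
		destroy_workqueue();	/* undo earlier setup before bailing */
		dbg_exit();
		return 1;
	}
	return 0;
}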
*/ + if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) return true; i40e_trigger_vf_reset(vf, flr); @@ -1523,7 +1525,8 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) i40e_cleanup_reset_vf(vf); i40e_flush(hw); - clear_bit(__I40E_VF_DISABLE, pf->state); + usleep_range(20000, 40000); + clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states); return true; } @@ -1556,8 +1559,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) return false; /* Begin reset on all VFs at once */ - for (v = 0; v < pf->num_alloc_vfs; v++) - i40e_trigger_vf_reset(&pf->vf[v], flr); + for (v = 0; v < pf->num_alloc_vfs; v++) { + vf = &pf->vf[v]; + /* If VF is being reset no need to trigger reset again */ + if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + i40e_trigger_vf_reset(&pf->vf[v], flr); + } /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -1573,9 +1580,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) */ while (v < pf->num_alloc_vfs) { vf = &pf->vf[v]; - reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) - break; + if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { + reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); + if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) + break; + } /* If the current VF has finished resetting, move on * to the next VF in sequence. @@ -1603,6 +1612,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) if (pf->vf[v].lan_vsi_idx == 0) continue; + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); } @@ -1614,6 +1627,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) if (pf->vf[v].lan_vsi_idx == 0) continue; + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); } @@ -1623,10 +1640,16 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) mdelay(50); /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) + for (v = 0; v < pf->num_alloc_vfs; v++) { + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_cleanup_reset_vf(&pf->vf[v]); + } i40e_flush(hw); + usleep_range(20000, 40000); clear_bit(__I40E_VF_DISABLE, pf->state); return true; @@ -1985,6 +2008,25 @@ static void i40e_del_qch(struct i40e_vf *vf) } } +/** + * i40e_vc_get_max_frame_size + * @vf: pointer to the VF + * + * Max frame size is determined based on the current port's max frame size and + * whether a port VLAN is configured on this VF. The VF is not aware whether + * it's in a port VLAN so the PF needs to account for this in max frame size + * checks and sending the max frame size to the VF. 
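/*
 * The per-VF I40E_VF_STATE_RESETTING bit above makes "is a reset
 * already running?" and "claim the reset" one atomic operation, which
 * is the property the old global __I40E_VF_DISABLE flag could not
 * provide per VF. Userspace model of the same idea using C11 atomics
 * in place of test_and_set_bit()/clear_bit():
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag vf_resetting = ATOMIC_FLAG_INIT;

static int reset_vf(void)
{
	/* like test_and_set_bit(): nonzero means someone else owns it */
	if (atomic_flag_test_and_set(&vf_resetting)) {
		puts("reset already in progress, bailing out");
		return 1;
	}
	puts("performing reset");
	atomic_flag_clear(&vf_resetting);	/* clear_bit() on the way out */
	return 0;
}

int main(void)
{
	return reset_vf();
}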
+ **/ +static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) +{ + u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; + + if (vf->port_vlan_id) + max_frame_size -= VLAN_HLEN; + + return max_frame_size; +} + /** * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info @@ -2085,6 +2127,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; + vfres->max_mtu = i40e_vc_get_max_frame_size(vf); if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index a554d0a0b09bd..358bbdb587951 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -39,6 +39,7 @@ enum i40e_vf_states { I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, I40E_VF_STATE_PRE_ENABLE, + I40E_VF_STATE_RESETTING }; /* VF capabilities */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 75e4a698c3db2..7f12261236293 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -9,14 +9,6 @@ #include "i40e_txrx_common.h" #include "i40e_xsk.h" -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; - - rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi_zc ? 0 : -ENOMEM; -} - void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi_zc, 0, @@ -28,6 +20,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) return &rx_ring->rx_bi_zc[idx]; } +/** + * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer + * @rx_ring: Current rx ring + * @pool_present: is pool for XSK present + * + * Try allocating memory and return ENOMEM, if failed to allocate. + * If allocation was successful, substitute buffer with allocated one. + * Returns 0 on success, negative on failure + */ +static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present) +{ + size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) : + sizeof(*rx_ring->rx_bi); + void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); + + if (!sw_ring) + return -ENOMEM; + + if (pool_present) { + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + rx_ring->rx_bi_zc = sw_ring; + } else { + kfree(rx_ring->rx_bi_zc); + rx_ring->rx_bi_zc = NULL; + rx_ring->rx_bi = sw_ring; + } + return 0; +} + +/** + * i40e_realloc_rx_bi_zc - reallocate rx SW rings + * @vsi: Current VSI + * @zc: is zero copy set + * + * Reallocate buffer for rx_rings that might be used by XSK. + * XDP requires more memory, than rx_buf provides. 
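/*
 * i40e_realloc_rx_xdp_bi() below allocates the replacement SW ring
 * first and frees the old one only on success, so a failed kcalloc()
 * leaves the current ring fully usable. The same swap-on-success
 * shape, reduced to standalone C:
 */
#include <stdlib.h>

static int swap_buffer(void **slot, size_t n, size_t elem_size)
{
	void *fresh = calloc(n, elem_size);

	if (!fresh)
		return -1;	/* old buffer untouched on failure */
	free(*slot);
	*slot = fresh;
	return 0;
}

int main(void)
{
	void *ring = calloc(16, 8);
	int err = ring ? swap_buffer(&ring, 16, 32) : -1;

	free(ring);
	return err ? 1 : 0;
}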
+ * Returns 0 on success, negative on failure + */ +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc) +{ + struct i40e_ring *rx_ring; + unsigned long q; + + for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { + rx_ring = vsi->rx_rings[q]; + if (i40e_realloc_rx_xdp_bi(rx_ring, zc)) + return -ENOMEM; + } + return 0; +} + /** * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a * certain ring/qid @@ -68,6 +112,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi, if (err) return err; + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); + if (err) + return err; + err = i40e_queue_pair_enable(vsi, qid); if (err) return err; @@ -112,6 +160,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR); if (if_running) { + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); + if (err) + return err; err = i40e_queue_pair_enable(vsi, qid); if (err) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 7adfd8539247c..36f5b6d206010 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -17,7 +17,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring); int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc); void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index a9cea7ccdd865..ae96b552a3bb3 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1318,7 +1318,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter) static int iavf_init_rss(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; - int ret; if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ @@ -1334,9 +1333,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter) iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = iavf_config_rss(adapter); - return ret; + return iavf_config_rss(adapter); } /** @@ -4040,7 +4038,11 @@ static int __init iavf_init_module(void) pr_err("%s: Failed to create workqueue\n", iavf_driver_name); return -ENOMEM; } + ret = pci_register_driver(&iavf_driver); + if (ret) + destroy_workqueue(iavf_wq); + return ret; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 99983f7a0ce0b..d481a922f0184 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; + /* underlying hardware might not allow access and/or always return + * 0 for the head/tail registers so just use the cached values + */ head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? 
tail - head : (tail + ring->count - head);
@@ -1368,7 +1371,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, #endif struct sk_buff *skb; - if (!rx_buffer) + if (!rx_buffer || !size) return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; @@ -1526,7 +1529,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - if (rx_buffer) + if (rx_buffer && size) rx_buffer->pagecnt_bias++; break; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index ff479bf721443..5deee75bc4360 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -241,11 +241,14 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; - struct virtchnl_queue_pair_info *vqpi; + int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; - int i, max_frame = IAVF_MAX_RXBUFFER; + struct virtchnl_queue_pair_info *vqpi; size_t len; + if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) + max_frame = IAVF_MAX_RXBUFFER; + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 810f2bdb91645..c1465096239b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3404,7 +3404,7 @@ static int ice_init_pf(struct ice_pf *pf) pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); if (!pf->avail_rxqs) { - devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); + bitmap_free(pf->avail_txqs); pf->avail_txqs = NULL; return -ENOMEM; } @@ -4853,7 +4853,7 @@ static int __init ice_module_init(void) pr_info("%s\n", ice_driver_string); pr_info("%s\n", ice_copyright); - ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 28baf203459a8..5e3b0a5843a8e 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) *data = 1; return -1; } + wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8); + wr32(E1000_EIMS, BIT(0)); } else if (adapter->flags & IGB_FLAG_HAS_MSI) { shared_int = false; if (request_irq(irq, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 327196d15a6ae..0ea8e4024d638 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1204,8 +1204,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, if (!q_vector) { q_vector = kzalloc(size, GFP_KERNEL); } else if (size > ksize(q_vector)) { - kfree_rcu(q_vector, rcu); - q_vector = kzalloc(size, GFP_KERNEL); + struct igb_q_vector *new_q_vector; + + new_q_vector = kzalloc(size, GFP_KERNEL); + if (new_q_vector) + kfree_rcu(q_vector, rcu); + q_vector = new_q_vector; } else { memset(q_vector, 0, size); } @@ -5879,7 +5883,7 @@ static void 
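/*
 * With the cached head/tail values above, iavf_get_tx_pending() is
 * plain ring arithmetic: when the ring has wrapped (head > tail) the
 * distance runs through ring->count. Worked example for an assumed
 * 512-entry ring:
 */
#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 50, 512));	/* 40, not wrapped */
	printf("%u\n", tx_pending(500, 20, 512));	/* 32, wrapped */
	return 0;
}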
igb_tx_ctxtdesc(struct igb_ring *tx_ring, */ if (tx_ring->launchtime_enable) { ts = ktime_to_timespec64(first->skb->tstamp); - first->skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(first->skb); context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->seqnum_seed = 0; @@ -7416,7 +7420,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - u32 reg, msgbuf[3]; + u32 reg, msgbuf[3] = {}; u8 *addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index a97bf7a5f1d67..970dd878d8a76 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -87,6 +87,8 @@ struct igc_ring { u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ u32 start_time; u32 end_time; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 32f5fd6841398..352b50d3881d0 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -278,6 +278,8 @@ #define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + /* Transmit Control */ #define IGC_TCTL_EN 0x00000002 /* enable Tx */ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index e7ffe63925fd2..1a0aae7b128d8 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -898,25 +898,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev) return netdev_mc_count(netdev); } -static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) { + struct igc_adapter *adapter = netdev_priv(ring->netdev); ktime_t cycle_time = adapter->cycle_time; ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; u32 launchtime; + s64 n; - /* FIXME: when using ETF together with taprio, we may have a - * case where 'delta' is larger than the cycle_time, this may - * cause problems if we don't read the current value of - * IGC_BASET, as the value writen into the launchtime - * descriptor field may be misinterpreted. + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(txtime, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. 
*/ - div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; return cpu_to_le32(launchtime); } +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, - struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) { @@ -935,35 +1028,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) mss_l4len_idx |= tx_ring->reg_idx << 4; + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - - /* We assume there is always a valid Tx time available. Invalid times - * should have been handled by the upper layers. 
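/*
 * igc_tx_launchtime() above anchors everything to the Qbv cycle that
 * contains "now": n counts whole cycles since base_time, baset_est is
 * the start of the current cycle, and the descriptor gets txtime's
 * offset within a cycle. Standalone arithmetic check with
 * illustrative nanosecond values:
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t base_time = 1000000;	/* cycle origin */
	int64_t cycle_time = 500000;	/* 500 us Qbv cycle */
	int64_t now = 3200000;
	int64_t txtime = 3350000;

	int64_t n = (now - base_time) / cycle_time;		/* 4 */
	int64_t baset_est = base_time + n * cycle_time;		/* 3000000 */
	int64_t end_of_cycle = baset_est + cycle_time;		/* 3500000 */
	int64_t launch = (txtime - baset_est) % cycle_time;	/* 350000 */

	printf("cycle %" PRId64 " ends %" PRId64 ", launch offset %" PRId64 "\n",
	       n, end_of_cycle, launch);
	return 0;
}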
- */ - if (tx_ring->launchtime_enable) { - struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); - ktime_t txtime = first->skb->tstamp; - - first->skb->tstamp = ktime_set(0, 0); - context_desc->launch_time = igc_tx_launchtime(adapter, - txtime); - } else { - context_desc->launch_time = 0; - } + context_desc->launch_time = launch_time; } -static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb) -{ - unsigned int offset = 0; - - ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); - - return offset == skb_checksum_start_offset(skb); -} - -static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; @@ -985,10 +1060,7 @@ static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) break; case offsetof(struct sctphdr, checksum): /* validate that this is actually an SCTP request */ - if ((first->protocol == htons(ETH_P_IP) && - (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || - (first->protocol == htons(ETH_P_IPV6) && - igc_ipv6_csum_is_sctp(skb))) { + if (skb_csum_is_sctp(skb)) { type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; break; } @@ -1006,7 +1078,8 @@ static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); } static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) @@ -1230,6 +1303,7 @@ static int igc_tx_map(struct igc_ring *tx_ring, static int igc_tso(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u8 *hdr_len) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; @@ -1316,8 +1390,8 @@ static int igc_tso(struct igc_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; } @@ -1325,11 +1399,14 @@ static int igc_tso(struct igc_ring *tx_ring, static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { + bool first_flag = false, insert_empty = false; u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); struct igc_tx_buffer *first; + __le32 launch_time = 0; u32 tx_flags = 0; unsigned short f; + ktime_t txtime; u8 hdr_len = 0; int tso = 0; @@ -1343,11 +1420,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size( &skb_shinfo(skb)->frags[f])); - if (igc_maybe_stop_tx(tx_ring, count + 3)) { + if (igc_maybe_stop_tx(tx_ring, count + 5)) { /* this is a hard error */ return NETDEV_TX_BUSY; } + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, 
IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->skb = skb; @@ -1378,11 +1484,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = protocol; - tso = igc_tso(tx_ring, first, &hdr_len); + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); if (tso < 0) goto out_drop; else if (!tso) - igc_tx_csum(tx_ring, first); + igc_tx_csum(tx_ring, first, launch_time, first_flag); igc_tx_map(tx_ring, first, hdr_len); @@ -4756,9 +4862,10 @@ static bool validate_schedule(struct igc_adapter *adapter, return false; for (n = 0; n < qopt->num_entries; n++) { - const struct tc_taprio_sched_entry *e; + const struct tc_taprio_sched_entry *e, *prev; int i; + prev = n ? &qopt->entries[n - 1] : NULL; e = &qopt->entries[n]; /* i225 only supports "global" frame preemption @@ -4771,7 +4878,12 @@ static bool validate_schedule(struct igc_adapter *adapter, if (e->gate_mask & BIT(i)) queue_uses[i]++; - if (queue_uses[i] > 1) + /* There are limitations: A single queue cannot be + * opened and closed multiple times per cycle unless the + * gate stays open. Check for it. + */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) return false; } } @@ -4798,14 +4910,19 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; u32 start_time = 0, end_time = 0; size_t n; + int i; if (!qopt->enable) { adapter->base_time = 0; return 0; } + if (qopt->base_time < 0) + return -ERANGE; + if (adapter->base_time) return -EALREADY; @@ -4815,28 +4932,58 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, adapter->cycle_time = qopt->cycle_time; adapter->base_time = qopt->base_time; - /* FIXME: be a little smarter about cases when the gate for a - * queue stays open for more than one entry. - */ for (n = 0; n < qopt->num_entries; n++) { struct tc_taprio_sched_entry *e = &qopt->entries[n]; - int i; end_time += e->interval; + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; if (!(e->gate_mask & BIT(i))) continue; - ring->start_time = start_time; + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; ring->end_time = end_time; + + queue_configured[i] = true; } start_time += e->interval; } + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. 
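/*
 * igc_save_qbv_schedule() above flattens the taprio gate list into a
 * single (start_time, end_time) window per queue: end_time accumulates
 * the entry intervals, and a queue open across consecutive entries
 * keeps its first start while the end advances. Tiny model with two
 * queues and an invented three-entry schedule:
 */
#include <stdio.h>

int main(void)
{
	unsigned int intervals[3] = { 100, 200, 300 };
	unsigned int gates[3] = { 0x1, 0x3, 0x2 };	/* per-entry open-queue mask */
	unsigned int start[2] = { 0 }, end[2] = { 0 };
	int configured[2] = { 0, 0 };
	unsigned int t = 0, e_end = 0;

	for (int n = 0; n < 3; n++) {
		e_end += intervals[n];
		for (int q = 0; q < 2; q++) {
			if (!(gates[n] & (1u << q)))
				continue;
			if (!configured[q])
				start[q] = t;	/* keep the first opening */
			end[q] = e_end;
			configured[q] = 1;
		}
		t += intervals[n];
	}
	for (int q = 0; q < 2; q++)
		printf("queue %d open %u..%u\n", q, start[q], end[q]);
	return 0;	/* queue 0: 0..300, queue 1: 100..600 */
}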
+ */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (!queue_configured[i]) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 4ab46eee3d938..ef53f7665b58c 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -134,10 +134,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, * * We need to convert the system time value stored in the RX/TXSTMP registers * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. **/ -static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, - struct skb_shared_hwtstamps *hwtstamps, - u64 systim) +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) { switch (adapter->hw.mac.type) { case igc_i225: @@ -147,8 +149,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, systim & 0xFFFFFFFF); break; default: - break; + return -EINVAL; } + return 0; } /** @@ -372,7 +375,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) regval = rd32(IGC_TXSTMPL); regval |= (u64)rd32(IGC_TXSTMPH) << 32; - igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; switch (adapter->link_speed) { case SPEED_10: diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 174103c4bea6d..2d4db2a547b26 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -92,15 +92,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_STQT(i), ring->start_time); wr32(IGC_ENDQT(i), ring->end_time); - if (adapter->base_time) { - /* If we have a base_time we are in "taprio" - * mode and we need to be strict about the - * cycles: only transmit a packet if it can be - * completed during that cycle. - */ - txqctl |= IGC_TXQCTL_STRICT_CYCLE | - IGC_TXQCTL_STRICT_END; - } + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; if (ring->launchtime_enable) txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 27c6f911737bf..18251edbfabfb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -67,6 +67,8 @@ #define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + /* Attempt to maximize the headroom available for incoming frames. We * use a 2K buffer for receives and need 1536/1534 to store the data for * the frame. This leaves us with 512 bytes of room. 
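/*
 * IXGBE_PKT_HDR_PAD above bundles the non-MTU bytes a receive buffer
 * must still hold: 14 (Ethernet header) + 4 (FCS) + 2 * 4 (two VLAN
 * tags) = 26, so the XDP MTU check collapses to one frame-vs-buffer
 * comparison. Sketch using the 3K buffer that applies on 4K-page
 * systems without legacy RX:
 */
#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
#define PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

int main(void)
{
	int max_xdp_frame = 3072;	/* the IXGBE_RXBUFFER_3K case */
	int new_mtu = 3000;
	int frame_size = new_mtu + PKT_HDR_PAD;	/* 3026 */

	printf("frame %d %s\n", frame_size,
	       frame_size > max_xdp_frame ? "rejected" : "accepted");
	return 0;
}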
From that we need diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b5b8be4672aa4..5c542f5d2b20d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6728,6 +6728,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) ixgbe_free_rx_resources(adapter->rx_ring[i]); } +/** + * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP + * @adapter: device handle, pointer to adapter + */ +static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter) +{ + if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + return IXGBE_RXBUFFER_2K; + else + return IXGBE_RXBUFFER_3K; +} + /** * ixgbe_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure @@ -6739,18 +6751,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (adapter->xdp_prog) { - int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN; - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; + if (ixgbe_enabled_xdp_adapter(adapter)) { + int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD; - if (new_frame_size > ixgbe_rx_bufsz(ring)) { - e_warn(probe, "Requested MTU size is not supported with XDP\n"); - return -EINVAL; - } + if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) { + e_warn(probe, "Requested MTU size is not supported with XDP\n"); + return -EINVAL; } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fc389eecdd2b8..b0413904b798c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -851,9 +851,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn) rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn); if (rp_pdev && rp_pdev->subordinate) { bus = rp_pdev->subordinate->number; + pci_dev_put(rp_pdev); return pci_get_domain_bus_and_slot(0, bus, 0); } + pci_dev_put(rp_pdev); return NULL; } @@ -870,6 +872,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw) struct ixgbe_adapter *adapter = hw->back; struct pci_dev *pdev = adapter->pdev; struct pci_dev *func0_pdev; + bool has_mii = false; /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0 @@ -880,15 +883,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw) func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0)); if (func0_pdev) { if (func0_pdev == pdev) - return true; - else - return false; + has_mii = true; + goto out; } func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0)); if (func0_pdev == pdev) - return true; + has_mii = true; - return false; +out: + pci_dev_put(func0_pdev); + return has_mii; } /** diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 2d6ac61d7a3e6..4510a84514fa4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4878,6 +4878,8 @@ static struct pci_driver ixgbevf_driver = { **/ static int __init ixgbevf_init_module(void) { + int err; + pr_info("%s\n", ixgbevf_driver_string); pr_info("%s\n", ixgbevf_copyright); ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); @@ -4886,7 +4888,13 @@ static int 
__init ixgbevf_init_module(void) return -ENOMEM; } - return pci_register_driver(&ixgbevf_driver); + err = pci_register_driver(&ixgbevf_driver); + if (err) { + destroy_workqueue(ixgbevf_wq); + return err; + } + + return 0; } module_init(ixgbevf_init_module); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 2d0c52f7106bc..5ea626b1e5783 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -466,7 +466,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { - dev_kfree_skb_any(skb); netdev_err(dev, "tx ring full\n"); netif_tx_stop_queue(txq); return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 90e6111ce534d..735b76effc492 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2471,6 +2471,7 @@ static int mv643xx_eth_open(struct net_device *dev) for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); out: + napi_disable(&mp->napi); free_irq(dev->irq, dev); return err; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 74e266c0b8e10..f5567d485e91a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4140,7 +4140,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) /* Use the cpu associated to the rxq when it is online, in all * the other cases, use the cpu 0 which can't be offline. */ - if (cpu_online(pp->rxq_def)) + if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) elected_cpu = pp->rxq_def; max_cpu = num_present_cpus(); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index d825eb021b22e..e999ac2de34e8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1434,6 +1434,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset); void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); +void mvpp2_dbgfs_exit(void); #ifdef CONFIG_MVPP2_PTP int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index 4a3baa7e01424..75e83ea2a926e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, return 0; } +static struct dentry *mvpp2_root; + +void mvpp2_dbgfs_exit(void) +{ + debugfs_remove(mvpp2_root); +} + void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) { debugfs_remove_recursive(priv->dbgfs_dir); @@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) { - struct dentry *mvpp2_dir, *mvpp2_root; + struct dentry *mvpp2_dir; int ret, i; - mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); if (!mvpp2_root) mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 542cd6f2c9bd4..68c5ed8716c84 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7155,7 +7155,18 @@ static struct platform_driver mvpp2_driver = { }, 
}; -module_platform_driver(mvpp2_driver); +static int __init mvpp2_driver_init(void) +{ + return platform_driver_register(&mvpp2_driver); +} +module_init(mvpp2_driver_init); + +static void __exit mvpp2_driver_exit(void) +{ + platform_driver_unregister(&mvpp2_driver); + mvpp2_dbgfs_exit(); +} +module_exit(mvpp2_driver_exit); MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas "); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index fc27a40202c6d..c0a0a31272cc2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -145,6 +145,16 @@ int cgx_get_cgxid(void *cgxd) return cgx->cgx_id; } +u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + u64 cfg; + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG); + + return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT; +} + /* Ensure the required lock for event queue(where asynchronous events are * posted) is acquired before calling this API. Else an asynchronous event(with * latest link status) can reach the destination before this function returns @@ -340,9 +350,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); if (enable) - cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN; + cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN; else - cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN); + cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN); cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); return 0; } @@ -814,8 +824,7 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx) minor_ver = FIELD_GET(RESP_MINOR_VER, resp); dev_dbg(dev, "Firmware command interface version = %d.%d\n", major_ver, minor_ver); - if (major_ver != CGX_FIRMWARE_MAJOR_VER || - minor_ver != CGX_FIRMWARE_MINOR_VER) + if (major_ver != CGX_FIRMWARE_MAJOR_VER) return -EIO; else return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 27ca3291682bc..e176a6c654ef2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -27,7 +27,10 @@ /* Registers */ #define CGXX_CMRX_CFG 0x00 -#define CMR_EN BIT_ULL(55) +#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59) +#define CMR_P2X_SEL_SHIFT 59ULL +#define CMR_P2X_SEL_NIX0 1ULL +#define CMR_P2X_SEL_NIX1 2ULL #define DATA_PKT_TX_EN BIT_ULL(53) #define DATA_PKT_RX_EN BIT_ULL(54) #define CGX_LMAC_TYPE_SHIFT 40 @@ -142,5 +145,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause); void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable); +u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index c26652436c53a..acbc67074f59c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -316,31 +316,36 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, block->fn_map[lf] = attach ? pcifunc : 0; - switch (block->type) { - case BLKTYPE_NPA: + switch (block->addr) { + case BLKADDR_NPA: pfvf->npalf = attach ? true : false; num_lfs = pfvf->npalf; break; - case BLKTYPE_NIX: + case BLKADDR_NIX0: + case BLKADDR_NIX1: pfvf->nixlf = attach ? 
true : false; num_lfs = pfvf->nixlf; break; - case BLKTYPE_SSO: + case BLKADDR_SSO: attach ? pfvf->sso++ : pfvf->sso--; num_lfs = pfvf->sso; break; - case BLKTYPE_SSOW: + case BLKADDR_SSOW: attach ? pfvf->ssow++ : pfvf->ssow--; num_lfs = pfvf->ssow; break; - case BLKTYPE_TIM: + case BLKADDR_TIM: attach ? pfvf->timlfs++ : pfvf->timlfs--; num_lfs = pfvf->timlfs; break; - case BLKTYPE_CPT: + case BLKADDR_CPT0: attach ? pfvf->cptlfs++ : pfvf->cptlfs--; num_lfs = pfvf->cptlfs; break; + case BLKADDR_CPT1: + attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--; + num_lfs = pfvf->cpt1_lfs; + break; } reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg; @@ -1035,7 +1040,30 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, /* Get current count of a RVU block's LF/slots * provisioned to a given RVU func. */ -static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) +u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr) +{ + switch (blkaddr) { + case BLKADDR_NPA: + return pfvf->npalf ? 1 : 0; + case BLKADDR_NIX0: + case BLKADDR_NIX1: + return pfvf->nixlf ? 1 : 0; + case BLKADDR_SSO: + return pfvf->sso; + case BLKADDR_SSOW: + return pfvf->ssow; + case BLKADDR_TIM: + return pfvf->timlfs; + case BLKADDR_CPT0: + return pfvf->cptlfs; + case BLKADDR_CPT1: + return pfvf->cpt1_lfs; + } + return 0; +} + +/* Return true if LFs of block type are attached to pcifunc */ +static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype) { switch (blktype) { case BLKTYPE_NPA: @@ -1043,15 +1071,16 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) case BLKTYPE_NIX: return pfvf->nixlf ? 1 : 0; case BLKTYPE_SSO: - return pfvf->sso; + return !!pfvf->sso; case BLKTYPE_SSOW: - return pfvf->ssow; + return !!pfvf->ssow; case BLKTYPE_TIM: - return pfvf->timlfs; + return !!pfvf->timlfs; case BLKTYPE_CPT: - return pfvf->cptlfs; + return pfvf->cptlfs || pfvf->cpt1_lfs; } - return 0; + + return false; } bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) @@ -1064,7 +1093,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) pfvf = rvu_get_pfvf(rvu, pcifunc); /* Check if this PFFUNC has a LF of type blktype attached */ - if (!rvu_get_rsrc_mapcount(pfvf, blktype)) + if (!is_blktype_attached(pfvf, blktype)) return false; return true; @@ -1105,7 +1134,7 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) block = &hw->block[blkaddr]; - num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type); + num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); if (!num_lfs) return; @@ -1179,6 +1208,58 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu, return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } +static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr = BLKADDR_NIX0, vf; + struct rvu_pfvf *pf; + + /* All CGX mapped PFs are set with assigned NIX block during init */ + if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + blkaddr = pf->nix_blkaddr; + } else if (is_afvf(pcifunc)) { + vf = pcifunc - 1; + /* Assign NIX based on VF number. All even numbered VFs get + * NIX0 and odd numbered gets NIX1 + */ + blkaddr = (vf & 1) ? 
BLKADDR_NIX1 : BLKADDR_NIX0; + /* NIX1 is not present on all silicons */ + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + blkaddr = BLKADDR_NIX0; + } + + switch (blkaddr) { + case BLKADDR_NIX1: + pfvf->nix_blkaddr = BLKADDR_NIX1; + break; + case BLKADDR_NIX0: + default: + pfvf->nix_blkaddr = BLKADDR_NIX0; + break; + } + + return pfvf->nix_blkaddr; +} + +static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) +{ + int blkaddr; + + switch (blktype) { + case BLKTYPE_NIX: + blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc); + break; + default: + return rvu_get_blkaddr(rvu, blktype, 0); + }; + + if (is_block_implemented(rvu->hw, blkaddr)) + return blkaddr; + + return -ENODEV; +} + static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype, int num_lfs) { @@ -1192,7 +1273,7 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, if (!num_lfs) return; - blkaddr = rvu_get_blkaddr(rvu, blktype, 0); + blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc); if (blkaddr < 0) return; @@ -1221,12 +1302,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, struct rsrc_attach *req, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int free_lfs, mappedlfs, blkaddr; struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; - int free_lfs, mappedlfs; /* Only one NPA LF can be attached */ - if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) { + if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) { block = &hw->block[BLKADDR_NPA]; free_lfs = rvu_rsrc_free_count(&block->lf); if (!free_lfs) @@ -1239,8 +1320,11 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, } /* Only one NIX LF can be attached */ - if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) { - block = &hw->block[BLKADDR_NIX0]; + if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) { + blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return blkaddr; + block = &hw->block[blkaddr]; free_lfs = rvu_rsrc_free_count(&block->lf); if (!free_lfs) goto fail; @@ -1260,7 +1344,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, pcifunc, req->sso, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); /* Check if additional resources are available */ if (req->sso > mappedlfs && @@ -1276,7 +1360,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, pcifunc, req->sso, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->ssow > mappedlfs && ((req->ssow - mappedlfs) > free_lfs)) @@ -1291,7 +1375,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, pcifunc, req->timlfs, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->timlfs > mappedlfs && ((req->timlfs - mappedlfs) > free_lfs)) @@ -1306,7 +1390,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, pcifunc, req->cptlfs, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->cptlfs > mappedlfs && ((req->cptlfs - mappedlfs) > free_lfs)) @@ -1942,7 +2026,7 @@ static void 
rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) block = &rvu->hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), - block->type); + block->addr); if (!num_lfs) return; for (slot = 0; slot < num_lfs; slot++) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 90eed3160915f..fc6d785b98ddd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -137,6 +137,7 @@ struct rvu_pfvf { u16 ssow; u16 cptlfs; u16 timlfs; + u16 cpt1_lfs; u8 cgx_lmac; /* Block LF's MSIX vector info */ @@ -182,6 +183,8 @@ struct rvu_pfvf { bool cgx_in_use; /* this PF/VF using CGX? */ int cgx_users; /* number of cgx users - used only by PFs */ + + u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ }; struct nix_txsch { @@ -420,6 +423,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); +u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); int rvu_get_pf(u16 pcifunc); struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index f4ecc755eaff1..6c6b411e78fd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -74,6 +74,20 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) return rvu->cgx_idmap[cgx_id]; } +/* Based on P2X connectivity find mapped NIX block for a PF */ +static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, + int cgx_id, int lmac_id) +{ + struct rvu_pfvf *pfvf = &rvu->pf[pf]; + u8 p2x; + + p2x = cgx_lmac_get_p2x(cgx_id, lmac_id); + /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */ + pfvf->nix_blkaddr = BLKADDR_NIX0; + if (p2x == CMR_P2X_SEL_NIX1) + pfvf->nix_blkaddr = BLKADDR_NIX1; +} + static int rvu_map_cgx_lmac_pf(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; @@ -117,6 +131,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; free_pkind = rvu_alloc_rsrc(&pkind->rsrc); pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; + rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index f6a3cf3e6f236..9886a30e9723c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -187,8 +187,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int pkind, pf, vf, lbkid; u8 cgx_id, lmac_id; - int pkind, pf, vf; int err; pf = rvu_get_pf(pcifunc); @@ -221,13 +221,24 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + /* If NIX1 block is present on the silicon then NIXes are + * assigned alternatively for lbk interfaces. NIX0 should + * send packets on lbk link 1 channels and NIX1 should send + * on lbk link 0 channels for the communication between + * NIX0 and NIX1. 
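/*
 * The loopback wiring below pairs AF VFs: VF n exchanges packets with
 * VF n^1 over consecutive LBK channels, and on silicon with a second
 * NIX block the same even/odd parity selects NIX0 vs NIX1 and the
 * opposite LBK link. Small table generator showing the resulting
 * pairing (channel numbers simplified to indices within a link):
 */
#include <stdio.h>

int main(void)
{
	int lbk_links = 2;	/* assume NIX1 is present */

	for (int vf = 0; vf < 4; vf++) {
		int nix = vf & 1;				/* even -> NIX0, odd -> NIX1 */
		int lbkid = lbk_links > 1 ? ((vf & 1) ? 0 : 1) : 0;
		int rx_chan = vf;				/* NIX_CHAN_LBK_CHX(lbkid, vf) */
		int tx_chan = (vf & 1) ? vf - 1 : vf + 1;	/* partner's channel */

		printf("VF%d: NIX%d lbk%d rx=%d tx=%d\n",
		       vf, nix, lbkid, rx_chan, tx_chan);
	}
	return 0;
}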
+ */ + lbkid = 0; + if (rvu->hw->lbk_links > 1) + lbkid = vf & 0x1 ? 0 : 1; + /* Note that AF's VFs work in pairs and talk over consecutive * loopback channels.Therefore if odd number of AF VFs are * enabled then the last VF remains with no pair. */ - pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf); - pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) : - NIX_CHAN_LBK_CHX(0, vf + 1); + pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf); + pfvf->tx_chan_base = vf & 0x1 ? + NIX_CHAN_LBK_CHX(lbkid, vf - 1) : + NIX_CHAN_LBK_CHX(lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, @@ -3157,7 +3168,7 @@ int rvu_nix_init(struct rvu *rvu) hw->cgx = (cfg >> 12) & 0xF; hw->lmac_per_cgx = (cfg >> 8) & 0xF; hw->cgx_links = hw->cgx * hw->lmac_per_cgx; - hw->lbk_links = 1; + hw->lbk_links = (cfg >> 24) & 0xF; hw->sdp_links = 1; /* Initialize admin queue */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index 2a13c318048cc..59a3ea02b8add 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -771,6 +771,7 @@ static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma, int prestera_rxtx_switch_init(struct prestera_switch *sw) { struct prestera_rxtx *rxtx; + int err; rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL); if (!rxtx) @@ -778,7 +779,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw) sw->rxtx = rxtx; - return prestera_sdma_switch_init(sw); + err = prestera_sdma_switch_init(sw); + if (err) + kfree(rxtx); + + return err; } void prestera_rxtx_switch_fini(struct prestera_switch *sw) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c7aff89141e17..217dc67c48fa2 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2299,7 +2299,9 @@ static int mtk_open(struct net_device *dev) int err = mtk_start_dma(eth); - if (err) + if (err) { + phylink_disconnect_phy(mac->phylink); return err; + } mtk_gdm_config(eth, MTK_GDMA_TO_PDMA); diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 427e7a31862c2..d7f2890c254fe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev, err = mlx4_bitmap_init(*bitmap + k, 1, MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0, 0); - mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); + if (!err) + mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); } if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 94426d29025eb..39c17e9039157 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -959,6 +959,7 @@ static void cmd_work_handler(struct work_struct *work) cmd_ent_get(ent); set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + cmd_ent_get(ent); /* for the _real_ FW event on completion */ /* Skip sending command to fw if internal error */ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) { u8 status = 0; @@ -972,7 +973,6 @@ static void cmd_work_handler(struct work_struct *work) return; } - cmd_ent_get(ent); /* for the _real_ FW event on completion */ /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
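/*
 * Moving the second cmd_ent_get() above the internal-error bailout
 * guarantees the reference meant for the firmware completion exists
 * before any path can complete (and put) the entry; with the old
 * order an early completion could drop the last reference while it
 * was still expected. Generic take-the-reference-before-publishing
 * model:
 */
#include <stdio.h>

struct ent { int refs; };

static void ent_get(struct ent *e) { e->refs++; }
static void ent_put(struct ent *e)
{
	if (--e->refs == 0)
		puts("entry freed");
}

int main(void)
{
	struct ent e = { .refs = 1 };	/* submitter's reference */

	ent_get(&e);	/* completer's reference, taken BEFORE publishing */
	/* publish(&e); ... the completer calls ent_put() whenever it fires */
	ent_put(&e);	/* completer side */
	ent_put(&e);	/* submitter side -> "entry freed" exactly once */
	return 0;
}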
wmb(); @@ -1422,8 +1422,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, return -EFAULT; err = sscanf(outlen_str, "%d", &outlen); - if (err < 0) - return err; + if (err != 1) + return -EINVAL; ptr = kzalloc(outlen, GFP_KERNEL); if (!ptr) @@ -1586,8 +1586,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force cmd_ent_put(ent); /* timeout work was canceled */ if (!forced || /* Real FW completion */ - pci_channel_offline(dev->pdev) || /* FW is inaccessible */ - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + mlx5_cmd_is_down(dev) || /* No real FW completion is expected */ + !opcode_allowed(cmd, ent->op)) cmd_ent_put(ent); ent->ts2 = ktime_get_ns(); @@ -1687,12 +1687,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) - while (down_trylock(&cmd->sem)) + for (i = 0; i < cmd->max_reg_cmds; i++) { + while (down_trylock(&cmd->sem)) { mlx5_cmd_trigger_completions(dev); + cond_resched(); + } + } - while (down_trylock(&cmd->pages_sem)) + while (down_trylock(&cmd->pages_sem)) { mlx5_cmd_trigger_completions(dev); + cond_resched(); + } /* Unlock cmdif */ up(&cmd->pages_sem); @@ -1853,7 +1858,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, ctx->dev = dev; /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ atomic_set(&ctx->num_inflight, 1); - init_waitqueue_head(&ctx->wait); + init_completion(&ctx->inflight_done); } EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); @@ -1867,8 +1872,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); */ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) { - atomic_dec(&ctx->num_inflight); - wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0); + if (!atomic_dec_and_test(&ctx->num_inflight)) + wait_for_completion(&ctx->inflight_done); } EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); @@ -1879,7 +1884,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) - wake_up(&ctx->wait); + complete(&ctx->inflight_done); } int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, @@ -1895,7 +1900,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, ret = cmd_exec(ctx->dev, in, in_size, out, out_size, mlx5_cmd_exec_cb_handler, work, false); if (ret && atomic_dec_and_test(&ctx->num_inflight)) - wake_up(&ctx->wait); + complete(&ctx->inflight_done); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index e8a4adccd2b26..40d7bfca37499 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) MLX5_GET(mtrc_cap, out, num_string_trace); tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db); tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); + tracer->str_db.loaded = false; for (i = 0; i < tracer->str_db.num_string_db; i++) { mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]); @@ -638,7 +639,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer, trace_timestamp = (timestamp_event.timestamp & MASK_52_7) | (str_frmt->timestamp & MASK_6_0); else - trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) | + trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) | (str_frmt->timestamp & 
MASK_6_0); mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp); @@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer) if (err) mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err); + tracer->buff.consumer_index = 0; return err; } @@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work) mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); if (tracer->owner) { tracer->owner = false; - tracer->buff.consumer_index = 0; return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 038a0f1cecec6..e44281ae570d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -88,6 +88,8 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[], struct udphdr *udp = (struct udphdr *)(buf); struct vxlanhdr *vxh; + if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) + return -EOPNOTSUPP; vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); *ip_proto = IPPROTO_UDP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 26f7fab109d97..d08bd22dc5698 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -113,7 +113,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) struct xfrm_replay_state_esn *replay_esn; u32 seq_bottom = 0; u8 overlap; - u32 *esn; if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) { sa_entry->esn_state.trigger = 0; @@ -128,11 +127,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x, htonl(seq_bottom)); - esn = &sa_entry->esn_state.esn; sa_entry->esn_state.trigger = 1; if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { - ++(*esn); sa_entry->esn_state.overlap = 0; return true; } else if (unlikely(!overlap && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cfc3bfcb04a2f..5673a4113253b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -992,7 +992,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; - sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN; sq->xsk_pool = xsk_pool; sq->stats = sq->xsk_pool ? 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index f1bf7f6758d0d..16846442717dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1344,9 +1344,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *out_dev, *encap_dev = NULL; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; + struct net_device *encap_dev = NULL; struct mlx5_esw_flow_attr *esw_attr; struct mlx5_fc *counter = NULL; struct mlx5e_rep_priv *rpriv; @@ -1391,16 +1391,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, esw_attr = attr->esw_attr; for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { + struct net_device *out_dev; int mirred_ifindex; if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) continue; mirred_ifindex = parse_attr->mirred_ifindex[out_index]; - out_dev = __dev_get_by_index(dev_net(priv->netdev), - mirred_ifindex); + out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex); + if (!out_dev) { + NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found"); + err = -ENODEV; + return err; + } err = mlx5e_attach_encap(priv, flow, out_dev, out_index, extack, &encap_dev, &encap_valid); + dev_put(out_dev); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index 1cbb330b9f42b..132ea9997676a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act, sizeof(dest->vport.num), hash); hash = jhash((const void *)&dest->vport.vhca_id, sizeof(dest->vport.num), hash); - if (dest->vport.pkt_reformat) - hash = jhash(dest->vport.pkt_reformat, - sizeof(*dest->vport.pkt_reformat), + if (flow_act->pkt_reformat) + hash = jhash(flow_act->pkt_reformat, + sizeof(*flow_act->pkt_reformat), hash); return hash; } @@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1, if (ret) return ret; - return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ? 
- memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat, - sizeof(*dest1->vport.pkt_reformat)) : 0; + if (flow_act1->pkt_reformat && flow_act2->pkt_reformat) + return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat, + sizeof(*flow_act1->pkt_reformat)); + + return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat); } static int @@ -306,6 +308,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw, for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) { struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl; + attr->dests[curr_dest].termtbl = NULL; + /* search for the destination associated with the * current term table */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 0c32c485eb588..f42e118f32901 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -618,6 +618,13 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); + mutex_lock(&dev->intf_state_mutex); + if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) { + mlx5_core_err(dev, "health works are not permitted at this stage\n"); + mutex_unlock(&dev->intf_state_mutex); + return; + } + mutex_unlock(&dev->intf_state_mutex); enter_error_state(dev, false); if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { if (mlx5_health_try_recover(dev)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index cac8f085b16d7..2cf7f0fc170b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -166,16 +166,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate) } } -static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) +static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) { int rate, width; rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper); if (rate < 0) - return -EINVAL; + return SPEED_UNKNOWN; width = mlx5_ptys_width_enum_to_int(ib_link_width_oper); if (width < 0) - return -EINVAL; + return SPEED_UNKNOWN; return rate * width; } @@ -198,16 +198,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper); - if (speed < 0) - return -EINVAL; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? 
DUPLEX_UNKNOWN : DUPLEX_FULL; - link_ksettings->base.duplex = DUPLEX_FULL; link_ksettings->base.port = PORT_OTHER; link_ksettings->base.autoneg = AUTONEG_DISABLE; - link_ksettings->base.speed = speed; - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 5c6a376aa62ec..0e7fd200b4268 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -69,6 +69,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, params->lro_en = false; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; params->tunneled_offload_en = false; + + /* CQE compression is not supported for IPoIB */ + params->rx_cqe_compress_def = false; + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); } /* Called directly after IPoIB netdevice was created to initialize SW structs */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index c70c1f0ca0c19..44a434b1178b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -440,7 +440,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, static const struct ptp_clock_info mlx5_ptp_clock_info = { .owner = THIS_MODULE, .name = "mlx5_ptp", - .max_adj = 100000000, + .max_adj = 50000000, .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 839a01da110f3..8ff16318e32dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return 0; mutex_lock(&mpfs->lock); @@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return 0; mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8246b6285d5a4..112eaef186e19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -906,6 +906,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) err_tables_cleanup: mlx5_geneve_destroy(dev->geneve); mlx5_vxlan_destroy(dev->vxlan); + mlx5_cleanup_clock(dev); + mlx5_cleanup_reserved_gids(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_fw_reset_cleanup(dev); err_events_cleanup: @@ -1640,7 +1642,7 @@ static void mlx5_core_verify_params(void) } } -static int __init init(void) +static int __init mlx5_init(void) { int err; @@ -1665,7 +1667,7 @@ static int __init init(void) return err; } -static void __exit cleanup(void) +static void __exit mlx5_cleanup(void) { #ifdef CONFIG_MLX5_CORE_EN mlx5e_cleanup(); @@ -1674,5 +1676,5 @@ static void __exit cleanup(void) mlx5_unregister_debugfs(); } -module_init(init); -module_exit(cleanup); +module_init(mlx5_init); +module_exit(mlx5_cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index b599b6beb5b95..6a4b997c258a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -9,7 +9,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, struct mlx5dr_matcher *last_matcher = NULL; struct mlx5dr_htbl_connect_info info; struct mlx5dr_ste_htbl *last_htbl; - int ret; + int ret = -EOPNOTSUPP; if (action && action->action_type != DR_ACTION_TYP_FT) return -EOPNOTSUPP; @@ -68,6 +68,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, } } + if (ret) + goto out; + /* Release old action */ if (tbl->miss_action) refcount_dec(&tbl->miss_action->refcount); diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 9ed264ed70705..1fa16064142d9 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6923,7 +6923,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) char banner[sizeof(version)]; struct ksz_switch *sw = NULL; - result = pci_enable_device(pdev); + result = pcim_enable_device(pdev); if (result) return result; diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 81a8ccca7e5e0..5693784eec5bc 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -359,7 +359,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg, goto err_out; usleep_range(26, 100); - while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) && + while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) && (mistat & BUSY)) cpu_relax(); @@ -397,7 +397,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg, goto err_out; usleep_range(26, 100); - while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) && + while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) && (mistat & BUSY)) cpu_relax(); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index b221b83ec5a6f..e56e540ce05c3 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -468,6 +468,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, flow_rule_match_control(rule, &match); } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->vlan.vid.value = match.key->vlan_id; + filter->vlan.vid.mask = match.mask->vlan_id; + filter->vlan.pcp.value[0] = match.key->vlan_priority; + filter->vlan.pcp.mask[0] = match.mask->vlan_priority; + match_protocol = false; + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; @@ -600,18 +612,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, match_protocol = false; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_match_vlan match; - - flow_rule_match_vlan(rule, &match); - filter->key_type = OCELOT_VCAP_KEY_ANY; - filter->vlan.vid.value = match.key->vlan_id; - filter->vlan.vid.mask = match.mask->vlan_id; - filter->vlan.pcp.value[0] = match.key->vlan_priority; - filter->vlan.pcp.mask[0] = match.mask->vlan_priority; - match_protocol = false; - } - finished_key_parsing: if (match_protocol && proto != 
ETH_P_ALL) { if (filter->block_id == VCAP_ES0) { diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1664e9184c9ca..5a1ed4818baac 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3920,6 +3920,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) myri10ge_free_slices(mgp); abort_with_firmware: + kfree(mgp->msix_vectors); myri10ge_dummy_rdma(mgp, 0); abort_with_ioremap: diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 3cae8449fadb7..ff46b08ac1f42 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2384,7 +2384,7 @@ static void free_tx_buffers(struct s2io_nic *nic) skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); if (skb) { swstats->mem_freed += skb->truesize; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); cnt++; } } @@ -7114,9 +7114,8 @@ static int s2io_card_up(struct s2io_nic *sp) if (ret) { DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", dev->name); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENOMEM; + ret = -ENOMEM; + goto err_fill_buff; } DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, ring->rx_bufs_left); @@ -7154,18 +7153,16 @@ static int s2io_card_up(struct s2io_nic *sp) /* Enable Rx Traffic and interrupts on the NIC */ if (start_nic(sp)) { DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENODEV; + ret = -ENODEV; + goto err_out; } /* Add interrupt service routine */ if (s2io_add_isr(sp) != 0) { if (sp->config.intr_type == MSI_X) s2io_rem_isr(sp); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENODEV; + ret = -ENODEV; + goto err_out; } timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0); @@ -7185,6 +7182,20 @@ static int s2io_card_up(struct s2io_nic *sp) } return 0; + +err_out: + if (config->napi) { + if (config->intr_type == MSI_X) { + for (i = 0; i < sp->config.rx_ring_num; i++) + napi_disable(&sp->mac_control.rings[i].napi); + } else { + napi_disable(&sp->napi); + } + } +err_fill_buff: + s2io_reset(sp); + free_rx_buffers(sp); + return ret; } /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 7a8187458724d..24578c48f075b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -363,7 +363,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret; attrs.split = eth_port.is_split; - attrs.splittable = !attrs.split; + attrs.splittable = eth_port.port_lanes > 1 && !attrs.split; attrs.lanes = eth_port.port_lanes; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = eth_port.label_port; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 3977aa2f59bd1..311873ff57e33 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1225,6 +1225,9 @@ nfp_port_get_module_info(struct net_device *netdev, u8 data; port = nfp_port_from_netdev(netdev); + if (!port) + return -EOPNOTSUPP; + /* update port state to get latest interface */ set_bit(NFP_PORT_CHANGED, &port->flags); eth_port = nfp_port_get_eth_port(port); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c 
b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 6ef48eb3a77d4..b163489489e95 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, } /* Adjust the start address to be cache size aligned */ - cache->id = id; cache->addr = addr & ~(u64)(cache->size - 1); /* Re-init to the new ID and address */ @@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, return NULL; } + cache->id = id; + exit: /* Adjust offset */ *offset = addr - cache->addr; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index a6861df9904f9..07fbd329fe93c 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -249,25 +249,26 @@ static void nixge_hw_dma_bd_release(struct net_device *ndev) struct sk_buff *skb; int i; - for (i = 0; i < RX_BD_NUM; i++) { - phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], - phys); - - dma_unmap_single(ndev->dev.parent, phys_addr, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); - - skb = (struct sk_buff *)(uintptr_t) - nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], - sw_id_offset); - dev_kfree_skb(skb); - } + if (priv->rx_bd_v) { + for (i = 0; i < RX_BD_NUM; i++) { + phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + phys); + + dma_unmap_single(ndev->dev.parent, phys_addr, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + skb = (struct sk_buff *)(uintptr_t) + nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + sw_id_offset); + dev_kfree_skb(skb); + } - if (priv->rx_bd_v) dma_free_coherent(ndev->dev.parent, sizeof(*priv->rx_bd_v) * RX_BD_NUM, priv->rx_bd_v, priv->rx_bd_p); + } if (priv->tx_skb) devm_kfree(ndev->dev.parent, priv->tx_skb); @@ -899,6 +900,7 @@ static int nixge_open(struct net_device *ndev) err_rx_irq: free_irq(priv->tx_irq, ndev); err_tx_irq: + napi_disable(&priv->napi); phy_stop(phy); phy_disconnect(phy); tasklet_kill(&priv->dma_err_tasklet); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 2942102efd488..bde32f0845ca5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1166,6 +1166,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, buffer_info->dma = 0; buffer_info->time_stamp = 0; tx_ring->next_to_use = ring_num; + dev_kfree_skb_any(skb); return; } buffer_info->mapped = true; @@ -2481,6 +2482,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) unregister_netdev(netdev); pch_gbe_phy_hw_reset(&adapter->hw); + pci_dev_put(adapter->ptp_pdev); free_netdev(netdev); } @@ -2562,7 +2564,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, /* setup the private structure */ ret = pch_gbe_sw_init(adapter); if (ret) - goto err_free_netdev; + goto err_put_dev; /* Initialize PHY */ ret = pch_gbe_init_phy(adapter); @@ -2620,6 +2622,8 @@ static int pch_gbe_probe(struct pci_dev *pdev, err_free_adapter: pch_gbe_phy_hw_reset(&adapter->hw); +err_put_dev: + pci_dev_put(adapter->ptp_pdev); err_free_netdev: free_netdev(netdev); return ret; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index e42520f909fe2..fcd4213c99b83 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -257,6 +257,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) .oper = IONIC_Q_ENABLE, }, }; + int 
ret; idev = &lif->ionic->idev; dev = lif->ionic->dev; @@ -264,16 +265,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", ctx.cmd.q_control.index, ctx.cmd.q_control.type); + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); + + ret = ionic_adminq_post_wait(lif, &ctx); + if (ret) + return ret; + + if (qcq->napi.poll) + napi_enable(&qcq->napi); + if (qcq->flags & IONIC_QCQ_F_INTR) { irq_set_affinity_hint(qcq->intr.vector, &qcq->intr.affinity_mask); - napi_enable(&qcq->napi); - ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_CLEAR); } - return ionic_adminq_post_wait(lif, &ctx); + return 0; } static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) @@ -2383,11 +2392,15 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, * than the full array, but leave the qcq shells in place */ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { - lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->txqcqs[i]); + if (lif->txqcqs && lif->txqcqs[i]) { + lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->txqcqs[i]); + } - lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->rxqcqs[i]); + if (lif->rxqcqs && lif->rxqcqs[i]) { + lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->rxqcqs[i]); + } } return err; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index f60ffef33e0ce..00b6985edea04 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -569,8 +569,14 @@ int ionic_port_reset(struct ionic *ionic) static int __init ionic_init_module(void) { + int ret; + ionic_debugfs_create(); - return ionic_bus_register_driver(); + ret = ionic_bus_register_driver(); + if (ret) + ionic_debugfs_destroy(); + + return ret; } static void __exit ionic_cleanup_module(void) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 46dbb49f837c8..5463c8b8e43ce 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -986,7 +986,7 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) stats->vlan_inserted++; } - if (skb->csum_not_inet) + if (skb_csum_is_sctp(skb)) stats->crc32_csum++; else stats->csum++; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 6ab3e60d4928c..4b4077cf2d266 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1796,9 +1796,10 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, u8 split_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; + u8 port_id = 0, pf_id = 0, vf_id = 0; bool read_using_dmae = false; u32 thresh; + u16 fid; if (!dump) return len; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index d210632676d32..a632de208a7dc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1456,7 +1456,12 @@ int qede_poll(struct napi_struct *napi, int budget) rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && qede_has_rx_work(fp->rxq)) ? 
qede_rx_int(fp, budget) : 0; - if (rx_work_done < budget) { + + if (fp->xdp_xmit & QEDE_XDP_REDIRECT) + xdp_do_flush(); + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (rx_work_done < budget || !budget) { if (!qede_poll_is_more_work(fp)) { napi_complete_done(napi, rx_work_done); @@ -1474,9 +1479,6 @@ int qede_poll(struct napi_struct *napi, int budget) qede_update_tx_producer(fp->xdp_tx); } - if (fp->xdp_xmit & QEDE_XDP_REDIRECT) - xdp_do_flush_map(); - return rx_work_done; } diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 2219e4c59ae60..99fd35a8ca750 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2475,6 +2475,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, skb_shinfo(skb)->nr_frags); if (tx_cb->seg_count == -1) { netdev_err(ndev, "%s: invalid segment count!\n", __func__); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index bd06076803295..2fd5c6fdb5003 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2991,7 +2991,7 @@ static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter) QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); dev_info(&adapter->pdev->dev, "%s: lock recovery initiated\n", __func__); - msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY); + mdelay(QLC_83XX_DRV_LOCK_RECOVERY_DELAY); val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); id = ((val >> 2) & 0xF); if (id == adapter->portnum) { @@ -3027,7 +3027,7 @@ int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter) if (status) break; - msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY); + mdelay(QLC_83XX_DRV_LOCK_WAIT_DELAY); i++; if (i == 1) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index d2c190732d3ef..beeeec8516b83 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2505,7 +2505,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) goto disable_mbx_intr; qlcnic_83xx_clear_function_resources(adapter); - qlcnic_dcb_enable(adapter->dcb); + + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + goto disable_mbx_intr; + } + qlcnic_83xx_initialize_nic(adapter, 1); qlcnic_dcb_get_info(adapter->dcb); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 7519773eaca6e..22afa2be85fdb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -41,11 +41,6 @@ struct qlcnic_dcb { unsigned long state; }; -static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) -{ - kfree(dcb); -} - static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { if (dcb && dcb->ops->get_hw_capability) @@ -112,9 +107,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb) dcb->ops->init_dcbnl_ops(dcb); } -static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb) +static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb) { - if (dcb && qlcnic_dcb_attach(dcb)) - qlcnic_clear_dcb_ops(dcb); + return dcb ? 
qlcnic_dcb_attach(dcb) : 0; } #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 27c07b2412f46..44b745293fd0c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2622,7 +2622,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "Device does not support MSI interrupts\n"); if (qlcnic_82xx_check(adapter)) { - qlcnic_dcb_enable(adapter->dcb); + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + dev_err(&pdev->dev, "Failed to enable DCB\n"); + goto err_out_free_hw; + } + qlcnic_dcb_get_info(adapter->dcb); err = qlcnic_setup_intr(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 8367891bfb139..e864c453c5e6b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -221,6 +221,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) return 0; qlcnic_destroy_async_wq: + while (i--) + kfree(sriov->vf_info[i].vp); destroy_workqueue(bc->bc_async_wq); qlcnic_destroy_trans_wq: diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index ccdfa930130bc..4cff544f04c2e 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1158,10 +1158,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Failed to register net device\n"); - goto err_out_mdio_unregister; + goto err_out_phy_disconnect; } return 0; +err_out_phy_disconnect: + phy_disconnect(dev->phydev); err_out_mdio_unregister: mdiobus_unregister(lp->mii_bus); err_out_mdio: @@ -1185,6 +1187,7 @@ static void r6040_remove_one(struct pci_dev *pdev) struct r6040_private *lp = netdev_priv(dev); unregister_netdev(dev); + phy_disconnect(dev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); netif_napi_del(&lp->napi); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index f96eed67e1a2b..410ccd28f6531 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -736,14 +736,14 @@ static void ravb_error_interrupt(struct net_device *ndev) ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED), RIS2); /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) priv->stats[RAVB_BE].rx_over_errors++; - /* Receive Descriptor Empty int */ + /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF1) priv->stats[RAVB_NC].rx_over_errors++; @@ -2253,11 +2253,11 @@ static int ravb_remove(struct platform_device *pdev) priv->desc_bat_dma); /* Set reset mode */ ravb_write(ndev, CCC_OPC_RESET, CCC); - pm_runtime_put_sync(&pdev->dev); unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); @@ -2364,6 +2364,7 @@ static int __maybe_unused ravb_resume(struct device *dev) ret = ravb_open(ndev); if (ret < 0) return ret; + ravb_set_rx_mode(ndev); netif_device_attach(ndev); } diff --git 
a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 5b7413305be63..eb1be73020822 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3255,6 +3255,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) bool was_enabled = efx->port_enabled; int rc; +#ifdef CONFIG_SFC_SRIOV + /* If this function is a VF and we have access to the parent PF, + * then use the PF control path to attempt to change the VF MAC address. + */ + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac[ETH_ALEN]; + + /* net_dev->dev_addr can be zeroed by efx_net_stop in + * efx_ef10_sriov_set_vf_mac, so pass in a copy. + */ + ether_addr_copy(mac, efx->net_dev->dev_addr); + + rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac); + if (!rc) + return 0; + + netif_dbg(efx, drv, efx->net_dev, + "Updating VF mac via PF failed (%d), setting directly\n", + rc); + } +#endif + efx_device_detach_sync(efx); efx_net_stop(efx->net_dev); @@ -3277,40 +3301,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_net_open(efx->net_dev); efx_device_attach_if_not_resetting(efx); -#ifdef CONFIG_SFC_SRIOV - if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; - - if (rc == -EPERM) { - struct efx_nic *efx_pf; - - /* Switch to PF and change MAC address on vport */ - efx_pf = pci_get_drvdata(pci_dev_pf); - - rc = efx_ef10_sriov_set_vf_mac(efx_pf, - nic_data->vf_index, - efx->net_dev->dev_addr); - } else if (!rc) { - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); - struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; - unsigned int i; - - /* MAC address successfully changed by VF (with MAC - * spoofing) so update the parent PF if possible. 
- */ - for (i = 0; i < efx_pf->vf_count; ++i) { - struct ef10_vf *vf = nic_data->vf + i; - - if (vf->efx == efx) { - ether_addr_copy(vf->mac, - efx->net_dev->dev_addr); - return 0; - } - } - } - } else -#endif if (rc == -EPERM) { netif_err(efx, drv, efx->net_dev, "Cannot change MAC address; use sfboot to enable" diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 67fe44db6b612..63a44ee763be7 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -200,6 +200,7 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb, skb->len, skb->data_len, channel->channel); if (!efx->n_channels || !efx->n_tx_channels || !channel) { netif_stop_queue(net_dev); + dev_kfree_skb_any(skb); goto err; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 7183080763417..29c8d2c990044 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1047,8 +1047,11 @@ static int efx_pci_probe_post_io(struct efx_nic *efx) /* Determine netdevice features */ net_dev->features |= (efx->type->offload_features | NETIF_F_SG | NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL); - if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) { net_dev->features |= NETIF_F_TSO6; + if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) + net_dev->hw_enc_features |= NETIF_F_TSO6; + } /* Check whether device supports TSO */ if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) net_dev->features &= ~NETIF_F_ALL_TSO; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index d0f1b2dc7dff0..c49168ba7a4d6 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -308,7 +308,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; - efx->tx_channel_offset = 1; + efx->tx_channel_offset = efx_separate_tx_channels ? 
1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; efx->legacy_irq = efx->pci_dev->irq; diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 40b2af8bfb81c..2ac3c8f1b04b5 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -157,7 +157,8 @@ struct efx_filter_spec { u32 flags:6; u32 dmaq_id:12; u32 rss_context; - __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + u32 vport_id; + __be16 outer_vid; __be16 inner_vid; u8 loc_mac[ETH_ALEN]; u8 rem_mac[ETH_ALEN]; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 2c09afac5beb4..36b46ddb67107 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -676,17 +676,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left, (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) return false; - return memcmp(&left->outer_vid, &right->outer_vid, + return memcmp(&left->vport_id, &right->vport_id, sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; + offsetof(struct efx_filter_spec, vport_id)) == 0; } u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) { - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, + BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3); + return jhash2((const u32 *)&spec->vport_id, (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, + offsetof(struct efx_filter_spec, vport_id)) / 4, 0); } diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 1665529a72717..fcc7de8ae2bfa 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -545,7 +545,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, * previous packets out. 
*/ if (!netdev_xmit_more()) - efx_tx_send_pending(tx_queue->channel); + efx_tx_send_pending(efx_get_tx_channel(efx, index)); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index ef3634d1b9f7f..b9acee214bb6a 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1958,11 +1958,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) ret = PTR_ERR(priv->phydev); dev_err(priv->dev, "get_phy_device err(%d)\n", ret); priv->phydev = NULL; + mdiobus_unregister(bus); return -ENODEV; } ret = phy_device_register(priv->phydev); if (ret) { + phy_device_free(priv->phydev); mdiobus_unregister(bus); dev_err(priv->dev, "phy_device_register err(%d)\n", ret); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 752658ec7beeb..50ef68497bce9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -261,11 +261,9 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac, if (ret) return ret; - devm_add_action_or_reset(dwmac->dev, - (void(*)(void *))clk_disable_unprepare, - dwmac->rgmii_tx_clk); - - return 0; + return devm_add_action_or_reset(dwmac->dev, + (void(*)(void *))clk_disable_unprepare, + clk); } static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index bfc4a92f1d92b..78be62ecc9a9a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -505,6 +505,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->has_gmac4 = 1; plat_dat->pmt = 1; plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); + if (of_device_is_compatible(np, "qcom,qcs404-ethqos")) + plat_dat->rx_clk_runs_in_lpi = 1; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 2e71e510e127d..5b052fdd2696e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -720,6 +720,8 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, if (fc & FLOW_RX) { pr_debug("\tReceive Flow-Control ON\n"); flow |= GMAC_RX_FLOW_CTRL_RFE; + } else { + pr_debug("\tReceive Flow-Control OFF\n"); } writel(flow, ioaddr + GMAC_RX_FLOW_CTRL); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index de5255b951e14..d1b8b51bf6ad9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -520,9 +520,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, return 0; } - val |= PPSCMDx(index, 0x2); val |= TRGTMODSELx(index, 0x2); val |= PPSEN0; + writel(val, ioaddr + MAC_PPS_CONTROL); writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); @@ -547,6 +547,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); /* Finally, activate it */ + val |= PPSCMDx(index, 0x2); writel(val, ioaddr + MAC_PPS_CONTROL); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 
53efcc9c40e28..0ad5ce874557c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -44,7 +44,8 @@ static void config_sub_second_increment(void __iomem *ioaddr, if (!(value & PTP_TCR_TSCTRLSSR)) data = (data * 1000) / 465; - data &= PTP_SSIR_SSINC_MASK; + if (data > PTP_SSIR_SSINC_MAX) + data = PTP_SSIR_SSINC_MAX; reg_value = data; if (gmac4) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 27b7bb64a0281..1ec000d4c7705 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1043,14 +1043,23 @@ static void stmmac_mac_link_up(struct phylink_config *config, ctrl |= priv->hw->link.duplex; /* Flow Control operation */ - if (tx_pause && rx_pause) - stmmac_mac_flow_ctrl(priv, duplex); + if (rx_pause && tx_pause) + priv->flow_ctrl = FLOW_AUTO; + else if (rx_pause && !tx_pause) + priv->flow_ctrl = FLOW_RX; + else if (!rx_pause && tx_pause) + priv->flow_ctrl = FLOW_TX; + else + priv->flow_ctrl = FLOW_OFF; + + stmmac_mac_flow_ctrl(priv, duplex); writel(ctrl, priv->ioaddr + MAC_CTRL_REG); stmmac_mac_set(priv, priv->ioaddr, true); if (phy && priv->dma_cap.eee) { - priv->eee_active = phy_init_eee(phy, 1) >= 0; + priv->eee_active = + phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0; priv->eee_enabled = stmmac_eee_init(priv); priv->tx_lpi_enabled = priv->eee_enabled; stmmac_set_eee_pls(priv, priv->hw, true); @@ -1117,6 +1126,11 @@ static int stmmac_init_phy(struct net_device *dev) int addr = priv->plat->phy_addr; struct phy_device *phydev; + if (addr < 0) { + netdev_err(priv->dev, "no phy found\n"); + return -ENODEV; + } + phydev = mdiobus_get_phy(priv->mii, addr); if (!phydev) { netdev_err(priv->dev, "no phy at addr %d\n", addr); @@ -2907,6 +2921,15 @@ static int stmmac_open(struct net_device *dev) goto init_error; } + if (priv->plat->serdes_powerup) { + ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: Serdes powerup failed\n", + __func__); + goto init_error; + } + } + ret = stmmac_hw_setup(dev, true); if (ret < 0) { netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); @@ -3022,6 +3045,10 @@ static int stmmac_release(struct net_device *dev) /* Disable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, false); + /* Powerdown Serdes if there is */ + if (priv->plat->serdes_powerdown) + priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); + netif_carrier_off(dev); stmmac_release_ptp(priv); @@ -5178,14 +5205,6 @@ int stmmac_dvr_probe(struct device *device, goto error_netdev_register; } - if (priv->plat->serdes_powerup) { - ret = priv->plat->serdes_powerup(ndev, - priv->plat->bsp_priv); - - if (ret < 0) - goto error_serdes_powerup; - } - #ifdef CONFIG_DEBUG_FS stmmac_init_fs(ndev); #endif @@ -5197,8 +5216,6 @@ int stmmac_dvr_probe(struct device *device, return ret; -error_serdes_powerup: - unregister_netdev(ndev); error_netdev_register: phylink_destroy(priv->phylink); error_phy_setup: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index f70d8d1ce3298..f02ce09020fbc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -108,10 +108,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); 
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); - axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); - axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); - axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); - axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); + axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe"); + axi->axi_fb = of_property_read_bool(np, "snps,fb"); + axi->axi_mb = of_property_read_bool(np, "snps,mb"); + axi->axi_rb = of_property_read_bool(np, "snps,rb"); if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt)) axi->axi_wr_osr_lmt = 1; @@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); - if (plat->force_thresh_dma_mode) { + if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) { plat->force_sf_dma_mode = 0; dev_warn(&pdev->dev, "force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 7abb1d47e7dac..60e6b085e2f6d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -61,7 +61,7 @@ #define PTP_TCR_TSENMACADDR BIT(18) /* SSIR defines */ -#define PTP_SSIR_SSINC_MASK 0xff +#define PTP_SSIR_SSINC_MAX 0xff #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index dd5c4ef92ef3c..ea7200b7b6477 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1654,12 +1654,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv) } ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr); - if (ret) + if (ret) { + kfree_skb(skb); goto cleanup; + } ret = dev_set_promiscuity(priv->dev, 1); - if (ret) + if (ret) { + kfree_skb(skb); goto cleanup; + } ret = dev_direct_xmit(skb, 0); if (ret) diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 69fc47089e625..940db4ec57142 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2063,9 +2063,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); - dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); - dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); /* Reuse original ring buffer. 
*/ hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index b0f00b4edd949..5af0f9f8c0975 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -864,6 +864,8 @@ static int cpsw_ndo_open(struct net_device *ndev) err_cleanup: if (!cpsw->usage_count) { + napi_disable(&cpsw->napi_rx); + napi_disable(&cpsw->napi_tx); cpdma_ctlr_stop(cpsw->dma); cpsw_destroy_xdp_rxqs(cpsw); } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index dc50e948195de..f145abb77a497 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1262,7 +1262,7 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, } /* Submit the packet */ -static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_stats *tx_stats = &netcp->stats; diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index c62f474b6d08e..fcebd2418dbd3 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1302,12 +1302,15 @@ static int tsi108_open(struct net_device *dev) data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size, &data->rxdma, GFP_KERNEL); - if (!data->rxring) + if (!data->rxring) { + free_irq(data->irq_num, dev); return -ENOMEM; + } data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size, &data->txdma, GFP_KERNEL); if (!data->txring) { + free_irq(data->irq_num, dev); dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, data->rxdma); return -ENOMEM; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index f6ea4a0ad5dff..02b95afe25066 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -541,7 +541,7 @@ static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue) xemaclite_enable_interrupts(lp); if (lp->deferred_skb) { - dev_kfree_skb(lp->deferred_skb); + dev_kfree_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; dev->stats.tx_errors++; } diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index c7ce6d5491afc..442bdc6e8dc4f 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3844,10 +3844,24 @@ static int dfx_init(void) int status; status = pci_register_driver(&dfx_pci_driver); - if (!status) - status = eisa_driver_register(&dfx_eisa_driver); - if (!status) - status = tc_register_driver(&dfx_tc_driver); + if (status) + goto err_pci_register; + + status = eisa_driver_register(&dfx_eisa_driver); + if (status) + goto err_eisa_register; + + status = tc_register_driver(&dfx_tc_driver); + if (status) + goto err_tc_register; + + return 0; + +err_tc_register: + eisa_driver_unregister(&dfx_eisa_driver); +err_eisa_register: + pci_unregister_driver(&dfx_pci_driver); +err_pci_register: return status; } diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index e4e4981ac1d29..eea9d47157cfe 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work) * ===================== network driver interface ========================= */ -static int baycom_send_packet(struct sk_buff *skb, 
struct net_device *dev) +static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 1ad6085994b1c..5c17c92add8ab 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -533,7 +533,7 @@ static int bpq_device_event(struct notifier_block *this, if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; - if (!dev_is_ethdev(dev)) + if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 36eeb80406f2b..eeb6c47d81678 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -300,12 +300,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc) spin_lock_irqsave(&scc->lock, flags); if (scc->tx_buff != NULL) { - dev_kfree_skb(scc->tx_buff); + dev_kfree_skb_irq(scc->tx_buff); scc->tx_buff = NULL; } while (!skb_queue_empty(&scc->tx_queue)) - dev_kfree_skb(skb_dequeue(&scc->tx_queue)); + dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue)); spin_unlock_irqrestore(&scc->lock, flags); } @@ -1667,7 +1667,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) { struct sk_buff *skb_del; skb_del = skb_dequeue(&scc->tx_queue); - dev_kfree_skb(skb_del); + dev_kfree_skb_irq(skb_del); } skb_queue_tail(&scc->tx_queue, skb); netif_trans_update(dev); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a0f338cf14247..367878493e704 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -977,7 +977,8 @@ struct net_device_context { u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; - + /* completion variable to confirm vf association */ + struct completion vf_add; /* Is the current data path through the VF NIC? */ bool data_path_is_vf; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 6a7ab930ef70d..d15da8287df32 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1327,6 +1327,10 @@ static void netvsc_send_vf(struct net_device *ndev, net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; + + if (net_device_ctx->vf_alloc) + complete(&net_device_ctx->vf_add); + netdev_info(ndev, "VF slot %u %s\n", net_device_ctx->vf_serial, net_device_ctx->vf_alloc ? 
"added" : "removed"); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 18484370da0d4..f2020be43cfea 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2290,6 +2290,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) { struct device *parent = vf_netdev->dev.parent; struct net_device_context *ndev_ctx; + struct net_device *ndev; struct pci_dev *pdev; u32 serial; @@ -2316,6 +2317,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return hv_get_drvdata(ndev_ctx->device_ctx); } + /* Fallback path to check synthetic vf with + * help of mac addr + */ + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { + ndev = hv_get_drvdata(ndev_ctx->device_ctx); + if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) { + netdev_notice(vf_netdev, + "falling back to mac addr based matching\n"); + return ndev; + } + } + netdev_notice(vf_netdev, "no netdev found for vf serial:%u\n", serial); return NULL; @@ -2406,6 +2419,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev) return NOTIFY_OK; net_device_ctx->data_path_is_vf = vf_is_up; + if (vf_is_up && !net_device_ctx->vf_alloc) { + netdev_info(ndev, "Waiting for the VF association from host\n"); + wait_for_completion(&net_device_ctx->vf_add); + } + netvsc_switch_datapath(ndev, vf_is_up); netdev_info(ndev, "Data path switched %s VF: %s\n", vf_is_up ? "to" : "from", vf_netdev->name); @@ -2429,6 +2447,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) netvsc_vf_setxdp(vf_netdev, NULL); + reinit_completion(&net_device_ctx->vf_add); netdev_rx_handler_unregister(vf_netdev); netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); @@ -2466,6 +2485,7 @@ static int netvsc_probe(struct hv_device *dev, INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); + init_completion(&net_device_ctx->vf_add); spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index fd9f33c833fa3..95ef3b6f98dd3 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -926,7 +926,7 @@ static int ca8210_spi_transfer( dev_dbg(&spi->dev, "%s called\n", __func__); - cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC); + cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC); if (!cas_ctl) return -ENOMEM; diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index 89c046b204e0c..a8369bfa4050b 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb) goto err_tx; if (status & CC2520_STATUS_TX_UNDERFLOW) { + rc = -EINVAL; dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n"); goto err_tx; } @@ -969,7 +970,7 @@ static int cc2520_hw_init(struct cc2520_private *priv) if (timeout-- <= 0) { dev_err(&priv->spi->dev, "oscillator start failed!\n"); - return ret; + return -ETIMEDOUT; } udelay(1); } while (!(status & CC2520_STATUS_XOSC32M_STABLE)); diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index fe91b72eca36c..64b12e462765e 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1251,20 +1251,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) /* Initialize a ring, including allocating DMA memory for its entries */ 
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) { - size_t size = count * GSI_RING_ELEMENT_SIZE; + u32 size = count * GSI_RING_ELEMENT_SIZE; struct device *dev = gsi->dev; dma_addr_t addr; - /* Hardware requires a 2^n ring size, with alignment equal to size */ + /* Hardware requires a 2^n ring size, with alignment equal to size. + * The DMA address returned by dma_alloc_coherent() is guaranteed to + * be a power-of-2 number of pages, which satisfies the requirement. + */ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); - if (ring->virt && addr % size) { - dma_free_coherent(dev, size, ring->virt, addr); - dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", - size); - return -EINVAL; /* Not a good error value, but distinct */ - } else if (!ring->virt) { + if (!ring->virt) return -ENOMEM; - } + ring->addr = addr; ring->count = count; diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h index 1785c9d3344d1..d58dce46e061a 100644 --- a/drivers/net/ipa/gsi_private.h +++ b/drivers/net/ipa/gsi_private.h @@ -14,7 +14,7 @@ struct gsi_trans; struct gsi_ring; struct gsi_channel; -#define GSI_RING_ELEMENT_SIZE 16 /* bytes */ +#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */ /* Return the entry that follows one provided in a transaction pool */ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element); diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 6c3ed5b17b80c..70c2b585f98d6 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -153,11 +153,10 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, size = __roundup_pow_of_two(size); total_size = (count + max_alloc - 1) * size; - /* The allocator will give us a power-of-2 number of pages. But we - * can't guarantee that, so request it. That way we won't waste any - * memory that would be available beyond the required space. - * - * Note that gsi_trans_pool_exit_dma() assumes the total allocated + /* The allocator will give us a power-of-2 number of pages + * sufficient to satisfy our request. Round up our requested + * size to avoid any unused space in the allocation. This way + * gsi_trans_pool_exit_dma() can assume the total allocated * size is exactly (count * size). */ total_size = get_order(total_size) << PAGE_SHIFT; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index a47378b7d9b2f..dc94ce0356556 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -154,7 +154,7 @@ static void ipa_cmd_validate_build(void) * of entries, as and IPv4 and IPv6 route tables have the same number * of entries. */ -#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE) +#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64)) #define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX) BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK)); BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 7fc1058a5ca93..ba05e26c3c60e 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -72,8 +72,8 @@ * that can be included in a single transaction. 
*/ struct gsi_channel_data { - u16 tre_count; - u16 event_count; + u16 tre_count; /* must be a power of 2 */ + u16 event_count; /* must be a power of 2 */ u8 tlv_count; }; diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 1a87a49538c50..880ec353f958f 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) mem = &ipa->mem[IPA_MEM_V4_ROUTE]; req.v4_route_tbl_info_valid = 1; req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; mem = &ipa->mem[IPA_MEM_V6_ROUTE]; req.v6_route_tbl_info_valid = 1; req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; mem = &ipa->mem[IPA_MEM_V4_FILTER]; req.v4_filter_tbl_start_valid = 1; @@ -352,8 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v4_hash_route_tbl_info_valid = 1; req.v4_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; } mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]; @@ -361,8 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v6_hash_route_tbl_info_valid = 1; req.v6_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; } mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED]; diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 73413371e3d3e..ecf9f863c842b 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -271,7 +271,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x12, .offset = offsetof(struct ipa_init_modem_driver_req, v4_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -292,7 +292,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x13, .offset = offsetof(struct ipa_init_modem_driver_req, v6_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -456,7 +456,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1b, .offset = offsetof(struct ipa_init_modem_driver_req, v4_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -477,7 +477,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1c, .offset = offsetof(struct ipa_init_modem_driver_req, v6_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index cfac456cea0ca..58de425bb8e61 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -82,9 +82,11 @@ enum ipa_platform_type { IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */ }; -/* This defines the start and end offset of a range of memory. Both - * fields are offsets relative to the start of IPA shared memory. - * The end value is the last addressable byte *within* the range. +/* This defines the start and end offset of a range of memory. 
The start + * value is a byte offset relative to the start of IPA shared memory. The + * end value is the last addressable unit *within* the range. Typically + * the end value is in units of bytes, however it can also be a maximum + * array index value. + */ struct ipa_mem_bounds { u32 start; @@ -125,18 +127,19 @@ struct ipa_init_modem_driver_req { u8 hdr_tbl_info_valid; struct ipa_mem_bounds hdr_tbl_info; - /* Routing table information. These define the location and size of - * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. These define the location and maximum + * *index* (not byte) for the modem portion of non-hashable IPv4 and + * IPv6 routing tables. The start values are byte offsets relative + * to the start of IPA shared memory. */ u8 v4_route_tbl_info_valid; - struct ipa_mem_array v4_route_tbl_info; + struct ipa_mem_bounds v4_route_tbl_info; u8 v6_route_tbl_info_valid; - struct ipa_mem_array v6_route_tbl_info; + struct ipa_mem_bounds v6_route_tbl_info; /* Filter table information. These define the location of the * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * byte offsets relative to the start of IPA shared memory. */ u8 v4_filter_tbl_start_valid; u32 v4_filter_tbl_start; @@ -177,18 +180,20 @@ struct ipa_init_modem_driver_req { u8 zip_tbl_info_valid; struct ipa_mem_bounds zip_tbl_info; - /* Routing table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. These define the location and maximum + * *index* (not byte) for the modem portion of hashable IPv4 and IPv6 + * routing tables (if supported by hardware). The start values are + * byte offsets relative to the start of IPA shared memory. */ u8 v4_hash_route_tbl_info_valid; - struct ipa_mem_array v4_hash_route_tbl_info; + struct ipa_mem_bounds v4_hash_route_tbl_info; u8 v6_hash_route_tbl_info_valid; - struct ipa_mem_array v6_hash_route_tbl_info; + struct ipa_mem_bounds v6_hash_route_tbl_info; /* Filter table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * of hashable IPv4 and IPv6 filter tables (if supported by hardware). + * The start values are byte offsets relative to the start of IPA + * shared memory. */ u8 v4_hash_filter_tbl_start_valid; u32 v4_hash_filter_tbl_start; diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 0747866d60abc..02c1928374144 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -27,28 +27,38 @@ /** * DOC: IPA Filter and Route Tables * - * The IPA has tables defined in its local shared memory that define filter - * and routing rules. Each entry in these tables contains a 64-bit DMA - * address that refers to DRAM (system memory) containing a rule definition. + * The IPA has tables defined in its local (IPA-resident) memory that define + * filter and routing rules. An entry in either of these tables is a little + * endian 64-bit "slot" that holds the address of a rule definition. (The + * size of these slots is 64 bits regardless of the host DMA address size.) + * + * Separate tables (both filter and route) are used for IPv4 and IPv6. 
There + * are normally another set of "hashed" filter and route tables, which are + * used with a hash of message metadata. Hashed operation is not supported + * by all IPA hardware (IPA v4.2 doesn't support hashed tables). + * + * Rules can be in local memory or in DRAM (system memory). The offset of + * an object (such as a route or filter table) in IPA-resident memory must + * be 128-byte aligned. An object in system memory (such as a route or filter + * rule) must be at an 8-byte aligned address. We currently only place + * route or filter rules in system memory. + * * A rule consists of a contiguous block of 32-bit values terminated with * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits * represents "no filtering" or "no routing," and is the reset value for - * filter or route table rules. Separate tables (both filter and route) - * used for IPv4 and IPv6. Additionally, there can be hashed filter or - * route tables, which are used when a hash of message metadata matches. - * Hashed operation is not supported by all IPA hardware. + * filter or route table rules. * * Each filter rule is associated with an AP or modem TX endpoint, though - * not all TX endpoints support filtering. The first 64-bit entry in a + * not all TX endpoints support filtering. The first 64-bit slot in a * filter table is a bitmap indicating which endpoints have entries in * the table. The low-order bit (bit 0) in this bitmap represents a * special global filter, which applies to all traffic. This is not * used in the current code. Bit 1, if set, indicates that there is an - * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the - * table. Bit 2, if set, indicates there is an entry for endpoint 1, - * and so on. Space is set aside in IPA local memory to hold as many - * filter table entries as might be required, but typically they are not - * all used. + * entry (i.e. slot containing a system address referring to a rule) for + * endpoint 0 in the table. Bit 3, if set, indicates there is an entry + * for endpoint 2, and so on. Space is set aside in IPA local memory to + * hold as many filter table entries as might be required, but typically + * they are not all used. * * The AP initializes all entries in a filter table to refer to a "zero" * entry. Once initialized the modem and AP update the entries for @@ -96,13 +106,8 @@ * ---------------------- */ -/* IPA hardware constrains filter and route tables alignment */ -#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */ - /* Assignment of route table entries to the modem and AP */ #define IPA_ROUTE_MODEM_MIN 0 -#define IPA_ROUTE_MODEM_COUNT 8 - #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT #define IPA_ROUTE_AP_COUNT \ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT) @@ -118,21 +123,14 @@ /* Check things that can be validated at build time. */ static void ipa_table_validate_build(void) { - /* IPA hardware accesses memory 128 bytes at a time. Addresses - * referred to by entries in filter and route tables must be - * aligned on 128-byte byte boundaries. The only rule address - * ever use is the "zero rule", and it's aligned at the base - * of a coherent DMA allocation. - */ - BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN); - - /* Filter and route tables contain DMA addresses that refer to - * filter or route rules. We use a fixed constant to represent - * the size of either type of table entry. Code in ipa_table_init() - * uses a pointer to __le64 to initialize table entriews. 
+ /* Filter and route tables contain DMA addresses that refer + * to filter or route rules. But the size of a table entry + * is 64 bits regardless of what the size of an AP DMA address + * is. A fixed constant defines the size of an entry, and + * code in ipa_table_init() uses a pointer to __le64 to + * initialize tables. */ - BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t)); - BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64)); + BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64)); /* A "zero rule" is used to represent no filtering or no routing. * It is a 64-bit block of zeroed memory. Code in ipa_table_init() @@ -163,7 +161,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED] : &ipa->mem[IPA_MEM_V4_ROUTE]; - size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE; + size = IPA_ROUTE_COUNT_MAX * sizeof(__le64); } else { if (ipv6) mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED] @@ -171,7 +169,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED] : &ipa->mem[IPA_MEM_V4_FILTER]; - size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE; + size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64); } if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed)) @@ -270,8 +268,8 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter, if (filter) first++; /* skip over bitmap */ - offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE; - size = count * IPA_TABLE_ENTRY_SIZE; + offset = mem->offset + first * sizeof(__le64); + size = count * sizeof(__le64); addr = ipa_table_addr(ipa, false, count); ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true); @@ -455,11 +453,11 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, count = 1 + hweight32(ipa->filter_map); hash_count = hash_mem->size ? count : 0; } else { - count = mem->size / IPA_TABLE_ENTRY_SIZE; - hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE; + count = mem->size / sizeof(__le64); + hash_count = hash_mem->size / sizeof(__le64); } - size = count * IPA_TABLE_ENTRY_SIZE; - hash_size = hash_count * IPA_TABLE_ENTRY_SIZE; + size = count * sizeof(__le64); + hash_size = hash_count * sizeof(__le64); addr = ipa_table_addr(ipa, filter, count); hash_addr = ipa_table_addr(ipa, filter, hash_count); @@ -662,7 +660,13 @@ int ipa_table_init(struct ipa *ipa) ipa_table_validate_build(); - size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + /* The IPA hardware requires route and filter table rules to be + * aligned on a 128-byte boundary. We put the "zero rule" at the + * base of the table area allocated here. The DMA address returned + * by dma_alloc_coherent() is guaranteed to be a power-of-2 number + * of pages, which satisfies the rule alignment requirement. 
+ */ + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -694,7 +698,7 @@ void ipa_table_exit(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; size_t size; - size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); ipa->table_addr = 0; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 78038d14fcea9..35e519cef25da 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -10,12 +10,12 @@ struct ipa; -/* The size of a filter or route table entry */ -#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */ - /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */ #define IPA_FILTER_COUNT_MAX 14 +/* The number of route table entries allotted to the modem */ +#define IPA_ROUTE_MODEM_COUNT 8 + /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */ #define IPA_ROUTE_COUNT_MAX 15 diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8801d093135c3..a33149ee0ddcf 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) static int ipvlan_process_outbound(struct sk_buff *skb) { - struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; /* The ipvlan is a pseudo-L2 device, so the packets that we receive @@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb) if (skb_mac_header_was_set(skb)) { /* In this mode we dont care about * multicast and broadcast traffic */ + struct ethhdr *ethh = eth_hdr(skb); + if (is_multicast_ether_addr(ethh->h_dest)) { pr_debug_ratelimited( "Dropped {multi|broad}cast of type=[%x]\n", @@ -590,7 +591,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ethhdr *eth = eth_hdr(skb); + struct ethhdr *eth = skb_eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; @@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) return dev_forward_skb(ipvlan->phy_dev, skb); } else if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index a1c77cc004165..498e5c8013efb 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -208,7 +208,7 @@ static __net_init int loopback_net_init(struct net *net) int err; err = -ENOMEM; - dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup); + dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup); if (!dev) goto out; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 70c5905a916b9..4fdb970e34823 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1390,7 +1390,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) return NULL; } -static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) +static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci, + bool active) { struct macsec_rx_sc *rx_sc; struct macsec_dev *macsec; @@ -1414,7 +1415,7 @@ static struct 
macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) } rx_sc->sci = sci; - rx_sc->active = true; + rx_sc->active = active; refcount_set(&rx_sc->refcnt, 1); secy = &macsec_priv(dev)->secy; @@ -1823,6 +1824,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) secy->key_len); err = macsec_offload(ops->mdo_add_rxsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); if (err) goto cleanup; } @@ -1867,7 +1869,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) struct macsec_rx_sc *rx_sc; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct macsec_secy *secy; - bool was_active; + bool active = true; int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -1889,16 +1891,15 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) secy = &macsec_priv(dev)->secy; sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); - rx_sc = create_rx_sc(dev, sci); + if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) + active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); + + rx_sc = create_rx_sc(dev, sci, active); if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } - was_active = rx_sc->active; - if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) - rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); - if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; @@ -1922,7 +1923,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) return 0; cleanup: - rx_sc->active = was_active; + del_rx_sc(secy, sci); + free_rx_sc(rx_sc); rtnl_unlock(); return ret; } @@ -2065,6 +2067,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) secy->key_len); err = macsec_offload(ops->mdo_add_txsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); if (err) goto cleanup; } @@ -2561,7 +2564,7 @@ static bool macsec_is_configured(struct macsec_dev *macsec) struct macsec_tx_sc *tx_sc = &secy->tx_sc; int i; - if (secy->n_rx_sc > 0) + if (secy->rx_sc) return true; for (i = 0; i < MACSEC_NUM_AN; i++) @@ -2581,7 +2584,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) const struct macsec_ops *ops; struct macsec_context ctx; struct macsec_dev *macsec; - int ret; + int ret = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -2594,28 +2597,36 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) macsec_genl_offload_policy, NULL)) return -EINVAL; + rtnl_lock(); + dev = get_dev_from_nl(genl_info_net(info), attrs); - if (IS_ERR(dev)) - return PTR_ERR(dev); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } macsec = macsec_priv(dev); - if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) - return -EINVAL; + if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { + ret = -EINVAL; + goto out; + } offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); if (macsec->offload == offload) - return 0; + goto out; /* Check if the offloading mode is supported by the underlying layers */ if (offload != MACSEC_OFFLOAD_OFF && - !macsec_check_offload(offload, macsec)) - return -EOPNOTSUPP; + !macsec_check_offload(offload, macsec)) { + ret = -EOPNOTSUPP; + goto out; + } /* Check if the net device is busy. 
*/ - if (netif_running(dev)) - return -EBUSY; - - rtnl_lock(); + if (netif_running(dev)) { + ret = -EBUSY; + goto out; + } prev_offload = macsec->offload; macsec->offload = offload; @@ -2645,17 +2656,12 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) if (ret) goto rollback; - /* Force features update, since they are different for SW MACSec and - * HW offloading cases. - */ - netdev_update_features(dev); - rtnl_unlock(); return 0; rollback: macsec->offload = prev_offload; - +out: rtnl_unlock(); return ret; } @@ -3417,16 +3423,9 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, return ret; } -#define SW_MACSEC_FEATURES \ +#define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) -/* If h/w offloading is enabled, use real device features save for - * VLAN_FEATURES - they require additional ops - * HW_MACSEC - no reason to report it - */ -#define REAL_DEV_FEATURES(dev) \ - ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC)) - static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); @@ -3443,12 +3442,8 @@ static int macsec_dev_init(struct net_device *dev) return err; } - if (macsec_is_offloaded(macsec)) { - dev->features = REAL_DEV_FEATURES(real_dev); - } else { - dev->features = real_dev->features & SW_MACSEC_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; - } + dev->features = real_dev->features & MACSEC_FEATURES; + dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; dev->needed_headroom = real_dev->needed_headroom + MACSEC_NEEDED_HEADROOM; @@ -3477,10 +3472,7 @@ static netdev_features_t macsec_fix_features(struct net_device *dev, struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; - if (macsec_is_offloaded(macsec)) - return REAL_DEV_FEATURES(real_dev); - - features &= (real_dev->features & SW_MACSEC_FEATURES) | + features &= (real_dev->features & MACSEC_FEATURES) | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; features |= NETIF_F_LLTX; @@ -3696,6 +3688,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, + [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, }; static void macsec_free_netdev(struct net_device *dev) @@ -3829,7 +3822,6 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; - int ret; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index c8d803d3616c9..5869bc2c3aa79 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -139,7 +139,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source( u32 idx = macvlan_eth_hash(addr); struct hlist_head *h = &vlan->port->vlan_source_hash[idx]; - hlist_for_each_entry_rcu(entry, h, hlist) { + hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (ether_addr_equal_64bits(entry->addr, addr) && entry->vlan == vlan) return entry; @@ -1176,7 +1176,7 @@ void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); - dev->min_mtu = 0; + /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. 
*/ dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); @@ -1509,8 +1509,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ - if (create && macvlan_port_get_rtnl(lowerdev)) + if (create && macvlan_port_get_rtnl(lowerdev)) { + macvlan_flush_sources(port, vlan); macvlan_port_destroy(port->dev); + } return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); @@ -1612,7 +1614,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb, struct hlist_head *h = &vlan->port->vlan_source_hash[i]; struct macvlan_source_entry *entry; - hlist_for_each_entry_rcu(entry, h, hlist) { + hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (entry->vlan != vlan) continue; if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr)) diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c index bf86c9c7a2883..ab863530c9e89 100644 --- a/drivers/net/mdio/mdio-mux-meson-g12a.c +++ b/drivers/net/mdio/mdio-mux-meson-g12a.c @@ -4,6 +4,7 @@ */ #include <linux/bitfield.h> +#include <linux/delay.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/device.h> @@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = { static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv) { + u32 value; int ret; /* Enable the phy clock */ @@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv) /* Initialize ephy control */ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0); - writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) | - FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) | - FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) | - PHY_CNTL1_CLK_EN | - PHY_CNTL1_CLKFREQ | - PHY_CNTL1_PHY_ENB, - priv->regs + ETH_PHY_CNTL1); + + /* Make sure we get a 0 -> 1 transition on the enable bit */ + value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) | + FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) | + FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) | + PHY_CNTL1_CLK_EN | + PHY_CNTL1_CLKFREQ; + writel(value, priv->regs + ETH_PHY_CNTL1); writel(PHY_CNTL2_USE_INTERNAL | PHY_CNTL2_SMI_SRC_MAC | PHY_CNTL2_RX_CLK_EPHY, priv->regs + ETH_PHY_CNTL2); + value |= PHY_CNTL1_PHY_ENB; + writel(value, priv->regs + ETH_PHY_CNTL1); + + /* The phy needs a bit of time to power up */ + mdelay(10); + return 0; } diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index ea0bf13e8ac3f..5bae47f3da405 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -332,6 +332,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) return 0; unregister: + of_node_put(child); mdiobus_unregister(mdio); return rc; } diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index a5bab614ff845..b701ee83e64a8 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); ndev->stats.rx_errors++; ndev->stats.rx_fifo_errors++; } @@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, ndev->stats.tx_aborted_errors++; } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { /* Make sure anybody stopping the queue after this sees the new @@ -484,7 +484,14 @@ static int __init 
ntb_netdev_init_module(void) rc = ntb_transport_register_client_dev(KBUILD_MODNAME); if (rc) return rc; - return ntb_transport_register_client(&ntb_netdev_client); + + rc = ntb_transport_register_client(&ntb_netdev_client); + if (rc) { + ntb_transport_unregister_client_dev(KBUILD_MODNAME); + return rc; + } + + return 0; } module_init(ntb_netdev_init_module); diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 75a62d1cc7375..7045595f8d7d1 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -89,6 +89,9 @@ #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8) #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0) +#define VEND1_GLOBAL_GEN_STAT2 0xc831 +#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15) + #define VEND1_GLOBAL_RSVD_STAT1 0xc885 #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4) #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0) @@ -123,6 +126,12 @@ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0) +/* Sleep and timeout for checking if the Processor-Intensive + * MDIO operation is finished + */ +#define AQR107_OP_IN_PROG_SLEEP 1000 +#define AQR107_OP_IN_PROG_TIMEOUT 100000 + struct aqr107_hw_stat { const char *name; int reg; @@ -569,16 +578,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev) phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n"); } +static int aqr107_wait_processor_intensive_op(struct phy_device *phydev) +{ + int val, err; + + /* The datasheet notes to wait at least 1ms after issuing a + * processor intensive operation before checking. + * We cannot use the 'sleep_before_read' parameter of read_poll_timeout + * because that just determines the maximum time slept, not the minimum. + */ + usleep_range(1000, 5000); + + err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_GEN_STAT2, val, + !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG), + AQR107_OP_IN_PROG_SLEEP, + AQR107_OP_IN_PROG_TIMEOUT, false); + if (err) { + phydev_err(phydev, "timeout: processor-intensive MDIO operation\n"); + return err; + } + + return 0; +} + static int aqr107_suspend(struct phy_device *phydev) { - return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, - MDIO_CTRL1_LPOWER); + int err; + + err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); + if (err) + return err; + + return aqr107_wait_processor_intensive_op(phydev); } static int aqr107_resume(struct phy_device *phydev) { - return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, - MDIO_CTRL1_LPOWER); + int err; + + err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); + if (err) + return err; + + return aqr107_wait_processor_intensive_op(phydev); } static int aqr107_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 3a8849716459a..81412999445d8 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -247,7 +247,8 @@ static int dp83822_config_intr(struct phy_device *phydev) DP83822_ENERGY_DET_INT_EN | DP83822_LINK_QUAL_INT_EN); - if (!dp83822->fx_enabled) + /* Private data pointer is NULL on DP83825/26 */ + if (!dp83822 || !dp83822->fx_enabled) misr_status |= DP83822_ANEG_COMPLETE_INT_EN | DP83822_DUP_MODE_CHANGE_INT_EN | DP83822_SPEED_CHANGED_INT_EN; @@ -267,9 +268,9 @@ static int dp83822_config_intr(struct phy_device *phydev) DP83822_PAGE_RX_INT_EN | DP83822_EEE_ERROR_CHANGE_INT_EN); - if (!dp83822->fx_enabled) - misr_status |= DP83822_MDI_XOVER_INT_EN | 
- DP83822_ANEG_ERR_INT_EN | + /* Private data pointer is NULL on DP83825/26 */ + if (!dp83822 || !dp83822->fx_enabled) + misr_status |= DP83822_ANEG_ERR_INT_EN | DP83822_WOL_PKT_INT_EN; err = phy_write(phydev, MII_DP83822_MISR2, misr_status); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index f86acad0aad44..c8031e297faf4 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -757,6 +757,14 @@ static int dp83867_config_init(struct phy_device *phydev) else val &= ~DP83867_SGMII_TYPE; phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val); + + /* This is a SW workaround for link instability if RX_CTRL is + * not strapped to mode 3 or 4 in HW. This is required for SGMII + * in addition to clearing bit 7, handled above. + */ + if (dp83867->rxctrl_strap_quirk) + phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + BIT(8)); } val = phy_read(phydev, DP83867_CFG3); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index c1cbdac4b376f..e9303be486556 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -108,7 +108,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device); struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) { - struct mdio_device *mdiodev = bus->mdio_map[addr]; + struct mdio_device *mdiodev; + + if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map)) + return NULL; + + mdiodev = bus->mdio_map[addr]; if (!mdiodev) return NULL; @@ -574,7 +579,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) } for (i = 0; i < PHY_MAX_ADDR; i++) { - if ((bus->phy_mask & (1 << i)) == 0) { + if ((bus->phy_mask & BIT(i)) == 0) { struct phy_device *phydev; phydev = mdiobus_scan(bus, i); diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index e8f2ca625837f..39151ec6f65e2 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -235,6 +235,8 @@ static struct phy_driver meson_gxl_phy[] = { .config_intr = meson_gxl_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = genphy_write_mmd_unsupported, }, { PHY_ID_MATCH_EXACT(0x01803301), .name = "Meson G12A Internal PHY", @@ -245,6 +247,8 @@ static struct phy_driver meson_gxl_phy[] = { .config_intr = meson_gxl_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = genphy_write_mmd_unsupported, }, }; diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index b7b2521c73fb6..c00eef457b850 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -632,6 +632,7 @@ static void vsc8584_macsec_free_flow(struct vsc8531_private *priv, list_del(&flow->list); clear_bit(flow->index, bitmap); + memzero_explicit(flow->key, sizeof(flow->key)); kfree(flow); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d2f6d8107595a..3ef5aa6b72a7e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1423,6 +1423,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, error_module_put: module_put(d->driver->owner); + d->driver = NULL; error_put_device: put_device(d); if (ndev_owner != bus->owner) diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 151c2a3f0b3a3..7a78dfdfa5bdd 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -82,6 +82,7 @@ static int xgmiitorgmii_probe(struct 
mdio_device *mdiodev) if (!priv->phy_dev->drv) { dev_info(dev, "Attached phy not ready\n"); + put_device(&priv->phy_dev->mdio.dev); return -EPROBE_DEFER; } diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 5a0e5a8a8917b..22f7db87ed21a 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -444,12 +444,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, } rcv->state = PLIP_PK_DONE; if (rcv->skb) { - kfree_skb(rcv->skb); + dev_kfree_skb_irq(rcv->skb); rcv->skb = NULL; } snd->state = PLIP_PK_DONE; if (snd->skb) { - dev_kfree_skb(snd->skb); + dev_consume_skb_irq(snd->skb); snd->skb = NULL; } spin_unlock_irq(&nl->lock); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 2b9815ec4a622..b825c6a9b6dde 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1610,6 +1610,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) int len; unsigned char *cp; + skb->dev = ppp->dev; + if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER /* check if we should pass this packet */ diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 615f3776b4bee..7117d559a32e4 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1270,10 +1270,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev, } } - netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); + if (dev->flags & IFF_UP) { + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + } port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); @@ -1344,8 +1346,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev) netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); + if (dev->flags & IFF_UP) { + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); + } dev_close(port_dev); team_port_leave(team, port); @@ -1695,6 +1699,14 @@ static int team_open(struct net_device *dev) static int team_close(struct net_device *dev) { + struct team *team = netdev_priv(dev); + struct team_port *port; + + list_for_each_entry(port, &team->port_list, list) { + dev_uc_unsync(port->dev, dev); + dev_mc_unsync(port->dev, dev); + } + return 0; } diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 3160443ef3b9e..5d96dc1b00b36 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1343,12 +1343,21 @@ static int __init tbnet_init(void) TBNET_MATCH_FRAGS_ID); ret = tb_register_property_dir("network", tbnet_dir); - if (ret) { - tb_property_free_dir(tbnet_dir); - return ret; - } + if (ret) + goto err_free_dir; + + ret = tb_register_service_driver(&tbnet_driver); + if (ret) + goto err_unregister; - return tb_register_service_driver(&tbnet_driver); + return 0; + +err_unregister: + tb_unregister_property_dir("network", tbnet_dir); +err_free_dir: + tb_property_free_dir(tbnet_dir); + + return ret; } module_init(tbnet_init); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a643b2f2f4de9..67ce7b779af61 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -698,7 +698,6 @@ static void __tun_detach(struct tun_file *tfile, bool clean) if (tun) xdp_rxq_info_unreg(&tfile->xdp_rxq); ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); - sock_put(&tfile->sk); } } @@ -714,6 +713,9 @@ static 
void tun_detach(struct tun_file *tfile, bool clean) if (dev) netdev_state_change(dev); rtnl_unlock(); + + if (clean) + sock_put(&tfile->sk); } static void tun_detach_all(struct net_device *dev) @@ -1475,7 +1477,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, int err; int i; - if (it->nr_segs > MAX_SKB_FRAGS + 1) + if (it->nr_segs > MAX_SKB_FRAGS + 1 || + len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-EMSGSIZE); local_bh_disable(); @@ -1985,17 +1988,25 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { + WARN_ON_ONCE(1); + err = -ENOMEM; this_cpu_inc(tun->pcpu_stats->rx_dropped); +napi_busy: napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); - WARN_ON(1); - return -ENOMEM; + return err; } - local_bh_disable(); - napi_gro_frags(&tfile->napi); - local_bh_enable(); + if (likely(napi_schedule_prep(&tfile->napi))) { + local_bh_disable(); + napi_gro_frags(&tfile->napi); + napi_complete(&tfile->napi); + local_bh_enable(); + } else { + err = -EBUSY; + goto napi_busy; + } mutex_unlock(&tfile->napi_mutex); } else if (tfile->napi_enabled) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 43ddbe61dc58e..935cd296887f2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -763,6 +763,13 @@ static const struct usb_device_id products[] = { }, #endif +/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index fc5895f85cee2..a552bb1665b8a 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT); if (status != 0) { netdev_err(dev->net, - "Error sending init packet. Status %i, length %i\n", - status, act_len); + "Error sending init packet. Status %i\n", + status); return status; } else if (act_len != init_msg_len) { @@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, if (status != 0) netdev_err(dev->net, - "Error receiving init result. Status %i, length %i\n", - status, act_len); + "Error receiving init result. 
Status %i\n", + status); else if (act_len != expected_len) netdev_err(dev->net, "Unexpected init result length: %i\n", act_len); diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 17c9c63b8eebb..ce7862dac2b75 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c @@ -57,9 +57,7 @@ static inline int pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index) { - return usbnet_read_cmd(dev, req, - USB_DIR_IN | USB_TYPE_VENDOR | - USB_RECIP_DEVICE, + return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, index, NULL, 0); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 48e8b94e4a7c5..bce151e3706a0 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1024,6 +1024,7 @@ static const struct usb_device_id products[] = { {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */ /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1291,6 +1292,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ @@ -1331,6 +1333,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ @@ -1349,6 +1352,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ + {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0bb5b1c786546..f9a79d67d6d4f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1689,7 +1689,9 @@ static void intr_callback(struct urb *urb) "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: - netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); + if (net_ratelimit()) + netif_info(tp, intr, tp->netdev, + "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: @@ -6868,6 +6870,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)}, diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 1505fe3f87ed3..1ff723e15d523 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, off = le32_to_cpu(u.get_c->offset); len = le32_to_cpu(u.get_c->len); - if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE)) + if (unlikely((off > CONTROL_BUFFER_SIZE - 8) || + (len > CONTROL_BUFFER_SIZE - 8 - off))) goto response_error; if (*reply_len != -1 && len != *reply_len) diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 65d42f5d42a3c..e1cd4c2de2d30 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -61,6 +61,7 @@ struct smsc95xx_priv { u8 suspend_flags; struct mii_bus *mdiobus; struct phy_device *phydev; + struct task_struct *pm_task; }; static bool turbo_mode = true; @@ -70,13 +71,14 @@ MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data, int in_pm) { + struct smsc95xx_priv *pdata = dev->driver_priv; u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16); BUG_ON(!dev); - if (!in_pm) + if (current != pdata->pm_task) fn = usbnet_read_cmd; else fn = usbnet_read_cmd_nopm; @@ -100,13 +102,14 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data, int in_pm) { + struct smsc95xx_priv *pdata = dev->driver_priv; u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); BUG_ON(!dev); - if (!in_pm) + if (current != pdata->pm_task) fn = usbnet_write_cmd; else fn = usbnet_write_cmd_nopm; @@ -1468,9 +1471,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) u32 val, link_up; int ret; + pdata->pm_task = current; + ret = usbnet_suspend(intf, message); if (ret < 0) { netdev_warn(dev->net, "usbnet_suspend error\n"); + pdata->pm_task = NULL; return ret; } @@ -1717,6 +1723,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); + pdata->pm_task = NULL; return ret; } @@ -1737,29 +1744,31 @@ static int smsc95xx_resume(struct usb_interface *intf) /* do this first to ensure it's cleared even in error case */ pdata->suspend_flags = 0; + pdata->pm_task = current; + if (suspend_flags & SUSPEND_ALLMODES) { /* clear 
wake-up sources */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) - return ret; + goto done; val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_); ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) - return ret; + goto done; /* clear wake-up status */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); if (ret < 0) - return ret; + goto done; val &= ~PM_CTL_WOL_EN_; val |= PM_CTL_WUPS_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); if (ret < 0) - return ret; + goto done; } ret = usbnet_resume(intf); @@ -1767,15 +1776,21 @@ static int smsc95xx_resume(struct usb_interface *intf) netdev_warn(dev->net, "usbnet_resume error\n"); phy_init_hw(pdata->phydev); + +done: + pdata->pm_task = NULL; return ret; } static int smsc95xx_reset_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); + struct smsc95xx_priv *pdata = dev->driver_priv; int ret; + pdata->pm_task = current; ret = smsc95xx_reset(dev); + pdata->pm_task = NULL; if (ret < 0) return ret; diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index fce6713e970ba..811c8751308c6 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN || len > skb->len) + if (len > ETH_FRAME_LEN || len > skb->len || len < 0) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 1239fd57514bb..43d70348343b2 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1567,6 +1567,7 @@ void usbnet_disconnect (struct usb_interface *intf) struct usbnet *dev; struct usb_device *xdev; struct net_device *net; + struct urb *urb; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); @@ -1583,7 +1584,11 @@ void usbnet_disconnect (struct usb_interface *intf) net = dev->net; unregister_netdev (net); - usb_scuttle_anchored_urbs(&dev->deferred); + while ((urb = usb_get_from_anchor(&dev->deferred))) { + dev_kfree_skb(urb->context); + kfree(urb->sg); + usb_free_urb(urb); + } if (dev->driver_info->unbind) dev->driver_info->unbind (dev, intf); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 5be8ed9105535..5aa23a036ed36 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -849,6 +849,9 @@ static int veth_poll(struct napi_struct *napi, int budget) xdp_set_return_frame_no_direct(); done = veth_xdp_rcv(rq, budget, &bq, &stats); + if (stats.xdp_redirect > 0) + xdp_do_flush(); + if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); @@ -862,8 +865,6 @@ static int veth_poll(struct napi_struct *napi, int budget) if (stats.xdp_tx > 0) veth_xdp_flush(rq, &bq); - if (stats.xdp_redirect > 0) - xdp_do_flush(); xdp_clear_return_frame_no_direct(); return done; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c942cd6a2c65e..d533211161366 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1525,13 +1525,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) received = virtnet_receive(rq, budget, &xdp_xmit); + if (xdp_xmit & VIRTIO_XDP_REDIR) + xdp_do_flush(); + /* Out of packets? 
*/ if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); - if (xdp_xmit & VIRTIO_XDP_REDIR) - xdp_do_flush(); - if (xdp_xmit & VIRTIO_XDP_TX) { sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { @@ -1928,8 +1928,8 @@ static int virtnet_close(struct net_device *dev) cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) { - xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); napi_disable(&vi->rq[i].napi); + xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); virtnet_napi_tx_disable(&vi->sq[i].napi); } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 6678a734cc4d3..3b889fed98826 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1236,6 +1236,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, (le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { skb->ip_summed = CHECKSUM_UNNECESSARY; + if ((le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) { + skb->csum_level = 1; + } WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && !(le32_to_cpu(gdesc->dword[0]) & (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); @@ -1245,6 +1249,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & (1 << VMXNET3_RCD_TUC_SHIFT))) { skb->ip_summed = CHECKSUM_UNNECESSARY; + if ((le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) { + skb->csum_level = 1; + } WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && !(le32_to_cpu(gdesc->dword[0]) & (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); @@ -1356,6 +1364,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, }; u32 num_pkts = 0; bool skip_page_frags = false; + bool encap_lro = false; struct Vmxnet3_RxCompDesc *rcd; struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; u16 segCnt = 0, mss = 0; @@ -1496,13 +1505,18 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, if (VMXNET3_VERSION_GE_2(adapter) && rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { struct Vmxnet3_RxCompDescExt *rcdlro; + union Vmxnet3_GenericDesc *gdesc; + rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; + gdesc = (union Vmxnet3_GenericDesc *)rcd; segCnt = rcdlro->segCnt; WARN_ON_ONCE(segCnt == 0); mss = rcdlro->mss; if (unlikely(segCnt <= 1)) segCnt = 0; + encap_lro = (le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)); } else { segCnt = 0; } @@ -1570,7 +1584,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, vmxnet3_rx_csum(adapter, skb, (union Vmxnet3_GenericDesc *)rcd); skb->protocol = eth_type_trans(skb, adapter->netdev); - if (!rcd->tcp || + if ((!rcd->tcp && !encap_lro) || !(adapter->netdev->features & NETIF_F_LRO)) goto not_lro; @@ -1579,7 +1593,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, SKB_GSO_TCPV4 : SKB_GSO_TCPV6; skb_shinfo(skb)->gso_size = mss; skb_shinfo(skb)->gso_segs = segCnt; - } else if (segCnt != 0 || skb->len > mtu) { + } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) { u32 hlen; hlen = vmxnet3_get_hdr_len(adapter, skb, @@ -1608,6 +1622,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, napi_gro_receive(&rq->napi, skb); ctx->skb = NULL; + encap_lro = false; num_pkts++; } diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index b50cf11d197d5..36a6958b6a0b2 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2612,6 +2612,7 @@ fst_remove_one(struct pci_dev *pdev) for (i = 0; i < card->nports; i++) { struct net_device *dev = port_to_dev(&card->ports[i]); 
unregister_hdlc_device(dev); + free_netdev(dev); } fst_disable_intr(card); @@ -2632,6 +2633,7 @@ fst_remove_one(struct pci_dev *pdev) card->tx_dma_handle_card); } fst_card_array[card->card_no] = NULL; + kfree(card); } static struct pci_driver fst_driver = { diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 7eac6a3e1cdee..ae1ae65e7f90a 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1245,9 +1245,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev) free_dev: free_netdev(dev); undo_uhdlc_init: - iounmap(utdm->siram); + if (utdm) + iounmap(utdm->siram); unmap_si_regs: - iounmap(utdm->si_regs); + if (utdm) + iounmap(utdm->si_regs); free_utdm: if (uhdlc_priv->tsa) kfree(utdm); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index f6562a343cb4e..b965eb6a4bb17 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -403,7 +403,7 @@ static int lapbeth_device_event(struct notifier_block *this, if (dev_net(dev) != &init_net) return NOTIFY_DONE; - if (!dev_is_ethdev(dev)) + if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index d0f3b6d7f4089..5c804bcabfe6b 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs) if (attrs[WGPEER_A_ENDPOINT]) { struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + struct endpoint endpoint = { { { 0 } } }; - if ((len == sizeof(struct sockaddr_in) && - addr->sa_family == AF_INET) || - (len == sizeof(struct sockaddr_in6) && - addr->sa_family == AF_INET6)) { - struct endpoint endpoint = { { { 0 } } }; - - memcpy(&endpoint.addr, addr, len); + if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) { + endpoint.addr4 = *(struct sockaddr_in *)addr; + wg_socket_set_peer_endpoint(peer, &endpoint); + } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) { + endpoint.addr6 = *(struct sockaddr_in6 *)addr; wg_socket_set_peer_endpoint(peer, &endpoint); } } diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c index ba87d294604fe..d4bb40a695ab6 100644 --- a/drivers/net/wireguard/selftest/ratelimiter.c +++ b/drivers/net/wireguard/selftest/ratelimiter.c @@ -6,29 +6,28 @@ #ifdef DEBUG #include <linux/jiffies.h> -#include <linux/hrtimer.h> static const struct { bool result; - u64 nsec_to_sleep_before; + unsigned int msec_to_sleep_before; } expected_results[] __initconst = { [0 ...
PACKETS_BURSTABLE - 1] = { true, 0 }, [PACKETS_BURSTABLE] = { false, 0 }, - [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND }, + [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, [PACKETS_BURSTABLE + 2] = { false, 0 }, - [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, + [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, [PACKETS_BURSTABLE + 4] = { true, 0 }, [PACKETS_BURSTABLE + 5] = { false, 0 } }; static __init unsigned int maximum_jiffies_at_index(int index) { - u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3; + unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; int i; for (i = 0; i <= index; ++i) - total_nsecs += expected_results[i].nsec_to_sleep_before; - return nsecs_to_jiffies(total_nsecs); + total_msecs += expected_results[i].msec_to_sleep_before; + return msecs_to_jiffies(total_msecs); } static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, @@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, loop_start_time = jiffies; for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { - if (expected_results[i].nsec_to_sleep_before) { - ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3), - ns_to_ktime(expected_results[i].nsec_to_sleep_before)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME); - } + if (expected_results[i].msec_to_sleep_before) + msleep(expected_results[i].msec_to_sleep_before); if (time_is_before_jiffies(loop_start_time + maximum_jiffies_at_index(i))) @@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void) if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) return true; - BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0); + BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); if (wg_ratelimiter_init()) goto out; @@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void) ++test; #endif - for (trials = TRIALS_BEFORE_GIVING_UP;;) { + for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) { int test_count = 0, ret; ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 1baec4b412c8d..efe38b2c1df73 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb) } } +static void ar5523_cancel_tx_cmd(struct ar5523 *ar) +{ + usb_kill_urb(ar->tx_cmd.urb_tx); +} + static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, int ilen, void *odata, int olen, int flags) { @@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, } if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) { + ar5523_cancel_tx_cmd(ar); cmd->odata = NULL; ar5523_err(ar, "timeout waiting for command %02x reply\n", code); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b61cd275fbda2..15f02bf23e9bd 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -853,11 +853,36 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) return 0; } +static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer) +{ + int peer_id, i; + + lockdep_assert_held(&ar->conf_mutex); + + for_each_set_bit(peer_id, peer->peer_ids, + 
ATH10K_MAX_NUM_PEER_IDS) { + ar->peer_map[peer_id] = NULL; + } + + /* Double check that peer is properly un-referenced from + * the peer_map + */ + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + if (ar->peer_map[i] == peer) { + ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", + peer->addr, peer, i); + ar->peer_map[i] = NULL; + } + } + + list_del(&peer->list); + kfree(peer); + ar->num_peers--; +} + static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) { struct ath10k_peer *peer, *tmp; - int peer_id; - int i; lockdep_assert_held(&ar->conf_mutex); @@ -869,25 +894,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); - for_each_set_bit(peer_id, peer->peer_ids, - ATH10K_MAX_NUM_PEER_IDS) { - ar->peer_map[peer_id] = NULL; - } - - /* Double check that peer is properly un-referenced from - * the peer_map - */ - for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { - if (ar->peer_map[i] == peer) { - ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", - peer->addr, peer, i); - ar->peer_map[i] = NULL; - } - } - - list_del(&peer->list); - kfree(peer); - ar->num_peers--; + ath10k_peer_map_cleanup(ar, peer); } spin_unlock_bh(&ar->data_lock); } @@ -7470,10 +7477,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, /* Clean up the peer object as well since we * must have failed to do this above. */ - list_del(&peer->list); - ar->peer_map[i] = NULL; - kfree(peer); - ar->num_peers--; + ath10k_peer_map_cleanup(ar, peer); } } spin_unlock_bh(&ar->data_lock); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 86f52bcb3e4db..67e240327fb31 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3799,18 +3799,22 @@ static struct pci_driver ath10k_pci_driver = { static int __init ath10k_pci_init(void) { - int ret; + int ret1, ret2; - ret = pci_register_driver(&ath10k_pci_driver); - if (ret) + ret1 = pci_register_driver(&ath10k_pci_driver); + if (ret1) printk(KERN_ERR "failed to register ath10k pci driver: %d\n", - ret); + ret1); - ret = ath10k_ahb_init(); - if (ret) - printk(KERN_ERR "ahb init failed: %d\n", ret); + ret2 = ath10k_ahb_init(); + if (ret2) + printk(KERN_ERR "ahb init failed: %d\n", ret2); - return ret; + if (ret1 && ret2) + return ret1; + + /* registered to at least one bus */ + return 0; } module_init(ath10k_pci_init); diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 44282aec069d3..67faf62999ded 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -3419,6 +3419,8 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + if (nsts > (ar->num_rx_chains - 1)) + nsts = ar->num_rx_chains - 1; value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); } @@ -3459,7 +3461,7 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) { bool subfer, subfee; - int sound_dim = 0; + int sound_dim = 0, nsts = 0; subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)); subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)); @@ -3469,6 +3471,11 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) subfer = false; 
} + if (ar->num_rx_chains < 2) { + *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); + subfee = false; + } + /* If SU Beaformer is not set, then disable MU Beamformer Capability */ if (!subfer) *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); @@ -3481,7 +3488,9 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; - /* TODO: Need to check invalid STS and Sound_dim values set by FW? */ + nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); + nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; /* Enable Sounding Dimension Field only if SU BF is enabled */ if (subfer) { @@ -3493,9 +3502,15 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) *vht_cap |= sound_dim; } - /* Use the STS advertised by FW unless SU Beamformee is not supported*/ - if (!subfee) - *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); + /* Enable Beamformee STS Field only if SU BF is enabled */ + if (subfee) { + if (nsts > (ar->num_rx_chains - 1)) + nsts = ar->num_rx_chains - 1; + + nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; + *vht_cap |= nsts; + } } static struct ieee80211_sta_vht_cap diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index f06eec99de688..f938ac1a4abd4 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -709,14 +709,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) struct rx_buf *rx_buf = (struct rx_buf *)urb->context; struct hif_device_usb *hif_dev = rx_buf->hif_dev; struct sk_buff *skb = rx_buf->skb; - struct sk_buff *nskb; int ret; if (!skb) return; if (!hif_dev) - goto free; + goto free_skb; switch (urb->status) { case 0: @@ -725,7 +724,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: - goto free; + goto free_skb; default: skb_reset_tail_pointer(skb); skb_trim(skb, 0); @@ -736,25 +735,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) if (likely(urb->actual_length != 0)) { skb_put(skb, urb->actual_length); - /* Process the command first */ + /* + * Process the command first. + * skb is either freed here or passed to be + * managed to another callback function. 
+ */ ath9k_htc_rx_msg(hif_dev->htc_handle, skb, skb->len, USB_REG_IN_PIPE); - - nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); - if (!nskb) { + skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); + if (!skb) { dev_err(&hif_dev->udev->dev, "ath9k_htc: REG_IN memory allocation failure\n"); - urb->context = NULL; - return; + goto free_rx_buf; } - rx_buf->skb = nskb; + rx_buf->skb = skb; usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), - nskb->data, MAX_REG_IN_BUF_SIZE, + skb->data, MAX_REG_IN_BUF_SIZE, ath9k_hif_usb_reg_in_cb, rx_buf, 1); } @@ -763,12 +764,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { usb_unanchor_urb(urb); - goto free; + goto free_skb; } return; -free: +free_skb: kfree_skb(skb); +free_rx_buf: kfree(rx_buf); urb->context = NULL; } @@ -781,14 +783,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_buf, list) { - usb_get_urb(tx_buf->urb); - spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); - usb_kill_urb(tx_buf->urb); list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); - spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); @@ -1330,10 +1328,24 @@ static int send_eject_command(struct usb_interface *interface) static int ath9k_hif_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { + struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out; struct usb_device *udev = interface_to_usbdev(interface); + struct usb_host_interface *alt; struct hif_device_usb *hif_dev; int ret = 0; + /* Verify the expected endpoints are present */ + alt = interface->cur_altsetting; + if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 || + usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE || + usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE || + usb_endpoint_num(int_in) != USB_REG_IN_PIPE || + usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) { + dev_err(&udev->dev, + "ath9k_htc: Device endpoint numbers are not the expected ones\n"); + return -ENODEV; + } + if (id->driver_info == STORAGE_DEVICE) return send_eject_command(interface); diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 994ec48b2f669..ca05b07a45e67 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -364,33 +364,27 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, } static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, - struct sk_buff *skb) + struct sk_buff *skb, u32 len) { uint32_t *pattern = (uint32_t *)skb->data; - switch (*pattern) { - case 0x33221199: - { + if (*pattern == 0x33221199 && len >= sizeof(struct htc_panic_bad_vaddr)) { struct htc_panic_bad_vaddr *htc_panic; htc_panic = (struct htc_panic_bad_vaddr *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! " "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n", htc_panic->exccause, htc_panic->pc, htc_panic->badvaddr); - break; - } - case 0x33221299: - { + return; + } + if (*pattern == 0x33221299) { struct htc_panic_bad_epid *htc_panic; htc_panic = (struct htc_panic_bad_epid *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! 
" "bad epid: 0x%08x\n", htc_panic->epid); - break; - } - default: - dev_err(htc_handle->dev, "ath: unknown panic pattern!\n"); - break; + return; } + dev_err(htc_handle->dev, "ath: unknown panic pattern!\n"); } /* @@ -411,16 +405,26 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle, if (!htc_handle || !skb) return; + /* A valid message requires len >= 8. + * + * sizeof(struct htc_frame_hdr) == 8 + * sizeof(struct htc_ready_msg) == 8 + * sizeof(struct htc_panic_bad_vaddr) == 16 + * sizeof(struct htc_panic_bad_epid) == 8 + */ + if (unlikely(len < sizeof(struct htc_frame_hdr))) + goto invalid; htc_hdr = (struct htc_frame_hdr *) skb->data; epid = htc_hdr->endpoint_id; if (epid == 0x99) { - ath9k_htc_fw_panic_report(htc_handle, skb); + ath9k_htc_fw_panic_report(htc_handle, skb, len); kfree_skb(skb); return; } if (epid < 0 || epid >= ENDPOINT_MAX) { +invalid: if (pipe_id != USB_REG_IN_PIPE) dev_kfree_skb_any(skb); else @@ -432,21 +436,30 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle, /* Handle trailer */ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) { - if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) + if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) { /* Move past the Watchdog pattern */ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4); + len -= 4; + } } /* Get the message ID */ + if (unlikely(len < sizeof(struct htc_frame_hdr) + sizeof(__be16))) + goto invalid; msg_id = (__be16 *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); /* Now process HTC messages */ switch (be16_to_cpu(*msg_id)) { case HTC_MSG_READY_ID: + if (unlikely(len < sizeof(struct htc_ready_msg))) + goto invalid; htc_process_target_rdy(htc_handle, htc_hdr); break; case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID: + if (unlikely(len < sizeof(struct htc_frame_hdr) + + sizeof(struct htc_conn_svc_rspmsg))) + goto invalid; htc_process_conn_rsp(htc_handle, htc_hdr); break; default: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index c2b6e5c966d04..0a069bc7f1567 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -90,6 +90,9 @@ #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) +#define BRCMF_MAX_CHANSPEC_LIST \ + (BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1) + static bool check_vif_up(struct brcmf_cfg80211_vif *vif) { if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) { @@ -6459,6 +6462,13 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, band->channels[i].flags = IEEE80211_CHAN_DISABLED; total = le32_to_cpu(list->count); + if (total > BRCMF_MAX_CHANSPEC_LIST) { + bphy_err(drvr, "Invalid count of channel Spec. (%u)\n", + total); + err = -EINVAL; + goto fail_pbuf; + } + for (i = 0; i < total; i++) { ch.chspec = (u16)le32_to_cpu(list->element[i]); cfg->d11inf.decchspec(&ch); @@ -6604,6 +6614,13 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ]; list = (struct brcmf_chanspec_list *)pbuf; num_chan = le32_to_cpu(list->count); + if (num_chan > BRCMF_MAX_CHANSPEC_LIST) { + bphy_err(drvr, "Invalid count of channel Spec. 
(%u)\n", + num_chan); + kfree(pbuf); + return -EINVAL; + } + for (i = 0; i < num_chan; i++) { ch.chspec = (u16)le32_to_cpu(list->element[i]); cfg->d11inf.decchspec(&ch); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 61039538a15bc..c8e1d505f7b5d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -290,6 +290,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, struct brcmf_pub *drvr = ifp->drvr; struct ethhdr *eh; int head_delta; + unsigned int tx_bytes = skb->len; brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); @@ -364,7 +365,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, ndev->stats.tx_dropped++; } else { ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; + ndev->stats.tx_bytes += tx_bytes; } /* Return ok: we always eat the packet */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index a2b8d9171af2a..060889bf6d053 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -703,6 +703,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, u32 i, j; char end = '\0'; + if (chiprev >= BITS_PER_TYPE(u32)) { + brcmf_err("Invalid chip revision %u\n", chiprev); + return NULL; + } + for (i = 0; i < table_size; i++) { if (mapping_table[i].chipid == chip && mapping_table[i].revmask & BIT(chiprev)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index 430d2cca98b33..1285d3685c4f5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work) brcmf_fweh_event_name(event->code), event->code, event->emsg.ifidx, event->emsg.bsscfgidx, event->emsg.addr); + if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) { + bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx); + goto event_free; + } /* convert event message */ emsg_be = &event->emsg; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 61febc9bfa14a..721d587425c7a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -618,7 +618,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, } if (!brcmf_chip_set_active(devinfo->ci, resetintr)) - return -EINVAL; + return -EIO; return 0; } @@ -1109,6 +1109,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) BRCMF_NROF_H2D_COMMON_MSGRINGS; max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS; } + if (max_flowrings > 512) { + brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings); + return -EIO; + } if (devinfo->dma_idx_sz != 0) { bufsz = (max_submissionrings + max_completionrings) * diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c index fabfbb0b40b0c..d0a7465be586d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c @@ -158,12 +158,12 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi) 
struct brcmf_pno_macaddr_le pfn_mac; u8 *mac_addr = NULL; u8 *mac_mask = NULL; - int err, i; + int err, i, ri; - for (i = 0; i < pi->n_reqs; i++) - if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - mac_addr = pi->reqs[i]->mac_addr; - mac_mask = pi->reqs[i]->mac_addr_mask; + for (ri = 0; ri < pi->n_reqs; ri++) + if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + mac_addr = pi->reqs[ri]->mac_addr; + mac_mask = pi->reqs[ri]->mac_addr_mask; break; } @@ -185,7 +185,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi) pfn_mac.mac[0] |= 0x02; brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n", - pi->reqs[i]->reqid, pfn_mac.mac); + pi->reqs[ri]->reqid, pfn_mac.mac); err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac, sizeof(pfn_mac)); if (err) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 9929e90866f04..3c0d5c68eaca2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3401,6 +3401,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, /* Take arm out of reset */ if (!brcmf_chip_set_active(bus->ci, rstvec)) { brcmf_err("error getting out of ARM core reset\n"); + bcmerror = -EIO; goto err; } diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 0569f37e9ed59..8c9c6bfbaeee7 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5236,7 +5236,7 @@ static int get_wep_tx_idx(struct airo_info *ai) return -1; } -static int set_wep_key(struct airo_info *ai, u16 index, const char *key, +static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key, u16 keylen, int perm, int lock) { static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; @@ -5287,7 +5287,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; int i, rc; - char key[16]; + u8 key[16]; u16 index = 0; int j = 0; @@ -5315,12 +5315,22 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) } for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) { + int val; + + if (i % 3 == 2) + continue; + + val = hex_to_bin(data->wbuffer[i+j]); + if (val < 0) { + airo_print_err(ai->dev->name, "WebKey passed invalid key hex"); + return; + } switch(i%3) { case 0: - key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4; + key[i/3] = (u8)val << 4; break; case 1: - key[i/3] |= hex_to_bin(data->wbuffer[i+j]); + key[i/3] |= (u8)val; break; } } diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 532e3b91777d9..150805aec4071 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, /* Repeat initial/next rate. * For legacy IL_NUMBER_TRY == 1, this loop will not execute. * For HT IL_HT_NUMBER_TRY == 3, this executes twice. 
*/ - while (repeat_rate > 0) { + while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) { if (is_legacy(tbl_type.lq_type)) { if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) ant_toggle_cnt++; @@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, cpu_to_le32(new_rate); repeat_rate--; idx++; - if (idx >= LINK_QUAL_MAX_RETRY_NUM) - goto out; } il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, @@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, repeat_rate--; } -out: lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 7186e1dbbd6b5..d310337b16251 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1203,6 +1203,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; + struct sk_buff *orig_skb = skb; if (WARN_ON_ONCE(!mvmsta)) return -1; @@ -1235,8 +1236,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { + /* Free skbs created as part of TSO logic that have not yet been dequeued */ __skb_queue_purge(&mpdus_skbs); - return ret; + /* skb here is not necessarily same as skb that entered this method, + * so free it explicitly. + */ + if (skb == orig_skb) + ieee80211_free_txskb(mvm->hw, skb); + else + kfree_skb(skb); + /* there was error, but we consumed skb one way or another, so return 0 */ + return 0; } } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 8e412125a49c1..255286b2324e2 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -775,6 +775,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *cb; if (!vp->assoc) return; @@ -796,6 +797,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, memcpy(hdr->addr2, mac, ETH_ALEN); memcpy(hdr->addr3, vp->bssid, ETH_ALEN); + cb = IEEE80211_SKB_CB(skb); + cb->control.rates[0].count = 1; + cb->control.rates[1].idx = -1; + rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->chanctx_conf)->def.chan); @@ -3675,6 +3680,8 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, rx_status.band = channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); + if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates) + goto out; rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); hdr = (void *)skb->data; @@ -4209,6 +4216,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb) nlh = nlmsg_hdr(skb); gnlh = nlmsg_data(nlh); + + if (skb->len < nlh->nlmsg_len) + return -EINVAL; + err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX, hwsim_genl_policy, NULL); if (err) { @@ -4251,7 +4262,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work) spin_unlock_irqrestore(&hwsim_virtio_lock, flags); skb->data = skb->head; - skb_set_tail_pointer(skb, len); + skb_reset_tail_pointer(skb); + skb_put(skb, len); hwsim_virtio_handle_cmd(skb); spin_lock_irqsave(&hwsim_virtio_lock, flags); diff --git 
a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index bde9e4bbfffe7..7fb6eef409285 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -485,6 +485,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { }; static const struct of_device_id mwifiex_sdio_of_match_table[] = { + { .compatible = "marvell,sd8787" }, { .compatible = "marvell,sd8897" }, { .compatible = "marvell,sd8997" }, { } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index a5be66de1cff1..5a8060790a61f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -884,8 +884,9 @@ static inline bool mt76_is_skb_pktid(u8 pktid) static inline u8 mt76_tx_power_nss_delta(u8 nss) { static const u8 nss_delta[4] = { 0, 6, 9, 12 }; + u8 idx = nss - 1; - return nss_delta[nss - 1]; + return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0; } static inline bool mt76_testmode_enabled(struct mt76_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 1465a92ea3fc9..b26617026e831 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -950,7 +950,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) offset %= 32; val = mt76_rr(dev, addr); - val >>= (tid % 32); + val >>= offset; if (offset > 20) { addr += 4; diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 6be5ac8ba518d..dd26f20861807 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -939,30 +939,52 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch) return; while (index + sizeof(*e) <= len) { + u16 attr_size; + e = (struct wilc_attr_entry *)&buf[index]; - if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST) + attr_size = le16_to_cpu(e->attr_len); + + if (index + sizeof(*e) + attr_size > len) + return; + + if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST && + attr_size >= (sizeof(struct wilc_attr_ch_list) - sizeof(*e))) ch_list_idx = index; - else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL) + else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL && + attr_size == (sizeof(struct wilc_attr_oper_ch) - sizeof(*e))) op_ch_idx = index; + if (ch_list_idx && op_ch_idx) break; - index += le16_to_cpu(e->attr_len) + sizeof(*e); + + index += sizeof(*e) + attr_size; } if (ch_list_idx) { - u16 attr_size; - struct wilc_ch_list_elem *e; - int i; + unsigned int i; + u16 elem_size; ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx]; - attr_size = le16_to_cpu(ch_list->attr_len); - for (i = 0; i < attr_size;) { + /* the number of bytes following the final 'elem' member */ + elem_size = le16_to_cpu(ch_list->attr_len) - + (sizeof(*ch_list) - sizeof(struct wilc_attr_entry)); + for (i = 0; i < elem_size;) { + struct wilc_ch_list_elem *e; + e = (struct wilc_ch_list_elem *)(ch_list->elem + i); + + i += sizeof(*e); + if (i > elem_size) + break; + + i += e->no_of_channels; + if (i > elem_size) + break; + if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) { memset(e->ch_list, sta_ch, e->no_of_channels); break; } - i += e->no_of_channels; } } diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index 
d025a30930157..b25847799138b 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -467,14 +467,25 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len); if (rsn_ie) { + int rsn_ie_len = sizeof(struct element) + rsn_ie[1]; int offset = 8; - param->mode_802_11i = 2; - param->rsn_found = true; /* extract RSN capabilities */ - offset += (rsn_ie[offset] * 4) + 2; - offset += (rsn_ie[offset] * 4) + 2; - memcpy(param->rsn_cap, &rsn_ie[offset], 2); + if (offset < rsn_ie_len) { + /* skip over pairwise suites */ + offset += (rsn_ie[offset] * 4) + 2; + + if (offset < rsn_ie_len) { + /* skip over authentication suites */ + offset += (rsn_ie[offset] * 4) + 2; + + if (offset + 1 < rsn_ie_len) { + param->mode_802_11i = 2; + param->rsn_found = true; + memcpy(param->rsn_cap, &rsn_ie[offset], 2); + } + } + } } if (param->rsn_found) { diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index e14b9fc2c67ac..58d92311c7820 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) }, { }, }; +MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids); #define WILC_SDIO_BLOCK_SIZE 512 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index fed6d21cd6ce1..4bdd3a95f2d21 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4151,7 +4151,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); - rt2800_bbp_write(rt2x00dev, 86, 0); + if (rt2x00_rt(rt2x00dev, RT6352)) + rt2800_bbp_write(rt2x00dev, 86, 0x38); + else + rt2800_bbp_write(rt2x00dev, 86, 0); } if (rf->channel <= 14) { @@ -4352,7 +4355,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, reg = (rf->channel <= 14 ? 
0x1c : 0x24) + 2*rt2x00dev->lna_gain; rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); - rt2800_iq_calibrate(rt2x00dev, rf->channel); + if (rt2x00_rt(rt2x00dev, RT5592)) + rt2800_iq_calibrate(rt2x00dev, rf->channel); } bbp = rt2800_bbp_read(rt2x00dev, 4); @@ -5625,7 +5629,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev, if (qual->vgc_level != vgc_level) { if (rt2x00_rt(rt2x00dev, RT3572) || rt2x00_rt(rt2x00dev, RT3593) || - rt2x00_rt(rt2x00dev, RT3883)) { + rt2x00_rt(rt2x00dev, RT3883) || + rt2x00_rt(rt2x00dev, RT6352)) { rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level); } else if (rt2x00_rt(rt2x00dev, RT5592)) { @@ -5848,7 +5853,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); } else if (rt2x00_rt(rt2x00dev, RT6352)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401); - rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000); rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0); @@ -6110,6 +6115,27 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) reg = rt2800_register_read(rt2x00dev, US_CYC_CNT); rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125); rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); + } else if (rt2x00_is_soc(rt2x00dev)) { + struct clk *clk = clk_get_sys("bus", NULL); + int rate; + + if (IS_ERR(clk)) { + clk = clk_get_sys("cpu", NULL); + + if (IS_ERR(clk)) { + rate = 125; + } else { + rate = clk_get_rate(clk) / 3000000; + clk_put(clk); + } + } else { + rate = clk_get_rate(clk) / 1000000; + clk_put(clk); + } + + reg = rt2800_register_read(rt2x00dev, US_CYC_CNT); + rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate); + rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); } reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index b28fa0c4d180c..0ed4d67308d78 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1190,7 +1190,7 @@ struct rtl8723bu_c2h { u8 bw; } __packed ra_report; }; -}; +} __packed; struct rtl8xxxu_fileops; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 0d374a2948406..2cb86c28d11fe 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1607,18 +1607,18 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv) static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; - u32 val32, bonding; + u32 val32, bonding, sys_cfg; u16 val16; - val32 = rtl8xxxu_read32(priv, REG_SYS_CFG); - priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >> + sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); + priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >> SYS_CFG_CHIP_VERSION_SHIFT; - if (val32 & SYS_CFG_TRP_VAUX_EN) { + if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { dev_info(dev, "Unsupported test chip\n"); return -ENOTSUPP; } - if (val32 & SYS_CFG_BT_FUNC) { + if (sys_cfg & SYS_CFG_BT_FUNC) { if (priv->chip_cut >= 3) { sprintf(priv->chip_name, "8723BU"); priv->rtl_chip = RTL8723B; @@ -1640,7 +1640,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) if (val32 & MULTI_GPS_FUNC_EN)
priv->has_gps = 1; priv->is_multi_func = 1; - } else if (val32 & SYS_CFG_TYPE_ID) { + } else if (sys_cfg & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; if (priv->fops->tx_desc_size == @@ -1688,7 +1688,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) case RTL8188E: case RTL8192E: case RTL8723B: - switch (val32 & SYS_CFG_VENDOR_EXT_MASK) { + switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) { case SYS_CFG_VENDOR_ID_TSMC: sprintf(priv->chip_vendor, "TSMC"); break; @@ -1705,7 +1705,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) } break; default: - if (val32 & SYS_CFG_VENDOR_ID) { + if (sys_cfg & SYS_CFG_VENDOR_ID) { sprintf(priv->chip_vendor, "UMC"); priv->vendor_umc = 1; } else { @@ -1874,13 +1874,6 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) /* We have 8 bits to indicate validity */ map_addr = offset * 8; - if (map_addr >= EFUSE_MAP_LEN) { - dev_warn(dev, "%s: Illegal map_addr (%04x), " - "efuse corrupt!\n", - __func__, map_addr); - ret = -EINVAL; - goto exit; - } for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { /* Check word enable condition in the section */ if (word_mask & BIT(i)) { @@ -1891,6 +1884,13 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8); if (ret) goto exit; + if (map_addr >= EFUSE_MAP_LEN - 1) { + dev_warn(dev, "%s: Illegal map_addr (%04x), " + "efuse corrupt!\n", + __func__, map_addr); + ret = -EINVAL; + goto exit; + } priv->efuse_wifi.raw[map_addr++] = val8; ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8); @@ -2925,12 +2925,12 @@ bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv, } if (!(simubitmap & 0x30) && priv->tx_paths > 1) { - /* path B RX OK */ + /* path B TX OK */ for (i = 4; i < 6; i++) result[3][i] = result[c1][i]; } - if (!(simubitmap & 0x30) && priv->tx_paths > 1) { + if (!(simubitmap & 0xc0) && priv->tx_paths > 1) { /* path B RX OK */ for (i = 6; i < 8; i++) result[3][i] = result[c1][i]; @@ -4338,15 +4338,14 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv, h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff; h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff; - h2c.ramask.arg = 0x80; h2c.b_macid_cfg.data1 = rateid; if (sgi) h2c.b_macid_cfg.data1 |= BIT(7); h2c.b_macid_cfg.data2 = bw; - dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n", - __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg)); + dev_dbg(&priv->udev->dev, "%s: rate mask %08x, rateid %02x, sgi %d, size %zi\n", + __func__, ramask, rateid, sgi, sizeof(h2c.b_macid_cfg)); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg)); } @@ -4370,12 +4369,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv, void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, u8 macid, bool connect) { -#ifdef RTL8XXXU_GEN2_REPORT_CONNECT /* - * Barry Day reports this causes issues with 8192eu and 8723bu - * devices reconnecting. The reason for this is unclear, but - * until it is better understood, leave the code in place but - * disabled, so it is not lost. + * The firmware turns on the rate control when it knows it's + * connected to a network. 
*/ struct h2c_cmd h2c; @@ -4388,7 +4384,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, h2c.media_status_rpt.parm &= ~BIT(0); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); -#endif } void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) @@ -4508,6 +4503,53 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta) return network_type; } +static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time) +{ + u32 reg_edca_param[IEEE80211_NUM_ACS] = { + [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM, + [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM, + [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM, + [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM, + }; + u32 val32; + u16 wireless_mode = 0; + u8 aifs, aifsn, sifs; + int i; + + if (priv->vif) { + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid); + if (sta) + wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta); + rcu_read_unlock(); + } + + if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ || + (wireless_mode & WIRELESS_MODE_N_24G)) + sifs = 16; + else + sifs = 10; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + val32 = rtl8xxxu_read32(priv, reg_edca_param[i]); + + /* It was set in conf_tx. */ + aifsn = val32 & 0xff; + + /* aifsn not set yet or already fixed */ + if (aifsn < 2 || aifsn > 15) + continue; + + aifs = aifsn * slot_time + sifs; + + val32 &= ~0xff; + val32 |= aifs; + rtl8xxxu_write32(priv, reg_edca_param[i], val32); + } +} + static void rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changed) @@ -4593,6 +4635,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, else val8 = 20; rtl8xxxu_write8(priv, REG_SLOT, val8); + + rtl8xxxu_set_aifs(priv, val8); } if (changed & BSS_CHANGED_BSSID) { @@ -4984,6 +5028,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (control && control->sta) sta = control->sta; + queue = rtl8xxxu_queue_select(hw, skb); + tx_desc = skb_push(skb, tx_desc_size); memset(tx_desc, 0, tx_desc_size); @@ -4996,7 +5042,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, is_broadcast_ether_addr(ieee80211_get_DA(hdr))) tx_desc->txdw0 |= TXDESC_BROADMULTICAST; - queue = rtl8xxxu_queue_select(hw, skb); tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT); if (tx_info->control.hw_key) { @@ -5471,7 +5516,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) rarpt->txrate.flags = 0; rate = c2h->ra_report.rate; sgi = c2h->ra_report.sgi; - bw = c2h->ra_report.bw; if (rate < DESC_RATE_MCS0) { rarpt->txrate.legacy = @@ -5488,8 +5532,13 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) RATE_INFO_FLAGS_SHORT_GI; } - if (bw == RATE_INFO_BW_20) - rarpt->txrate.bw |= RATE_INFO_BW_20; + if (skb->len >= offsetofend(typeof(*c2h), ra_report.bw)) { + if (c2h->ra_report.bw == RTL8XXXU_CHANNEL_WIDTH_40) + bw = RATE_INFO_BW_40; + else + bw = RATE_INFO_BW_20; + rarpt->txrate.bw = bw; + } } bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate); rarpt->bit_rate = bit_rate; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index e34d33e73e525..d3027f8fbd383 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -2385,14 +2385,10 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel) rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "Just Read IQK Matrix 
reg for channel:%d....\n", channel); - if ((rtlphy->iqk_matrix[indexforchannel]. - value[0] != NULL) - /*&&(regea4 != 0) */) + if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0) _rtl92d_phy_patha_fill_iqk_matrix(hw, true, - rtlphy->iqk_matrix[ - indexforchannel].value, 0, - (rtlphy->iqk_matrix[ - indexforchannel].value[0][2] == 0)); + rtlphy->iqk_matrix[indexforchannel].value, 0, + rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0); if (IS_92D_SINGLEPHY(rtlhal->version)) { if ((rtlphy->iqk_matrix[ indexforchannel].value[0][4] != 0) diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 75b5d545b49e8..dc076d8448680 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -694,8 +694,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len) struct rndis_query *get; struct rndis_query_c *get_c; } u; - int ret, buflen; - int resplen, respoffs, copylen; + int ret; + size_t buflen, resplen, respoffs, copylen; buflen = *len + sizeof(*u.get); if (buflen < CONTROL_BUFFER_SIZE) @@ -730,22 +730,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len) if (respoffs > buflen) { /* Device returned data offset outside buffer, error. */ - netdev_dbg(dev->net, "%s(%s): received invalid " - "data offset: %d > %d\n", __func__, - oid_to_string(oid), respoffs, buflen); + netdev_dbg(dev->net, + "%s(%s): received invalid data offset: %zu > %zu\n", + __func__, oid_to_string(oid), respoffs, buflen); ret = -EINVAL; goto exit_unlock; } - if ((resplen + respoffs) > buflen) { - /* Device would have returned more data if buffer would - * have been big enough. Copy just the bits that we got. - */ - copylen = buflen - respoffs; - } else { - copylen = resplen; - } + copylen = min(resplen, buflen - respoffs); if (copylen > *len) copylen = *len; diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 9c4c585572488..b7fa03813da11 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -466,7 +466,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) tid, 0); } } - if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + + if (IEEE80211_SKB_CB(skb)->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { q_num = MGMT_SOFT_Q; skb->priority = q_num; } diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index dca81a4bbdd7f..30d2eccbcadd5 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) u8 header_size; u8 vap_id = 0; u8 dword_align_bytes; + bool tx_eapol; u16 seq_num; info = IEEE80211_SKB_CB(skb); vif = info->control.vif; tx_params = (struct skb_info *)info->driver_data; + tx_eapol = IEEE80211_SKB_CB(skb)->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); @@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) } } - if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + if (tx_eapol) { rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n"); data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 6a9178896c909..1ba9749692164 100644 --- 
a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -48,7 +48,6 @@ #include typedef unsigned int pending_ring_idx_t; -#define INVALID_PENDING_RING_IDX (~0U) struct pending_tx_info { struct xen_netif_tx_request req; /* tx request */ @@ -82,8 +81,6 @@ struct xenvif_rx_meta { /* Discriminate from any valid pending_idx value. */ #define INVALID_PENDING_IDX 0xFFFF -#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE - #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE /* The maximum number of frags is derived from the size of a grant (same @@ -367,11 +364,6 @@ void xenvif_free(struct xenvif *vif); int xenvif_xenbus_init(void); void xenvif_xenbus_fini(void); -int xenvif_schedulable(struct xenvif *vif); - -int xenvif_queue_stopped(struct xenvif_queue *queue); -void xenvif_wake_queue(struct xenvif_queue *queue); - /* (Un)Map communication rings. */ void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue); int xenvif_map_frontend_data_rings(struct xenvif_queue *queue, @@ -394,17 +386,13 @@ int xenvif_dealloc_kthread(void *data); irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread); -void xenvif_rx_action(struct xenvif_queue *queue); -void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); +bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); void xenvif_carrier_on(struct xenvif *vif); /* Callback from stack when TX packet can be released */ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); -/* Unmap a pending page and release it back to the guest */ -void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); - static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) { return MAX_PENDING_REQS - diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7ce9807fc24c5..97cf5bc48902a 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) wake_up(&queue->dealloc_wq); } -int xenvif_schedulable(struct xenvif *vif) +static int xenvif_schedulable(struct xenvif *vif) { return netif_running(vif->dev) && test_bit(VIF_STATUS_CONNECTED, &vif->status) && @@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -int xenvif_queue_stopped(struct xenvif_queue *queue) -{ - struct net_device *dev = queue->vif->dev; - unsigned int id = queue->id; - return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id)); -} - -void xenvif_wake_queue(struct xenvif_queue *queue) -{ - struct net_device *dev = queue->vif->dev; - unsigned int id = queue->id; - netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); -} - static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { @@ -269,14 +255,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) skb_clear_hash(skb); - xenvif_rx_queue_tail(queue, skb); + if (!xenvif_rx_queue_tail(queue, skb)) + goto drop; + xenvif_kick_thread(queue); return NETDEV_TX_OK; drop: vif->dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index b0cbc7fead745..f9373a88cf37c 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -112,6 +112,8 @@ static void 
make_tx_response(struct xenvif_queue *queue, s8 st); static void push_tx_responses(struct xenvif_queue *queue); +static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); + static inline int tx_work_todo(struct xenvif_queue *queue); static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, @@ -330,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue, struct xenvif_tx_cb { - u16 pending_idx; + u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1]; + u8 copy_count; }; #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) +#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i]) +#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count) static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, u16 pending_idx, @@ -368,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) return skb; } -static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, - struct sk_buff *skb, - struct xen_netif_tx_request *txp, - struct gnttab_map_grant_ref *gop, - unsigned int frag_overflow, - struct sk_buff *nskb) +static void xenvif_get_requests(struct xenvif_queue *queue, + struct sk_buff *skb, + struct xen_netif_tx_request *first, + struct xen_netif_tx_request *txfrags, + unsigned *copy_ops, + unsigned *map_ops, + unsigned int frag_overflow, + struct sk_buff *nskb, + unsigned int extra_count, + unsigned int data_len) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; - u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; - int start; + u16 pending_idx; pending_ring_idx_t index; unsigned int nr_slots; + struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; + struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; + struct xen_netif_tx_request *txp = first; + + nr_slots = shinfo->nr_frags + 1; + + copy_count(skb) = 0; - nr_slots = shinfo->nr_frags; + /* Create copy ops for exactly data_len bytes into the skb head. */ + __skb_put(skb, data_len); + while (data_len > 0) { + int amount = data_len > txp->size ? txp->size : data_len; - /* Skip first skb fragment if it is on same page as header fragment. */ - start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); + cop->source.u.ref = txp->gref; + cop->source.domid = queue->vif->domid; + cop->source.offset = txp->offset; - for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, txp++, gop++) { + cop->dest.domid = DOMID_SELF; + cop->dest.offset = (offset_in_page(skb->data + + skb_headlen(skb) - + data_len)) & ~XEN_PAGE_MASK; + cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) + - data_len); + + cop->len = amount; + cop->flags = GNTCOPY_source_gref; + + index = pending_index(queue->pending_cons); + pending_idx = queue->pending_ring[index]; + callback_param(queue, pending_idx).ctx = NULL; + copy_pending_idx(skb, copy_count(skb)) = pending_idx; + copy_count(skb)++; + + cop++; + data_len -= amount; + + if (amount == txp->size) { + /* The copy op covered the full tx_request */ + + memcpy(&queue->pending_tx_info[pending_idx].req, + txp, sizeof(*txp)); + queue->pending_tx_info[pending_idx].extra_count = + (txp == first) ? extra_count : 0; + + if (txp == first) + txp = txfrags; + else + txp++; + queue->pending_cons++; + nr_slots--; + } else { + /* The copy op partially covered the tx_request. + * The remainder will be mapped. 
+ */ + txp->offset += amount; + txp->size -= amount; + } + } + + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; + shinfo->nr_frags++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; - xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop); + xenvif_tx_create_map_op(queue, pending_idx, txp, + txp == first ? extra_count : 0, gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + + if (txp == first) + txp = txfrags; + else + txp++; } if (frag_overflow) { @@ -413,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que skb_shinfo(skb)->frag_list = nskb; } - return gop; + (*copy_ops) = cop - queue->tx_copy_ops; + (*map_ops) = gop - queue->tx_map_ops; } static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, @@ -449,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, struct gnttab_copy **gopp_copy) { struct gnttab_map_grant_ref *gop_map = *gopp_map; - u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + u16 pending_idx; /* This always points to the shinfo of the skb being checked, which * could be either the first or the one on the frag_list */ @@ -460,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, struct skb_shared_info *first_shinfo = NULL; int nr_frags = shinfo->nr_frags; const bool sharedslot = nr_frags && - frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; - int i, err; + frag_get_pending_idx(&shinfo->frags[0]) == + copy_pending_idx(skb, copy_count(skb) - 1); + int i, err = 0; - /* Check status of header. */ - err = (*gopp_copy)->status; - if (unlikely(err)) { - if (net_ratelimit()) - netdev_dbg(queue->vif->dev, - "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", - (*gopp_copy)->status, - pending_idx, - (*gopp_copy)->source.u.ref); - /* The first frag might still have this slot mapped */ - if (!sharedslot) - xenvif_idx_release(queue, pending_idx, - XEN_NETIF_RSP_ERROR); + for (i = 0; i < copy_count(skb); i++) { + int newerr; + + /* Check status of header. */ + pending_idx = copy_pending_idx(skb, i); + + newerr = (*gopp_copy)->status; + if (likely(!newerr)) { + /* The first frag might still have this slot mapped */ + if (i < copy_count(skb) - 1 || !sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } else { + err = newerr; + if (net_ratelimit()) + netdev_dbg(queue->vif->dev, + "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", + (*gopp_copy)->status, + pending_idx, + (*gopp_copy)->source.u.ref); + /* The first frag might still have this slot mapped */ + if (i < copy_count(skb) - 1 || !sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + } + (*gopp_copy)++; } - (*gopp_copy)++; check_frags: for (i = 0; i < nr_frags; i++, gop_map++) { @@ -524,14 +605,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, if (err) continue; - /* First error: if the header haven't shared a slot with the - * first frag, release it as well. - */ - if (!sharedslot) - xenvif_idx_release(queue, - XENVIF_TX_CB(skb)->pending_idx, - XEN_NETIF_RSP_OKAY); - /* Invalidate preceding fragments of this skb. 
*/ for (j = 0; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); @@ -801,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, unsigned *copy_ops, unsigned *map_ops) { - struct gnttab_map_grant_ref *gop = queue->tx_map_ops; struct sk_buff *skb, *nskb; int ret; unsigned int frag_overflow; @@ -883,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, continue; } + data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ? + XEN_NETBACK_TX_COPY_LEN : txreq.size; + ret = xenvif_count_requests(queue, &txreq, extra_count, txfrags, work_to_do); + if (unlikely(ret < 0)) break; @@ -910,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, index = pending_index(queue->pending_cons); pending_idx = queue->pending_ring[index]; - data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN && - ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? - XEN_NETBACK_TX_COPY_LEN : txreq.size; + if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size) + data_len = txreq.size; skb = xenvif_alloc_skb(data_len); if (unlikely(skb == NULL)) { @@ -923,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, } skb_shinfo(skb)->nr_frags = ret; - if (data_len < txreq.size) - skb_shinfo(skb)->nr_frags++; /* At this point shinfo->nr_frags is in fact the number of * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. */ @@ -986,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, type); } - XENVIF_TX_CB(skb)->pending_idx = pending_idx; - - __skb_put(skb, data_len); - queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; - queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; - queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; - - queue->tx_copy_ops[*copy_ops].dest.u.gmfn = - virt_to_gfn(skb->data); - queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; - queue->tx_copy_ops[*copy_ops].dest.offset = - offset_in_page(skb->data) & ~XEN_PAGE_MASK; - - queue->tx_copy_ops[*copy_ops].len = data_len; - queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; - - (*copy_ops)++; - - if (data_len < txreq.size) { - frag_set_pending_idx(&skb_shinfo(skb)->frags[0], - pending_idx); - xenvif_tx_create_map_op(queue, pending_idx, &txreq, - extra_count, gop); - gop++; - } else { - frag_set_pending_idx(&skb_shinfo(skb)->frags[0], - INVALID_PENDING_IDX); - memcpy(&queue->pending_tx_info[pending_idx].req, - &txreq, sizeof(txreq)); - queue->pending_tx_info[pending_idx].extra_count = - extra_count; - } - - queue->pending_cons++; - - gop = xenvif_get_requests(queue, skb, txfrags, gop, - frag_overflow, nskb); + xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops, + map_ops, frag_overflow, nskb, extra_count, + data_len); __skb_queue_tail(&queue->tx_queue, skb); queue->tx.req_cons = idx; - if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || + if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) || (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) break; } - (*map_ops) = gop - queue->tx_map_ops; return; } @@ -1112,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; u16 pending_idx; - unsigned data_len; - pending_idx = XENVIF_TX_CB(skb)->pending_idx; + pending_idx = copy_pending_idx(skb, 0); txp = &queue->pending_tx_info[pending_idx].req; /* Check the remap error code. 
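Two hunks above move the data_len computation ahead of xenvif_count_requests() and then widen it once the slot count brushes the legacy maximum, so no fragment is wasted on a tiny remainder. The resulting policy as a compilable sketch; the constants match the driver's current values but are repeated here for illustration only:

#include <stdio.h>

#define XEN_NETBACK_TX_COPY_LEN		128
#define XEN_NETBK_LEGACY_SLOTS_MAX	18

/* size is txreq.size, slots the value returned by
 * xenvif_count_requests(). */
static unsigned int head_copy_len(unsigned int size, int slots)
{
	unsigned int data_len = size > XEN_NETBACK_TX_COPY_LEN
			      ? XEN_NETBACK_TX_COPY_LEN : size;

	/* At the slot limit there is no spare frag for the remainder,
	 * so copy the whole first request into the head. */
	if (slots >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < size)
		data_len = size;
	return data_len;
}

int main(void)
{
	printf("%u %u\n", head_copy_len(2000, 3), head_copy_len(2000, 17));
	return 0;
}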
*/ @@ -1133,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) continue; } - data_len = skb->len; - callback_param(queue, pending_idx).ctx = NULL; - if (data_len < txp->size) { - /* Append the packet payload as a fragment. */ - txp->offset += data_len; - txp->size -= data_len; - } else { - /* Schedule a response immediately. */ - xenvif_idx_release(queue, pending_idx, - XEN_NETIF_RSP_OKAY); - } - if (txp->flags & XEN_NETTXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (txp->flags & XEN_NETTXF_data_validated) @@ -1330,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) /* Called after netfront has transmitted */ int xenvif_tx_action(struct xenvif_queue *queue, int budget) { - unsigned nr_mops, nr_cops = 0; + unsigned nr_mops = 0, nr_cops = 0; int work_done, ret; if (unlikely(!tx_work_todo(queue))) @@ -1417,7 +1442,7 @@ static void push_tx_responses(struct xenvif_queue *queue) notify_remote_via_irq(queue->tx_irq); } -void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) +static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) { int ret; struct gnttab_unmap_grant_ref tx_unmap_op; diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index a0335407be423..0ba754ebc5baa 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) return false; } -void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) +bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) { unsigned long flags; + bool ret = true; spin_lock_irqsave(&queue->rx_queue.lock, flags); @@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) struct net_device *dev = queue->vif->dev; netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); - kfree_skb(skb); - queue->vif->dev->stats.rx_dropped++; + ret = false; } else { if (skb_queue_empty(&queue->rx_queue)) xenvif_update_needed_slots(queue, skb); @@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) } spin_unlock_irqrestore(&queue->rx_queue.lock, flags); + + return ret; } static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) @@ -486,7 +488,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue) #define RX_BATCH_SIZE 64 -void xenvif_rx_action(struct xenvif_queue *queue) +static void xenvif_rx_action(struct xenvif_queue *queue) { struct sk_buff_head completed_skbs; unsigned int work_done = 0; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index ca261e0fc9c9b..9ee9ce0493fe6 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be) unsigned int queue_index; xen_unregister_watchers(vif); - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ @@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev) struct backend_info *be = dev_get_drvdata(&dev->dev); unregister_hotplug_status_watch(be); + xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); if (be->vif) { kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); backend_disconnect(be); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 569f3c8e7b756..3d149890fa36e 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1868,6 
+1868,12 @@ static int netfront_resume(struct xenbus_device *dev) netif_tx_unlock_bh(info->netdev); xennet_disconnect_backend(info); + + rtnl_lock(); + if (info->queues) + xennet_destroy_queues(info); + rtnl_unlock(); + return 0; } diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index 4dc7bd7e02b6b..90bea6a1db692 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -238,9 +238,6 @@ static int fdp_nci_open(struct nci_dev *ndev) { int r; struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); r = info->phy_ops->enable(info->phy); @@ -249,35 +246,26 @@ static int fdp_nci_open(struct nci_dev *ndev) static int fdp_nci_close(struct nci_dev *ndev) { - struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); return 0; } static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); + int ret; if (atomic_dec_and_test(&info->data_pkt_counter)) info->data_pkt_counter_cb(ndev); - return info->phy_ops->write(info->phy, skb); -} - -int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) -{ - struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; + ret = info->phy_ops->write(info->phy, skb); + if (ret < 0) { + kfree_skb(skb); + return ret; + } - dev_dbg(dev, "%s\n", __func__); - return nci_recv_frame(ndev, skb); + consume_skb(skb); + return 0; } -EXPORT_SYMBOL(fdp_nci_recv_frame); static int fdp_nci_request_firmware(struct nci_dev *ndev) { @@ -489,8 +477,6 @@ static int fdp_nci_setup(struct nci_dev *ndev) int r; u8 patched = 0; - dev_dbg(dev, "%s\n", __func__); - r = nci_core_init(ndev); if (r) goto error; @@ -598,9 +584,7 @@ static int fdp_nci_core_reset_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - dev_dbg(dev, "%s\n", __func__); info->setup_reset_ntf = 1; wake_up(&info->setup_wq); @@ -611,9 +595,7 @@ static int fdp_nci_prop_patch_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - dev_dbg(dev, "%s\n", __func__); info->setup_patch_ntf = 1; info->setup_patch_status = skb->data[0]; wake_up(&info->setup_wq); @@ -786,11 +768,6 @@ EXPORT_SYMBOL(fdp_nci_probe); void fdp_nci_remove(struct nci_dev *ndev) { - struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); - nci_unregister_device(ndev); nci_free_device(ndev); } diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h index 9bd1f3f23e2d1..ead3b21ccae68 100644 --- a/drivers/nfc/fdp/fdp.h +++ b/drivers/nfc/fdp/fdp.h @@ -25,6 +25,5 @@ int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops, struct nci_dev **ndev, int tx_headroom, int tx_tailroom, u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg); void fdp_nci_remove(struct nci_dev *ndev); -int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); #endif /* __LOCAL_FDP_H_ */ diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index ad0abb1f0bae9..5e300788be525 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -49,7 +49,6 @@ static int fdp_nci_i2c_enable(void *phy_id) { struct fdp_i2c_phy *phy = 
phy_id; - dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__); fdp_nci_i2c_reset(phy); return 0; @@ -59,7 +58,6 @@ static void fdp_nci_i2c_disable(void *phy_id) { struct fdp_i2c_phy *phy = phy_id; - dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__); fdp_nci_i2c_reset(phy); } @@ -197,7 +195,6 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb) static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) { struct fdp_i2c_phy *phy = phy_id; - struct i2c_client *client; struct sk_buff *skb; int r; @@ -206,9 +203,6 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) return IRQ_NONE; } - client = phy->i2c_dev; - dev_dbg(&client->dev, "%s\n", __func__); - r = fdp_nci_i2c_read(phy, &skb); if (r == -EREMOTEIO) @@ -217,7 +211,7 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) return IRQ_HANDLED; if (skb != NULL) - fdp_nci_recv_frame(phy->ndev, skb); + nci_recv_frame(phy->ndev, skb); return IRQ_HANDLED; } @@ -288,8 +282,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client) u32 clock_freq; int r = 0; - dev_dbg(dev, "%s\n", __func__); - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(dev, "No I2C_FUNC_I2C support\n"); return -ENODEV; @@ -351,8 +343,6 @@ static int fdp_nci_i2c_remove(struct i2c_client *client) { struct fdp_i2c_phy *phy = i2c_get_clientdata(client); - dev_dbg(&client->dev, "%s\n", __func__); - fdp_nci_remove(phy->ndev); fdp_nci_i2c_disable(phy); diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index f81f1cae93243..41f27e1cac20a 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -151,10 +151,15 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv, ret = -EREMOTEIO; } else ret = 0; + } + + if (ret) { kfree_skb(skb); + return ret; } - return ret; + consume_skb(skb); + return 0; } static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv, diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c index a0ce95a287c54..b68b315689c3a 100644 --- a/drivers/nfc/nxp-nci/core.c +++ b/drivers/nfc/nxp-nci/core.c @@ -70,22 +70,20 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) struct nxp_nci_info *info = nci_get_drvdata(ndev); int r; - if (!info->phy_ops->write) { - r = -ENOTSUPP; - goto send_exit; - } + if (!info->phy_ops->write) + return -EOPNOTSUPP; - if (info->mode != NXP_NCI_MODE_NCI) { - r = -EINVAL; - goto send_exit; - } + if (info->mode != NXP_NCI_MODE_NCI) + return -EINVAL; r = info->phy_ops->write(info->phy_id, skb); - if (r < 0) + if (r < 0) { kfree_skb(skb); + return r; + } -send_exit: - return r; + consume_skb(skb); + return 0; } static struct nci_ops nxp_nci_ops = { @@ -104,10 +102,8 @@ int nxp_nci_probe(void *phy_id, struct device *pdev, int r; info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL); - if (!info) { - r = -ENOMEM; - goto probe_exit; - } + if (!info) + return -ENOMEM; info->phy_id = phy_id; info->pdev = pdev; @@ -120,31 +116,25 @@ int nxp_nci_probe(void *phy_id, struct device *pdev, if (info->phy_ops->set_mode) { r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); if (r < 0) - goto probe_exit; + return r; } info->mode = NXP_NCI_MODE_COLD; info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS, NXP_NCI_HDR_LEN, 0); - if (!info->ndev) { - r = -ENOMEM; - goto probe_exit; - } + if (!info->ndev) + return -ENOMEM; nci_set_parent_dev(info->ndev, pdev); nci_set_drvdata(info->ndev, info); r = nci_register_device(info->ndev); - if (r < 0) - goto 
probe_exit_free_nci; + if (r < 0) { + nci_free_device(info->ndev); + return r; + } *ndev = info->ndev; - - goto probe_exit; - -probe_exit_free_nci: - nci_free_device(info->ndev); -probe_exit: return r; } EXPORT_SYMBOL(nxp_nci_probe); diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index 8d7e29d953b7e..87e1296c68381 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -1319,6 +1319,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg, if (IS_ERR(resp)) return PTR_ERR(resp); + memset(&nfc_target, 0, sizeof(struct nfc_target)); + rsp = (struct pn533_cmd_jump_dep_response *)resp->data; rc = rsp->status & PN533_CMD_RET_MASK; @@ -1960,6 +1962,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, dev_dbg(dev->dev, "Creating new target\n"); + memset(&nfc_target, 0, sizeof(struct nfc_target)); + nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; nfc_target.nfcid1_len = 10; memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len); diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index 84f2983bf3841..57b07446bb768 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) return usb_submit_urb(phy->ack_urb, flags); } +struct pn533_out_arg { + struct pn533_usb_phy *phy; + struct completion done; +}; + static int pn533_usb_send_frame(struct pn533 *dev, struct sk_buff *out) { struct pn533_usb_phy *phy = dev->phy; + struct pn533_out_arg arg; + void *cntx; int rc; if (phy->priv == NULL) @@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev, print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); + init_completion(&arg.done); + cntx = phy->out_urb->context; + phy->out_urb->context = &arg; + rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); if (rc) return rc; + wait_for_completion(&arg.done); + phy->out_urb->context = cntx; + if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request for response for sent packet directly */ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL); @@ -412,7 +426,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) return arg.rc; } -static void pn533_send_complete(struct urb *urb) +static void pn533_out_complete(struct urb *urb) +{ + struct pn533_out_arg *arg = urb->context; + struct pn533_usb_phy *phy = arg->phy; + + switch (urb->status) { + case 0: + break; /* success */ + case -ECONNRESET: + case -ENOENT: + dev_dbg(&phy->udev->dev, + "The urb has been stopped (status %d)\n", + urb->status); + break; + case -ESHUTDOWN: + default: + nfc_err(&phy->udev->dev, + "Urb failure (status %d)\n", + urb->status); + } + + complete(&arg->done); +} + +static void pn533_ack_complete(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; @@ -500,10 +538,10 @@ static int pn533_usb_probe(struct usb_interface *interface, usb_fill_bulk_urb(phy->out_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), - NULL, 0, pn533_send_complete, phy); + NULL, 0, pn533_out_complete, phy); usb_fill_bulk_urb(phy->ack_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), - NULL, 0, pn533_send_complete, phy); + NULL, 0, pn533_ack_complete, phy); switch (id->driver_info) { case PN533_DEVICE_STD: diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index ba6c486d64659..9b43cd3a45afc 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -97,11 +97,15 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct 
sk_buff *skb) } ret = s3fwrn5_write(info, skb); - if (ret < 0) + if (ret < 0) { kfree_skb(skb); + mutex_unlock(&info->mutex); + return ret; + } + consume_skb(skb); mutex_unlock(&info->mutex); - return ret; + return 0; } static int s3fwrn5_nci_post_setup(struct nci_dev *ndev) diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index 807eae04c1e34..37d397aae9b9d 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -327,7 +327,7 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, * AID 81 5 to 16 * PARAMETERS 82 0 to 255 */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && + if (skb->len < NFC_MIN_AID_LENGTH + 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; @@ -340,8 +340,10 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, /* Check next byte is PARAMETERS tag (82) */ if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + NFC_EVT_TRANSACTION_PARAMS_TAG) { + devm_kfree(dev, transaction); return -EPROTO; + } transaction->params_len = skb->data[transaction->aid_len + 3]; memcpy(transaction->params, skb->data + diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ab060b4911ffd..e162f1dfbafe9 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2285,18 +2285,21 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, enum pr_type type, bool abort) { u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); } static int nvme_pr_clear(struct block_device *bdev, u64 key) { - u32 cdw10 = 1 | (key ? 1 << 3 : 0); - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); + u32 cdw10 = 1 | (key ? 0 : 1 << 3); + + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); + u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3); + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } @@ -2946,7 +2949,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) nvme_init_subnqn(subsys, ctrl, id); memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); memcpy(subsys->model, id->mn, sizeof(subsys->model)); - memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); subsys->vendor_id = le16_to_cpu(id->vid); subsys->cmic = id->cmic; subsys->awupf = le16_to_cpu(id->awupf); @@ -3090,10 +3092,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (!ctrl->identified) { int i; - ret = nvme_init_subsystem(ctrl, id); - if (ret) - goto out_free; - /* * Check for quirks. 
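The nvme_pr_clear()/nvme_pr_release() hunks above fix two inverted details: clear was issued as a Reservation Register command instead of Reservation Release, and the IEKEY bit was set when a key was supplied rather than when it was absent. A standalone sketch of the corrected CDW10 encoding (field layout per the NVMe Reservation Release command: bits 15:8 reservation type, bit 3 IEKEY, bits 2:0 action, 0 = release, 1 = clear; the helper name is ours):

#include <stdint.h>
#include <stdio.h>

/* IEKEY ("ignore existing key") is set only when the caller supplies
 * no key; the old code had the test the wrong way around. */
static uint32_t pr_release_cdw10(uint8_t rtype, uint64_t key, uint8_t action)
{
	return (uint32_t)rtype << 8 | action | (key ? 0 : 1 << 3);
}

int main(void)
{
	printf("clear, no key: 0x%x\n", pr_release_cdw10(0, 0, 1));
	printf("release, key:  0x%x\n", pr_release_cdw10(1, 0xabcd, 0));
	return 0;
}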
Quirk can depend on firmware version, * so, in principle, the set of quirks present can change @@ -3106,7 +3104,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (quirk_matches(id, &core_quirks[i])) ctrl->quirks |= core_quirks[i].quirks; } + + ret = nvme_init_subsystem(ctrl, id); + if (ret) + goto out_free; } + memcpy(ctrl->subsys->firmware_rev, id->fr, + sizeof(ctrl->subsys->firmware_rev)); if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); @@ -3228,8 +3232,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) return ret; if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { + /* + * Do not return errors unless we are in a controller reset, + * the controller works perfectly fine without hwmon. + */ ret = nvme_hwmon_init(ctrl); - if (ret < 0) + if (ret == -EINTR) return ret; } @@ -3322,11 +3330,17 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, case NVME_IOCTL_IO_CMD: return nvme_dev_user_cmd(ctrl, argp); case NVME_IOCTL_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; dev_warn(ctrl->device, "resetting controller\n"); return nvme_reset_ctrl_sync(ctrl); case NVME_IOCTL_SUBSYS_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; return nvme_reset_subsystem(ctrl); case NVME_IOCTL_RESCAN: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; nvme_queue_scan(ctrl); return 0; default: @@ -4481,6 +4495,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + nvme_hwmon_exit(ctrl); nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 552dbc04567bc..9e6e56c20ec99 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -12,7 +12,7 @@ struct nvme_hwmon_data { struct nvme_ctrl *ctrl; - struct nvme_smart_log log; + struct nvme_smart_log *log; struct mutex read_lock; }; @@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) { return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, - NVME_CSI_NVM, &data->log, sizeof(data->log), 0); + NVME_CSI_NVM, data->log, sizeof(*data->log), 0); } static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct nvme_hwmon_data *data = dev_get_drvdata(dev); - struct nvme_smart_log *log = &data->log; + struct nvme_smart_log *log = data->log; int temp; int err; @@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log.temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1])) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; @@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, break; case hwmon_temp_input: case hwmon_temp_label: - if (!channel || data->log.temp_sensor[channel - 1]) + if (!channel || data->log->temp_sensor[channel - 1]) return 0444; break; default: @@ -223,33 +223,57 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = { int nvme_hwmon_init(struct nvme_ctrl *ctrl) { - struct device *dev = ctrl->dev; + struct device *dev = ctrl->device; struct nvme_hwmon_data *data; struct device *hwmon; int err; - data = devm_kzalloc(dev, 
sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) - return 0; + return -ENOMEM; + + data->log = kzalloc(sizeof(*data->log), GFP_KERNEL); + if (!data->log) { + err = -ENOMEM; + goto err_free_data; + } data->ctrl = ctrl; mutex_init(&data->read_lock); err = nvme_hwmon_get_smart_log(data); if (err) { - dev_warn(ctrl->device, - "Failed to read smart log (error %d)\n", err); - devm_kfree(dev, data); - return err; + dev_warn(dev, "Failed to read smart log (error %d)\n", err); + goto err_free_log; } - hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data, - &nvme_hwmon_chip_info, - NULL); + hwmon = hwmon_device_register_with_info(dev, "nvme", + data, &nvme_hwmon_chip_info, + NULL); if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); - devm_kfree(dev, data); + err = PTR_ERR(hwmon); + goto err_free_log; } - + ctrl->hwmon_device = hwmon; return 0; + +err_free_log: + kfree(data->log); +err_free_data: + kfree(data); + return err; +} + +void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ + if (ctrl->hwmon_device) { + struct nvme_hwmon_data *data = + dev_get_drvdata(ctrl->hwmon_device); + + hwmon_device_unregister(ctrl->hwmon_device); + ctrl->hwmon_device = NULL; + kfree(data->log); + kfree(data); + } } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 58cf9e39d613e..c3e4d9b6f9c0d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -257,6 +257,9 @@ struct nvme_ctrl { struct rw_semaphore namespaces_rwsem; struct device ctrl_device; struct device *device; /* char device */ +#ifdef CONFIG_NVME_HWMON + struct device *hwmon_device; +#endif struct cdev cdev; struct work_struct reset_work; struct work_struct delete_work; @@ -541,11 +544,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj) static inline void nvme_should_fail(struct request *req) {} #endif +bool nvme_wait_reset(struct nvme_ctrl *ctrl); +int nvme_try_sched_reset(struct nvme_ctrl *ctrl); + static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) { + int ret; + if (!ctrl->subsystem) return -ENOTTY; - return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); + if (!nvme_wait_reset(ctrl)) + return -EBUSY; + + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); + if (ret) + return ret; + + return nvme_try_sched_reset(ctrl); } /* @@ -632,7 +647,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl); void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl); bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state); -bool nvme_wait_reset(struct nvme_ctrl *ctrl); int nvme_disable_ctrl(struct nvme_ctrl *ctrl); int nvme_enable_ctrl(struct nvme_ctrl *ctrl); int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl); @@ -685,7 +699,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); -int nvme_try_sched_reset(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, @@ -736,7 +749,7 @@ static inline void nvme_trace_bio_complete(struct request *req, { struct nvme_ns *ns = req->q->queuedata; - if (req->cmd_flags & REQ_NVME_MPATH) + if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio) trace_block_bio_complete(ns->head->disk->queue, req->bio); } @@ -876,11 +889,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) #ifdef 
CONFIG_NVME_HWMON int nvme_hwmon_init(struct nvme_ctrl *ctrl); +void nvme_hwmon_exit(struct nvme_ctrl *ctrl); #else static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl) { return 0; } + +static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ +} #endif u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ce129655ef0a3..c47512da9872a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -33,7 +33,7 @@ #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) -#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) +#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) /* * These can be higher, but we need to ensure that any command doesn't @@ -139,9 +139,9 @@ struct nvme_dev { mempool_t *iod_mempool; /* shadow doorbell buffer support: */ - u32 *dbbuf_dbs; + __le32 *dbbuf_dbs; dma_addr_t dbbuf_dbs_dma_addr; - u32 *dbbuf_eis; + __le32 *dbbuf_eis; dma_addr_t dbbuf_eis_dma_addr; /* host memory buffer support: */ @@ -209,10 +209,10 @@ struct nvme_queue { #define NVMEQ_SQ_CMB 1 #define NVMEQ_DELETE_ERROR 2 #define NVMEQ_POLLED 3 - u32 *dbbuf_sq_db; - u32 *dbbuf_cq_db; - u32 *dbbuf_sq_ei; - u32 *dbbuf_cq_ei; + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; struct completion delete_done; }; @@ -334,11 +334,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) } /* Update dbbuf and return true if an MMIO is required */ -static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, - volatile u32 *dbbuf_ei) +static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, + volatile __le32 *dbbuf_ei) { if (dbbuf_db) { - u16 old_value; + u16 old_value, event_idx; /* * Ensure that the queue is written before updating @@ -346,8 +346,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ wmb(); - old_value = *dbbuf_db; - *dbbuf_db = value; + old_value = le32_to_cpu(*dbbuf_db); + *dbbuf_db = cpu_to_le32(value); /* * Ensure that the doorbell is updated before reading the event @@ -357,7 +357,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ mb(); - if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) + event_idx = le32_to_cpu(*dbbuf_ei); + if (!nvme_dbbuf_need_event(event_idx, value, old_value)) return false; } @@ -371,9 +372,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ static int nvme_pci_npages_prp(void) { - unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, - NVME_CTRL_PAGE_SIZE); - return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); + unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; + unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); + return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); } /* @@ -383,7 +384,7 @@ static int nvme_pci_npages_prp(void) static int nvme_pci_npages_sgl(void) { return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc), - PAGE_SIZE); + NVME_CTRL_PAGE_SIZE); } static size_t nvme_pci_iod_alloc_size(void) @@ -734,7 +735,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, sge->length = cpu_to_le32(entries * sizeof(*sge)); sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } else { - sge->length = cpu_to_le32(PAGE_SIZE); + sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE); sge->type = NVME_SGL_FMT_SEG_DESC << 4; } } @@ -817,6 +818,8 @@ static 
blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); if (bv->bv_len > first_prp_len) cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); + else + cmnd->dptr.prp2 = 0; return BLK_STS_OK; } @@ -1289,7 +1292,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) else nvme_poll_irqdisable(nvmeq); - if (blk_mq_request_completed(req)) { + if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, completion polled\n", req->tag, nvmeq->qid); @@ -2624,6 +2627,8 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out_unlock; + dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); + /* * Limit the max command size to prevent iod->sg allocations going * over a single page. @@ -2636,7 +2641,6 @@ static void nvme_reset_work(struct work_struct *work) * Don't limit the IOMMU merged segment size. */ dma_set_max_seg_size(dev->dev, 0xffffffff); - dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); mutex_unlock(&dev->shutdown_lock); @@ -3251,6 +3255,10 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, + { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index fe8c27bbc3f20..57df87def8c33 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -118,7 +118,6 @@ struct nvme_tcp_queue { struct mutex send_mutex; struct llist_head req_list; struct list_head send_list; - bool more_requests; /* recv state */ void *pdu; @@ -314,7 +313,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) { return !list_empty(&queue->send_list) || - !llist_empty(&queue->req_list) || queue->more_requests; + !llist_empty(&queue->req_list); } static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, @@ -333,9 +332,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, */ if (queue->io_cpu == raw_smp_processor_id() && sync && empty && mutex_trylock(&queue->send_mutex)) { - queue->more_requests = !last; nvme_tcp_send_all(queue); - queue->more_requests = false; mutex_unlock(&queue->send_mutex); } @@ -1196,7 +1193,7 @@ static void nvme_tcp_io_work(struct work_struct *w) else if (unlikely(result < 0)) return; - if (!pending) + if (!pending || !queue->rd_enabled) return; } while (!time_after(jiffies, deadline)); /* quota is exhausted */ diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 9a8fa2e582d5b..bc88ff2912f56 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -730,6 +730,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status) static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { + struct nvmet_ns *ns = req->ns; + if (!req->sq->sqhd_disabled) nvmet_update_sq_head(req); req->cqe->sq_id = cpu_to_le16(req->sq->qid); @@ -740,9 +742,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) trace_nvmet_req_complete(req); - if (req->ns) - 
nvmet_put_namespace(req->ns); req->ops->queue_response(req); + if (ns) + nvmet_put_namespace(ns); } void nvmet_req_complete(struct nvmet_req *req, u16 status) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 640031cbda7cc..46fc44ce86712 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1675,8 +1675,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, else { queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, be16_to_cpu(rqst->assoc_cmd.sqsize)); - if (!queue) + if (!queue) { ret = VERR_QUEUE_ALLOC_FAIL; + nvmet_fc_tgt_a_put(iod->assoc); + } } } diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index d24251ece5023..f76d01028df0b 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -259,14 +259,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) } /* - * If there are effects for the command we are about to execute, or - * an end_req function we need to use nvme_execute_passthru_rq() - * synchronously in a work item seeing the end_req function and - * nvme_passthru_end() can't be called in the request done callback - * which is typically in interrupt context. + * If a command needs post-execution fixups, or there are any + * non-trivial effects, make sure to execute the command synchronously + * in a workqueue so that nvme_passthru_end gets called. */ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode); - if (req->p.use_workqueue || effects) { + if (req->p.use_workqueue || + (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) { INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); req->p.rq = rq; schedule_work(&req->p.work); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index d030d5e69dc50..2ddbd4f4f6281 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -922,10 +922,17 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) struct nvme_tcp_data_pdu *data = &queue->pdu.data; struct nvmet_tcp_cmd *cmd; - if (likely(queue->nr_cmds)) + if (likely(queue->nr_cmds)) { + if (unlikely(data->ttag >= queue->nr_cmds)) { + pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", + queue->idx, data->ttag, queue->nr_cmds); + nvmet_tcp_fatal_error(queue); + return -EPROTO; + } cmd = &queue->cmds[data->ttag]; - else + } else { cmd = &queue->connect; + } if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { pr_err("ttag %u unexpected data offset %u (expected %u)\n", @@ -1471,6 +1478,9 @@ static void nvmet_tcp_state_change(struct sock *sk) goto done; switch (sk->sk_state) { + case TCP_FIN_WAIT2: + case TCP_LAST_ACK: + break; case TCP_FIN_WAIT1: case TCP_CLOSE_WAIT: case TCP_CLOSE: diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 21d89d80d0838..1505c745154e7 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -625,31 +625,32 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) return ERR_PTR(rval); } - if (config->wp_gpio) - nvmem->wp_gpio = config->wp_gpio; - else + nvmem->id = rval; + + nvmem->dev.type = &nvmem_provider_type; + nvmem->dev.bus = &nvmem_bus_type; + nvmem->dev.parent = config->dev; + + device_initialize(&nvmem->dev); + + if (!config->ignore_wp) nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", GPIOD_OUT_HIGH); if (IS_ERR(nvmem->wp_gpio)) { - ida_free(&nvmem_ida, nvmem->id); rval = PTR_ERR(nvmem->wp_gpio); - kfree(nvmem); - return ERR_PTR(rval); + nvmem->wp_gpio = NULL; + goto err_put_device; } 
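The nvmem_register() rework above follows a pattern worth naming: call device_initialize() as soon as the structure exists, then route every subsequent failure through put_device(), making the release callback the single point of deallocation (mixing kfree() with an initialized device risks a double free once the refcount drops). A kernel-style sketch of the shape, using a hypothetical foo object rather than nvmem itself, not compilable standalone:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>

struct foo {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_register(struct device *parent)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int err;

	if (!foo)
		return ERR_PTR(-ENOMEM);

	foo->dev.parent = parent;
	foo->dev.release = foo_release;
	device_initialize(&foo->dev);	/* from here on: put, don't kfree */

	err = dev_set_name(&foo->dev, "foo0");
	if (err)
		goto err_put;

	err = device_add(&foo->dev);	/* split from device_register() */
	if (err)
		goto err_put;

	return foo;

err_put:
	put_device(&foo->dev);	/* release callback does the kfree */
	return ERR_PTR(err);
}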
kref_init(&nvmem->refcnt); INIT_LIST_HEAD(&nvmem->cells); - nvmem->id = rval; nvmem->owner = config->owner; if (!nvmem->owner && config->dev->driver) nvmem->owner = config->dev->driver->owner; nvmem->stride = config->stride ?: 1; nvmem->word_size = config->word_size ?: 1; nvmem->size = config->size; - nvmem->dev.type = &nvmem_provider_type; - nvmem->dev.bus = &nvmem_bus_type; - nvmem->dev.parent = config->dev; nvmem->root_only = config->root_only; nvmem->priv = config->priv; nvmem->type = config->type; @@ -660,18 +661,21 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) switch (config->id) { case NVMEM_DEVID_NONE: - dev_set_name(&nvmem->dev, "%s", config->name); + rval = dev_set_name(&nvmem->dev, "%s", config->name); break; case NVMEM_DEVID_AUTO: - dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); + rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); break; default: - dev_set_name(&nvmem->dev, "%s%d", + rval = dev_set_name(&nvmem->dev, "%s%d", config->name ? : "nvmem", config->name ? config->id : nvmem->id); break; } + if (rval) + goto err_put_device; + nvmem->read_only = device_property_present(config->dev, "read-only") || config->read_only || !nvmem->reg_write; @@ -679,22 +683,16 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) nvmem->dev.groups = nvmem_dev_groups; #endif - dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); - - rval = device_register(&nvmem->dev); - if (rval) - goto err_put_device; - if (config->compat) { rval = nvmem_sysfs_setup_compat(nvmem, config); if (rval) - goto err_device_del; + goto err_put_device; } if (config->cells) { rval = nvmem_add_cells(nvmem, config->cells, config->ncells); if (rval) - goto err_teardown_compat; + goto err_remove_cells; } rval = nvmem_add_cells_from_table(nvmem); @@ -705,17 +703,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (rval) goto err_remove_cells; + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + + rval = device_add(&nvmem->dev); + if (rval) + goto err_remove_cells; + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); return nvmem; err_remove_cells: nvmem_device_remove_all_cells(nvmem); -err_teardown_compat: if (config->compat) nvmem_sysfs_remove_compat(nvmem, config); -err_device_del: - device_del(&nvmem->dev); err_put_device: put_device(&nvmem->dev); diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c index f6e9f96933ca2..1549bfcc4c2d9 100644 --- a/drivers/nvmem/qcom-spmi-sdam.c +++ b/drivers/nvmem/qcom-spmi-sdam.c @@ -166,6 +166,7 @@ static const struct of_device_id sdam_match_table[] = { { .compatible = "qcom,spmi-sdam" }, {}, }; +MODULE_DEVICE_TABLE(of, sdam_match_table); static struct platform_driver sdam_driver = { .driver = { diff --git a/drivers/of/address.c b/drivers/of/address.c index 73ddf2540f3fe..f686fb5011b87 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -990,8 +990,19 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map) } of_dma_range_parser_init(&parser, node); - for_each_of_range(&parser, &range) + for_each_of_range(&parser, &range) { + if (range.cpu_addr == OF_BAD_ADDR) { + pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n", + range.bus_addr, node); + continue; + } num_ranges++; + } + + if (!num_ranges) { + ret = -EINVAL; + goto out; + } r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL); if (!r) { @@ -1000,18 +1011,16 @@ int of_dma_get_range(struct 
device_node *np, const struct bus_dma_region **map) } /* - * Record all info in the generic DMA ranges array for struct device. + * Record all info in the generic DMA ranges array for struct device, + * returning an error if we don't find any parsable ranges. */ *map = r; of_dma_range_parser_init(&parser, node); for_each_of_range(&parser, &range) { pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", range.bus_addr, range.cpu_addr, range.size); - if (range.cpu_addr == OF_BAD_ADDR) { - pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n", - range.bus_addr, node); + if (range.cpu_addr == OF_BAD_ADDR) continue; - } r->cpu_start = range.cpu_addr; r->dma_start = range.bus_addr; r->size = range.size; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 57ff31b6b1e47..5a1b8688b4605 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -315,7 +315,7 @@ static int unflatten_dt_nodes(const void *blob, for (offset = 0; offset >= 0 && depth >= initial_depth; offset = fdt_next_node(blob, offset, &depth)) { - if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) + if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1)) continue; if (!IS_ENABLED(CONFIG_OF_KOBJ) && diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index c8a0c0e9dec1c..67b404f36e79c 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -547,7 +547,7 @@ static int find_dup_cset_node_entry(struct overlay_changeset *ovcs, fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); - node_path_match = !strcmp(fn_1, fn_2); + node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); kfree(fn_1); kfree(fn_2); if (node_path_match) { @@ -582,7 +582,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs, fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); - node_path_match = !strcmp(fn_1, fn_2); + node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); kfree(fn_1); kfree(fn_2); if (node_path_match && diff --git a/drivers/of/property.c b/drivers/of/property.c index 1d7d24e7094b0..8f998351bf4fb 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -956,8 +956,10 @@ of_fwnode_get_reference_args(const struct fwnode_handle *fwnode, nargs, index, &of_args); if (ret < 0) return ret; - if (!args) + if (!args) { + of_node_put(of_args.np); return 0; + } args->nargs = of_args.args_count; args->fwnode = of_fwnode_handle(of_args.np); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index b916fab9b1618..be81b765858be 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr) } } -static void __init ccio_init_resources(struct ioc *ioc) +static int __init ccio_init_resources(struct ioc *ioc) { struct resource *res = ioc->mmio_region; char *name = kmalloc(14, GFP_KERNEL); - + if (unlikely(!name)) + return -ENOMEM; snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path); ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low); ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv); + return 0; } static int new_ioc_area(struct resource *res, unsigned long size, @@ -1543,7 +1545,11 @@ static int __init ccio_probe(struct parisc_device *dev) return -ENOMEM; } ccio_ioc_init(ioc); - ccio_init_resources(ioc); + if (ccio_init_resources(ioc)) { + iounmap(ioc->ioc_regs); + kfree(ioc); + return -ENOMEM; + } hppa_dma_ops = &ccio_ops; hba = kzalloc(sizeof(*hba), GFP_KERNEL); diff --git a/drivers/parisc/iosapic.c 
b/drivers/parisc/iosapic.c index 8a3b0c3a1e92b..fd99735dca3e6 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -875,6 +875,7 @@ int iosapic_serial_irq(struct parisc_device *dev) return vi->txn_irq; } +EXPORT_SYMBOL(iosapic_serial_irq); #endif diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 36c6613f7a36b..4854120fc0954 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -137,6 +137,9 @@ static int start_task(void) /* Create the work queue and queue the LED task */ led_wq = create_singlethread_workqueue("led_wq"); + if (!led_wq) + return -ENOMEM; + queue_delayed_work(led_wq, &led_task, 0); return 0; diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index eda4ded4d5e52..925be41eeebec 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -468,7 +468,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port, const unsigned char *bufp = buf; size_t left = length; unsigned long expire = jiffies + port->physport->cad->timeout; - const int fifo = FIFO(port); + const unsigned long fifo = FIFO(port); int poll_for = 8; /* 80 usecs */ const struct parport_pc_private *priv = port->physport->private_data; const int fifo_depth = priv->fifo_depth; diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 2b74ff88c5c56..28945351da14f 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -589,7 +589,7 @@ void dw_pcie_setup(struct dw_pcie *pci) if (pci->n_fts[1]) { val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val &= ~PORT_LOGIC_N_FTS_MASK; - val |= pci->n_fts[pci->link_gen - 1]; + val |= pci->n_fts[1]; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); } diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index ddfeca9016a02..ef52f5097eb37 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -870,7 +870,7 @@ static int pci_epf_test_bind(struct pci_epf *epf) if (ret) epf_test->dma_supported = false; - if (linkup_notifier) { + if (linkup_notifier || core_init_notifier) { epf->nb.notifier_call = pci_epf_test_notifier; pci_epc_register_notifier(epc, &epf->nb); } else { diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index 12ecd0aaa28d6..0050e8f6814ed 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -44,6 +44,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler, va_start(ap, fmt); devname = kvasprintf(GFP_KERNEL, fmt, ap); va_end(ap); + if (!devname) + return -ENOMEM; ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn, irqflags, devname, dev_id); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index d15c881e2e7e7..361de072dfdca 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1141,11 +1141,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) sysfs_bin_attr_init(res_attr); if (write_combine) { - pdev->res_attr_wc[num] = res_attr; sprintf(res_attr_name, "resource%d_wc", num); res_attr->mmap = pci_mmap_resource_wc; } else { - pdev->res_attr[num] = res_attr; sprintf(res_attr_name, "resource%d", num); if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { res_attr->read = pci_read_resource_io; @@ -1161,10 +1159,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) res_attr->size = pci_resource_len(pdev, 
num); res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); - if (retval) + if (retval) { kfree(res_attr); + return retval; + } - return retval; + if (write_combine) + pdev->res_attr_wc[num] = res_attr; + else + pdev->res_attr[num] = res_attr; + + return 0; } /** diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1162734546481..262577c81d307 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6152,6 +6152,8 @@ bool pci_device_is_present(struct pci_dev *pdev) { u32 v; + /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ + pdev = pci_physfn(pdev); if (pci_dev_is_disconnected(pdev)) return false; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7f1acb3918d0c..875d50c16f19d 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -210,6 +210,17 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, root = pci_find_parent_resource(dev, res); if (!root) { + /* + * If dev is behind a bridge, accesses will only reach it + * if res is inside the relevant bridge window. + */ + if (pci_upstream_bridge(dev)) + return -ENXIO; + + /* + * On the root bus, assume the host bridge will forward + * everything. + */ if (res->flags & IORESOURCE_IO) root = &ioport_resource; else diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 98e68ed7db85f..1db8eccc9735c 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -866,7 +866,11 @@ static int __init dsu_pmu_init(void) if (ret < 0) return ret; dsu_pmu_cpuhp_state = ret; - return platform_driver_register(&dsu_pmu_driver); + ret = platform_driver_register(&dsu_pmu_driver); + if (ret) + cpuhp_remove_multi_state(dsu_pmu_cpuhp_state); + + return ret; } static void __exit dsu_pmu_exit(void) diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index ef9676418c9f4..2e1f3680d8466 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) if (num_irqs == 1) { int irq = platform_get_irq(pdev, 0); - if (irq && irq_is_percpu_devid(irq)) + if ((irq > 0) && irq_is_percpu_devid(irq)) return pmu_parse_percpu_irq(pmu, irq); } diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index afa8efbdad8fa..f5a33dbe7acb9 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -870,6 +870,8 @@ static struct platform_driver smmu_pmu_driver = { static int __init arm_smmu_pmu_init(void) { + int ret; + cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/arm/pmcg:online", NULL, @@ -877,7 +879,11 @@ static int __init arm_smmu_pmu_init(void) if (cpuhp_state_num < 0) return cpuhp_state_num; - return platform_driver_register(&smmu_pmu_driver); + ret = platform_driver_register(&smmu_pmu_driver); + if (ret) + cpuhp_remove_multi_state(cpuhp_state_num); + + return ret; } module_init(arm_smmu_pmu_init); diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index b901a0d4e2a80..cd2240ea2c9a8 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -101,9 +101,9 @@ static int brcm_pm_notifier(struct notifier_block *notifier, static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { - struct phy *gphy = dev_id; + struct device *dev = dev_id; - pm_wakeup_event(&gphy->dev, 0); + pm_wakeup_event(dev, 0); 
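+ /* Count the wake against the platform device: wakeup is enabled with
+ * device_set_wakeup_capable(dev, 1) in brcm_usb_phy_dvr_init() below,
+ * so signalling the phy's own device (the old dev_id) never
+ * incremented wakeup_count. */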
return IRQ_HANDLED; } @@ -437,7 +437,7 @@ static int brcm_usb_phy_dvr_init(struct platform_device *pdev, if (priv->wake_irq >= 0) { err = devm_request_irq(dev, priv->wake_irq, brcm_usb_phy_wake_isr, 0, - dev_name(dev), gphy); + dev_name(dev), dev); if (err < 0) return err; device_set_wakeup_capable(dev, 1); diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c index 04d18d52f700d..d4741c2dbbb56 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c @@ -54,8 +54,10 @@ static int qcom_usb_hsic_phy_power_on(struct phy *phy) /* Configure pins for HSIC functionality */ pins_default = pinctrl_lookup_state(uphy->pctl, PINCTRL_STATE_DEFAULT); - if (IS_ERR(pins_default)) - return PTR_ERR(pins_default); + if (IS_ERR(pins_default)) { + ret = PTR_ERR(pins_default); + goto err_ulpi; + } ret = pinctrl_select_state(uphy->pctl, pins_default); if (ret) diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 46ebdb1460a3d..cab6a94bf1610 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -467,8 +467,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy) return ret; ret = property_enable(base, &rport->port_cfg->phy_sus, false); - if (ret) + if (ret) { + clk_disable_unprepare(rphy->clk480m); return ret; + } /* waiting for the utmi_clk to become stable */ usleep_range(1500, 2000); diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 2b3639cba51aa..03fc567e9f188 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -393,6 +393,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) ret = of_property_read_u32(child, "reg", &index); if (ret || index > usbphyc->nphys) { dev_err(&phy->dev, "invalid reg property: %d\n", ret); + if (!ret) + ret = -EINVAL; goto put_child; } diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig index 15a3bcf323086..b905902d57508 100644 --- a/drivers/phy/ti/Kconfig +++ b/drivers/phy/ti/Kconfig @@ -23,7 +23,7 @@ config PHY_DM816X_USB config PHY_AM654_SERDES tristate "TI AM654 SERDES support" - depends on OF && ARCH_K3 || COMPILE_TEST + depends on OF && (ARCH_K3 || COMPILE_TEST) depends on COMMON_CLK select GENERIC_PHY select MULTIPLEXER @@ -35,7 +35,7 @@ config PHY_AM654_SERDES config PHY_J721E_WIZ tristate "TI J721E WIZ (SERDES Wrapper) support" - depends on OF && ARCH_K3 || COMPILE_TEST + depends on OF && (ARCH_K3 || COMPILE_TEST) depends on HAS_IOMEM && OF_ADDRESS depends on COMMON_CLK select GENERIC_PHY diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index e792318c38946..d26d859546275 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -121,7 +121,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx, int ret = 0; if (!exprs) - return true; + return -EINVAL; while (*exprs && !ret) { ret = aspeed_sig_expr_disable(ctx, *exprs); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 3fb2387147189..eac55fee5281c 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -220,6 +220,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) for (state = 0; ; state++) { /* Retrieve the pinctrl-* property */ propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state); + if (!propname) + return -ENOMEM; prop = of_find_property(np, 
propname, &size); kfree(propname); if (!prop) { diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 4de832ac47d38..2ef9e2d8fd9c5 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -426,9 +426,14 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) writel(value, padcfg0); } +static int __intel_gpio_get_gpio_mode(u32 value) +{ + return (value & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; +} + static int intel_gpio_get_gpio_mode(void __iomem *padcfg0) { - return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; + return __intel_gpio_get_gpio_mode(readl(padcfg0)); } static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) @@ -1601,9 +1606,16 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_ EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data); #ifdef CONFIG_PM_SLEEP +static bool __intel_gpio_is_direct_irq(u32 value) +{ + return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) && + (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO); +} + static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin) { const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin); + u32 value; if (!pd || !intel_pad_usable(pctrl, pin)) return false; @@ -1618,6 +1630,24 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int gpiochip_line_is_irq(&pctrl->chip, intel_pin_to_gpio(pctrl, pin))) return true; + /* + * The firmware on some systems may configure GPIO pins to be + * an interrupt source in so-called "direct IRQ" mode. In such + * cases the GPIO controller driver has no idea if those pins + * are being used or not. At the same time, there is a known bug + * in the firmware that fails to restore the pin settings correctly + * after suspend: for an unknown reason the Rx value comes back + * inverted. + * + * Hence, let's save and restore the pins that are configured + * as GPIOs in the input mode with the GPIROUTIOXAPIC bit set. + * + * See https://bugzilla.kernel.org/show_bug.cgi?id=214749. + */ + value = readl(intel_get_padcfg(pctrl, pin, PADCFG0)); + if (__intel_gpio_is_direct_irq(value)) + return true; + return false; } @@ -1745,7 +1775,12 @@ int intel_pinctrl_resume_noirq(struct device *dev) for (i = 0; i < pctrl->soc->npins; i++) { const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i]; - if (!intel_pinctrl_should_save(pctrl, desc->number)) + if (!(intel_pinctrl_should_save(pctrl, desc->number) || + /* + * If the firmware mangled the register contents too much, + * check the saved value for the Direct IRQ mode.
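A user-space rendering of the __intel_gpio_is_direct_irq() test added above, handy for inspecting saved PADCFG0 values by hand. The bit positions below are copied for illustration; pinctrl-intel.h remains the authority:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PADCFG0_GPIROUTIOXAPIC	(1u << 20)	/* routed to IOxAPIC */
#define PADCFG0_GPIOTXDIS	(1u << 8)	/* TX disabled: input */
#define PADCFG0_PMODE_SHIFT	10
#define PADCFG0_PMODE_MASK	(0xfu << PADCFG0_PMODE_SHIFT)
#define PADCFG0_PMODE_GPIO	0

static bool is_direct_irq(uint32_t padcfg0)
{
	/* GPIO mode, input only, routed straight to the IOxAPIC. */
	return (padcfg0 & PADCFG0_GPIROUTIOXAPIC) &&
	       (padcfg0 & PADCFG0_GPIOTXDIS) &&
	       ((padcfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT) ==
			PADCFG0_PMODE_GPIO;
}

int main(void)
{
	uint32_t pad = PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIOTXDIS;

	printf("direct irq: %d\n", is_direct_irq(pad));
	return 0;
}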
+ */ + __intel_gpio_is_direct_irq(pads[i].padcfg0))) continue; intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0); diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index 22736f60c16ca..64a32d3ca4813 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -278,12 +278,15 @@ static struct irq_chip mtk_eint_irq_chip = { static unsigned int mtk_eint_hw_init(struct mtk_eint *eint) { - void __iomem *reg = eint->base + eint->regs->dom_en; + void __iomem *dom_en = eint->base + eint->regs->dom_en; + void __iomem *mask_set = eint->base + eint->regs->mask_set; unsigned int i; for (i = 0; i < eint->hw->ap_num; i += 32) { - writel(0xffffffff, reg); - reg += 4; + writel(0xffffffff, dom_en); + writel(0xffffffff, mask_set); + dom_en += 4; + mask_set += 4; } return 0; diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 42e27dba62e26..762abb0dfebba 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -393,8 +393,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, for_each_available_child_of_node(np_config, np) { ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps, type); - if (ret < 0) + if (ret < 0) { + of_node_put(np); goto exit; + } } return 0; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 17aa0d542d925..22e471933b373 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector, if (!pcs->fmask) return 0; function = pinmux_generic_get_function(pctldev, fselector); + if (!function) + return -EINVAL; func = function->data; if (!func) return -EINVAL; @@ -726,7 +728,7 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs) mux_bytes = pcs->width / BITS_PER_BYTE; - if (pcs->bits_per_mux) { + if (pcs->bits_per_mux && pcs->fmask) { pcs->bits_per_pin = fls(pcs->fmask); nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin; num_pins_in_register = pcs->width / pcs->bits_per_pin; diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c index 21054fcacd345..18088f6f44b23 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c @@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match); static struct platform_driver a100_r_pinctrl_driver = { .probe = a100_r_pinctrl_probe, .driver = { - .name = "sun50iw10p1-r-pinctrl", + .name = "sun50i-a100-r-pinctrl", .of_match_table = a100_r_pinctrl_match, }, }; diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 472a03daa8693..109c191d35cfc 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -718,6 +718,7 @@ static int __init chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop, const struct chromeos_laptop *src) { + struct i2c_peripheral *i2c_peripherals; struct i2c_peripheral *i2c_dev; struct i2c_board_info *info; int i; @@ -726,17 +727,15 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop, if (!src->num_i2c_peripherals) return 0; - cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals, - src->num_i2c_peripherals * - sizeof(*src->i2c_peripherals), - GFP_KERNEL); - if (!cros_laptop->i2c_peripherals) + i2c_peripherals = 
kmemdup(src->i2c_peripherals, + src->num_i2c_peripherals * + sizeof(*src->i2c_peripherals), + GFP_KERNEL); + if (!i2c_peripherals) return -ENOMEM; - cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals; - - for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) { - i2c_dev = &cros_laptop->i2c_peripherals[i]; + for (i = 0; i < src->num_i2c_peripherals; i++) { + i2c_dev = &i2c_peripherals[i]; info = &i2c_dev->board_info; error = chromeos_laptop_setup_irq(i2c_dev); @@ -754,16 +753,19 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop, } } + cros_laptop->i2c_peripherals = i2c_peripherals; + cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals; + return 0; err_out: while (--i >= 0) { - i2c_dev = &cros_laptop->i2c_peripherals[i]; + i2c_dev = &i2c_peripherals[i]; info = &i2c_dev->board_info; if (info->properties) property_entries_free(info->properties); } - kfree(cros_laptop->i2c_peripherals); + kfree(i2c_peripherals); return error; } diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index c4de8c4db1930..5a622666a0755 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -332,10 +332,16 @@ EXPORT_SYMBOL(cros_ec_suspend); static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev) { + bool wake_event; + while (ec_dev->mkbp_event_supported && - cros_ec_get_next_event(ec_dev, NULL, NULL) > 0) + cros_ec_get_next_event(ec_dev, &wake_event, NULL) > 0) { blocking_notifier_call_chain(&ec_dev->event_notifier, 1, ec_dev); + + if (wake_event && device_may_wakeup(ec_dev->dev)) + pm_wakeup_event(ec_dev->dev, 0); + } } /** diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c index fd33de546aee0..0de7c255254e0 100644 --- a/drivers/platform/chrome/cros_ec_chardev.c +++ b/drivers/platform/chrome/cros_ec_chardev.c @@ -327,6 +327,9 @@ static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec, if (copy_from_user(&s_mem, arg, sizeof(s_mem))) return -EFAULT; + if (s_mem.bytes > sizeof(s_mem.buffer)) + return -EINVAL; + num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes, s_mem.buffer); if (num <= 0) diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 3a2a78ff33304..8ffbf92ec1e07 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -748,6 +748,7 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, u8 event_type; u32 host_event; int ret; + u32 ver_mask; /* * Default value for wake_event. @@ -769,6 +770,37 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, return get_keyboard_state_event(ec_dev); ret = get_next_event(ec_dev); + /* + * -ENOPROTOOPT is returned when EC returns EC_RES_INVALID_VERSION. + * This can occur when an EC-based device (e.g. a fingerprint MCU) jumps + * to the RO image, which doesn't support a newer version of the command. + * In this case we will attempt to update the maximum supported version + * of EC_CMD_GET_NEXT_EVENT. + */ + if (ret == -ENOPROTOOPT) { + dev_dbg(ec_dev->dev, + "GET_NEXT_EVENT returned invalid version error.\n"); + ret = cros_ec_get_host_command_version_mask(ec_dev, + EC_CMD_GET_NEXT_EVENT, + &ver_mask); + if (ret < 0 || ver_mask == 0) + /* + * Do not change the MKBP supported version if we can't + * obtain the supported version correctly. Please note + * that calling EC_CMD_GET_NEXT_EVENT returned + * EC_RES_INVALID_VERSION, which means that the command + * is present.
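+ * + * For example, a ver_mask of 0x3 means versions 0 and 1 are + * implemented: fls(ver_mask) is 2, so mkbp_event_supported becomes + * 2 and the retry below issues the command at version 1.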
+ */ + return -ENOPROTOOPT; + + ec_dev->mkbp_event_supported = fls(ver_mask); + dev_dbg(ec_dev->dev, "MKBP support version changed to %u\n", + ec_dev->mkbp_event_supported - 1); + + /* Try to get next event with new MKBP support version set. */ + ret = get_next_event(ec_dev); + } + if (ret <= 0) return ret; diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c index 7f36142ab12a8..19390147ac9d6 100644 --- a/drivers/platform/chrome/cros_usbpd_notify.c +++ b/drivers/platform/chrome/cros_usbpd_notify.c @@ -284,7 +284,11 @@ static int __init cros_usbpd_notify_init(void) return ret; #ifdef CONFIG_ACPI - platform_driver_register(&cros_usbpd_notify_acpi_driver); + ret = platform_driver_register(&cros_usbpd_notify_acpi_driver); + if (ret) { + platform_driver_unregister(&cros_usbpd_notify_plat_driver); + return ret; + } #endif return 0; } diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 80983f9dfcd55..ebec49957ed09 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -93,6 +93,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ + {KE_KEY, 0x27, {KEY_HELP} }, {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ {KE_IGNORE, 0x41, {KEY_MUTE} }, {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, @@ -106,7 +107,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { {KE_IGNORE, 0x48, {KEY_VOLUMEUP} }, {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} }, {KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} }, - {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} }, + /* + * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event + * with the "Video Bus" input device events. But sometimes it is not + * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that + * udev/hwdb can override it on systems where it is not a dup. 
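+ * + * (With KEY_UNKNOWN the event is still reported to userspace, so a + * hwdb rule for scancode 0x61 can remap it per machine; KE_IGNORE + * would leave nothing for hwdb to override.)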
+ */ + {KE_KEY, 0x61, {KEY_UNKNOWN} }, {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} }, {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} }, {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ @@ -529,6 +536,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = { }, .driver_data = (void *)ACER_CAP_KBD_DOCK, }, + { + .callback = set_force_caps, + .ident = "Acer Aspire Switch V 10 SW5-017", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)ACER_CAP_KBD_DOCK, + }, { .callback = set_force_caps, .ident = "Acer One 10 (S1003)", diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 949ddeb673bc5..74637bd0433e6 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -478,6 +478,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x30, { KEY_VOLUMEUP } }, { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x32, { KEY_MUTE } }, + { KE_KEY, 0x33, { KEY_SCREENLOCK } }, { KE_KEY, 0x35, { KEY_SCREENLOCK } }, { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, { KE_KEY, 0x41, { KEY_NEXTSONG } }, diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 39e1a6396e08d..db369cf261119 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -1212,6 +1212,8 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus) pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, cpu_to_le32(ports_available)); + pci_dev_put(xhci_pdev); + pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n", orig_ports_available, ports_available); } diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index bbdb3e8608927..6ef327a80ccf4 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -259,6 +259,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = { { KE_KEY, 0x57, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0x58, { KEY_BRIGHTNESSUP } }, + /* Speaker Mute */ + { KE_KEY, 0x109, { KEY_MUTE } }, + /* Mic mute */ { KE_KEY, 0x150, { KEY_MICMUTE } }, diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 012639f6d3354..6642d09b17b55 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -63,6 +63,7 @@ enum hp_wmi_event_ids { HPWMI_PEAKSHIFT_PERIOD = 0x0F, HPWMI_BATTERY_CHARGE_PERIOD = 0x10, HPWMI_SANITIZATION_MODE = 0x17, + HPWMI_SMART_EXPERIENCE_APP = 0x21, }; struct bios_args { @@ -632,6 +633,8 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_SANITIZATION_MODE: break; + case HPWMI_SMART_EXPERIENCE_APP: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; @@ -900,8 +903,16 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) wwan_rfkill = NULL; rfkill2_count = 0; - if (hp_wmi_rfkill_setup(device)) - hp_wmi_rfkill2_setup(device); + /* + * In pre-2009 BIOSes, command 1Bh returns 0x4 to indicate that + * the BIOS no longer controls the power for the wireless + * devices. All features supported by this command are + * therefore no longer supported.
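+ * + * (hp_wmi_rfkill_setup() is therefore only attempted below when + * hp_wmi_bios_2009_later() reports an older BIOS.)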
+ */ + if (!hp_wmi_bios_2009_later()) { + if (hp_wmi_rfkill_setup(device)) + hp_wmi_rfkill2_setup(device); + } thermal_policy_setup(device); diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c index eac3e6b4ea113..935562c870c3d 100644 --- a/drivers/platform/x86/huawei-wmi.c +++ b/drivers/platform/x86/huawei-wmi.c @@ -760,6 +760,9 @@ static int huawei_wmi_input_setup(struct device *dev, const char *guid, struct input_dev **idev) { + acpi_status status; + int err; + *idev = devm_input_allocate_device(dev); if (!*idev) return -ENOMEM; @@ -769,10 +772,19 @@ static int huawei_wmi_input_setup(struct device *dev, (*idev)->id.bustype = BUS_HOST; (*idev)->dev.parent = dev; - return sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL) || - input_register_device(*idev) || - wmi_install_notify_handler(guid, huawei_wmi_input_notify, - *idev); + err = sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL); + if (err) + return err; + + err = input_register_device(*idev); + if (err) + return err; + + status = wmi_install_notify_handler(guid, huawei_wmi_input_notify, *idev); + if (ACPI_FAILURE(status)) + return -EIO; + + return 0; } static void huawei_wmi_input_exit(struct device *dev, const char *guid) diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 8a0cd5bf00657..cebddefba2f42 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -93,6 +93,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), }, }, + { + .ident = "Microsoft Surface Go 3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index 15ca8afdd973d..ddfba38c21044 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -18,6 +18,8 @@ #include #include +#include + static void intel_pmc_core_release(struct device *dev) { kfree(dev); @@ -53,6 +55,13 @@ static int __init pmc_core_platform_init(void) if (acpi_dev_present("INT33A1", NULL, -1)) return -ENODEV; + /* + * Skip forcefully attaching the device for VMs. Make an exception for + * Xen dom0, which does have full hardware access. + */ + if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain()) + return -ENODEV; + if (!x86_match_cpu(intel_pmc_core_platform_ids)) return -ENODEV; diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 69d706039cb20..bdeb888c0fea4 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -583,7 +583,6 @@ __intel_scu_ipc_register(struct device *parent, scu->dev.parent = parent; scu->dev.class = &intel_scu_ipc_class; scu->dev.release = intel_scu_ipc_release; - dev_set_name(&scu->dev, "intel_scu_ipc"); if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem), "intel_scu_ipc")) { @@ -612,6 +611,7 @@ __intel_scu_ipc_register(struct device *parent, * After this point intel_scu_ipc_release() takes care of * releasing the SCU IPC resources once refcount drops to zero. 
*/ + dev_set_name(&scu->dev, "intel_scu_ipc"); err = device_register(&scu->dev); if (err) { put_device(&scu->dev); diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 24ffc8e2d2d1e..0e804b6c2d242 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -596,11 +596,10 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = { { .ident = "MSI S270", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"), DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"), DMI_MATCH(DMI_PRODUCT_VERSION, "0131"), - DMI_MATCH(DMI_CHASSIS_VENDOR, - "MICRO-STAR INT'L CO.,LTD") + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT") }, .driver_data = &quirk_old_ec_model, .callback = dmi_check_cb @@ -633,8 +632,7 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = { DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"), DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"), DMI_MATCH(DMI_PRODUCT_VERSION, "0131"), - DMI_MATCH(DMI_CHASSIS_VENDOR, - "MICRO-STAR INT'L CO.,LTD") + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT") }, .driver_data = &quirk_old_ec_model, .callback = dmi_check_cb @@ -1048,8 +1046,7 @@ static int __init msi_init(void) return -EINVAL; /* Register backlight stuff */ - - if (quirks->old_ec_model || + if (quirks->old_ec_model && acpi_video_get_backlight_type() == acpi_backlight_vendor) { struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); @@ -1117,6 +1114,8 @@ static int __init msi_init(void) fail_create_group: if (quirks->load_scm_model) { i8042_remove_filter(msi_laptop_i8042_filter); + cancel_delayed_work_sync(&msi_touchpad_dwork); + input_unregister_device(msi_laptop_input_dev); cancel_delayed_work_sync(&msi_rfkill_dwork); cancel_work_sync(&msi_rfkill_work); rfkill_cleanup(); @@ -1137,6 +1136,7 @@ static void __exit msi_cleanup(void) { if (quirks->load_scm_model) { i8042_remove_filter(msi_laptop_i8042_filter); + cancel_delayed_work_sync(&msi_touchpad_dwork); input_unregister_device(msi_laptop_input_dev); cancel_delayed_work_sync(&msi_rfkill_dwork); cancel_work_sync(&msi_rfkill_work); diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c index 9a19fbd2f7341..9a457956025a5 100644 --- a/drivers/platform/x86/mxm-wmi.c +++ b/drivers/platform/x86/mxm-wmi.c @@ -35,13 +35,11 @@ int mxm_wmi_call_mxds(int adapter) .xarg = 1, }; struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; - struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; printk("calling mux switch %d\n", adapter); - status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, - &output); + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL); if (ACPI_FAILURE(status)) return status; @@ -60,13 +58,11 @@ int mxm_wmi_call_mxmx(int adapter) .xarg = 1, }; struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; - struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; printk("calling mux switch %d\n", adapter); - status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, - &output); + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL); if (ACPI_FAILURE(status)) return status; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e5a1b55334081..f070e4eb74f4a 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1892,14 +1892,21 @@ static int sony_nc_kbd_backlight_setup(struct 
platform_device *pd, break; } - ret = sony_call_snc_handle(handle, probe_base, &result); - if (ret) - return ret; + /* + * Only probe if there is a separate probe_base, otherwise the probe call + * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in + * the keyboard backlight being turned off. + */ + if (probe_base) { + ret = sony_call_snc_handle(handle, probe_base, &result); + if (ret) + return ret; - if ((handle == 0x0137 && !(result & 0x02)) || - !(result & 0x01)) { - dprintk("no backlight keyboard found\n"); - return 0; + if ((handle == 0x0137 && !(result & 0x02)) || + !(result & 0x01)) { + dprintk("no backlight keyboard found\n"); + return 0; + } } kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL); diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index ab6a9369649db..b96fbc8dba09d 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -255,6 +255,23 @@ static const struct ts_dmi_data connect_tablet9_data = { .properties = connect_tablet9_props, }; +static const struct property_entry csl_panther_tab_hd_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 20), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1526), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data csl_panther_tab_hd_data = { + .acpi_name = "MSSL1680:00", + .properties = csl_panther_tab_hd_props, +}; + static const struct property_entry cube_iwork8_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 1), PROPERTY_ENTRY_U32("touchscreen-min-y", 3), @@ -756,6 +773,22 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, }; +static const struct property_entry rca_cambio_w101_v2_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 4), + PROPERTY_ENTRY_U32("touchscreen-min-y", 20), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1644), + PROPERTY_ENTRY_U32("touchscreen-size-y", 874), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rca_cambio_w101_v2_data = { + .acpi_name = "MSSL1680:00", + .properties = rca_cambio_w101_v2_props, +}; + static const struct property_entry rwc_nanote_p8_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 46), PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), @@ -997,6 +1030,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"), }, }, + { + /* Chuwi Vi8 (CWI501) */ + .driver_data = (void *)&chuwi_vi8_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "i86"), + DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"), + }, + }, { /* Chuwi Vi8 (CWI506) */ .driver_data = (void *)&chuwi_vi8_data, @@ -1041,6 +1083,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"), }, }, + { + /* CSL Panther Tab HD */ + .driver_data = (void *)&csl_panther_tab_hd_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. 
KG"), + DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"), + }, + }, { /* CUBE iwork8 Air */ .driver_data = (void *)&cube_iwork8_air_data, @@ -1341,6 +1391,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), }, }, + { + /* RCA Cambio W101 v2 */ + /* https://github.com/onitake/gsl-firmware/discussions/193 */ + .driver_data = (void *)&rca_cambio_w101_v2_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "RCA"), + DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"), + }, + }, { /* RWC NANOTE P8 */ .driver_data = (void *)&rwc_nanote_p8_data, diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index a50ab002e9e41..14bf75ba941d4 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c @@ -160,14 +160,14 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, dev->dev.coherent_dma_mask = dev->dma_mask; dev->dev.release = &pnp_release_device; - dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number); - dev_id = pnp_add_id(dev, pnpid); if (!dev_id) { kfree(dev); return NULL; } + dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number); + return dev; } diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c index 003557043ab3a..daee1161c3059 100644 --- a/drivers/power/supply/adp5061.c +++ b/drivers/power/supply/adp5061.c @@ -427,11 +427,11 @@ static int adp5061_get_chg_type(struct adp5061_state *st, if (ret < 0) return ret; - chg_type = adp5061_chg_type[ADP5061_CHG_STATUS_1_CHG_STATUS(status1)]; - if (chg_type > ADP5061_CHG_FAST_CV) + chg_type = ADP5061_CHG_STATUS_1_CHG_STATUS(status1); + if (chg_type >= ARRAY_SIZE(adp5061_chg_type)) val->intval = POWER_SUPPLY_STATUS_UNKNOWN; else - val->intval = chg_type; + val->intval = adp5061_chg_type[chg_type]; return ret; } diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 280c54c23e37e..2b644590fa8e0 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -677,6 +677,11 @@ int power_supply_get_battery_info(struct power_supply *psy, int i, tab_len, size; propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index); + if (!propname) { + power_supply_put_battery_info(psy, info); + err = -ENOMEM; + goto out_put_node; + } list = of_get_property(battery_np, propname, &size); if (!list || !size) { dev_err(&psy->dev, "failed to get %s\n", propname); @@ -1201,8 +1206,8 @@ __power_supply_register(struct device *parent, register_cooler_failed: psy_unregister_thermal(psy); register_thermal_failed: - device_del(dev); wakeup_init_failed: + device_del(dev); device_add_failed: check_supplies_failed: dev_set_name_failed: diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 70d6d52bc1e21..285420c1eb7c3 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -938,6 +938,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, y = value & 0x1f; value = (1 << y) * (4 + f) * rp->time_unit / 4; } else { + if (value < rp->time_unit) + return 0; + do_div(value, rp->time_unit); y = ilog2(value); f = div64_u64(4 * (value - (1 << y)), 1 << y); @@ -979,7 +982,6 @@ static const struct rapl_defaults rapl_defaults_spr_server = { .check_unit = rapl_check_unit_core, .set_floor_freq = set_floor_freq_default, .compute_time_window = rapl_compute_time_window_core, - .dram_domain_energy_unit = 15300, .psys_domain_energy_unit = 1000000000, }; diff --git a/drivers/pwm/pwm-sifive.c 
b/drivers/pwm/pwm-sifive.c index 9cc0612f08498..12e9e23272ab1 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -217,8 +217,11 @@ static int pwm_sifive_clock_notifier(struct notifier_block *nb, struct pwm_sifive_ddata *ddata = container_of(nb, struct pwm_sifive_ddata, notifier); - if (event == POST_RATE_CHANGE) + if (event == POST_RATE_CHANGE) { + mutex_lock(&ddata->lock); pwm_sifive_update_clock(ddata, ndata->new_rate); + mutex_unlock(&ddata->lock); + } return NOTIFY_OK; } diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 8c4e6657b61e7..f3528c56e8947 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -142,8 +142,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * source clock rate as required_clk_rate, PWM controller will * be able to configure the requested period. */ - required_clk_rate = - (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH; + required_clk_rate = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC << PWM_DUTY_WIDTH, + period_ns); err = clk_set_rate(pc->clk, required_clk_rate); if (err < 0) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 94331d999d273..5ac2dc1e2abd8 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1803,8 +1803,11 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); err = rio_add_device(rdev); - if (err) - goto cleanup; + if (err) { + put_device(&rdev->dev); + return err; + } + rio_dev_get(rdev); return 0; @@ -1900,10 +1903,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) priv->md = chdev; - mutex_lock(&chdev->file_mutex); - list_add_tail(&priv->list, &chdev->file_list); - mutex_unlock(&chdev->file_mutex); - INIT_LIST_HEAD(&priv->db_filters); INIT_LIST_HEAD(&priv->pw_filters); spin_lock_init(&priv->fifo_lock); @@ -1912,6 +1911,7 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) sizeof(struct rio_event) * MPORT_EVENT_DEPTH, GFP_KERNEL); if (ret < 0) { + put_device(&chdev->dev); dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); ret = -ENOMEM; goto err_fifo; @@ -1922,6 +1922,9 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) spin_lock_init(&priv->req_lock); mutex_init(&priv->dma_lock); #endif + mutex_lock(&chdev->file_mutex); + list_add_tail(&priv->list, &chdev->file_list); + mutex_unlock(&chdev->file_mutex); filp->private_data = priv; goto out; diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 19b0c33f4a62a..fdcf742b2adbc 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -454,8 +454,12 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, 0, 0xffff); ret = rio_add_device(rdev); - if (ret) - goto cleanup; + if (ret) { + if (rswitch) + kfree(rswitch->route_table); + put_device(&rdev->dev); + return NULL; + } rio_dev_get(rdev); diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 606986c5ba2c9..fcab174e58888 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -2267,11 +2267,16 @@ int rio_register_mport(struct rio_mport *port) atomic_set(&port->state, RIO_DEVICE_RUNNING); res = device_register(&port->dev); - if (res) + if (res) { dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n", port->id, res); - else + mutex_lock(&rio_mport_list_lock); + list_del(&port->node); + mutex_unlock(&rio_mport_list_lock); + put_device(&port->dev); + } else { 
dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id); + } return res; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 6e3f3511e7ddd..6dd698b2d0af3 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -980,7 +980,7 @@ static int drms_uA_update(struct regulator_dev *rdev) /* get input voltage */ input_uV = 0; if (rdev->supply) - input_uV = regulator_get_voltage(rdev->supply); + input_uV = regulator_get_voltage_rdev(rdev->supply->rdev); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) { @@ -1428,7 +1428,13 @@ static int set_machine_constraints(struct regulator_dev *rdev) if (rdev->supply_name && !rdev->supply) return -EPROBE_DEFER; - if (rdev->supply) { + /* If the supplying regulator has already been enabled, + * don't enable it again here: a regulator that is only + * boot-on is not meant to bump its supply's use_count. + */ + if (rdev->supply && + (rdev->constraints->always_on || + !regulator_is_enabled(rdev->supply))) { ret = regulator_enable(rdev->supply); if (ret < 0) { _regulator_put(rdev->supply); @@ -1472,6 +1478,7 @@ static int set_supply(struct regulator_dev *rdev, rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); if (rdev->supply == NULL) { + module_put(supply_rdev->owner); err = -ENOMEM; return err; } @@ -1645,7 +1652,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); if (regulator == NULL) { - kfree(supply_name); + kfree_const(supply_name); return NULL; } @@ -1775,6 +1782,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev, node = of_get_regulator(dev, supply); if (node) { r = of_find_regulator_by_node(node); + of_node_put(node); if (r) return r; @@ -2544,7 +2552,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev) * expired, return -ETIMEDOUT.
*/ if (rdev->desc->poll_enabled_time) { - unsigned int time_remaining = delay; + int time_remaining = delay; while (time_remaining > 0) { _regulator_enable_delay(rdev->desc->poll_enabled_time); @@ -2596,13 +2604,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev) */ static int _regulator_handle_consumer_enable(struct regulator *regulator) { + int ret; struct regulator_dev *rdev = regulator->rdev; lockdep_assert_held_once(&rdev->mutex.base); regulator->enable_count++; - if (regulator->uA_load && regulator->enable_count == 1) - return drms_uA_update(rdev); + if (regulator->uA_load && regulator->enable_count == 1) { + ret = drms_uA_update(rdev); + if (ret) + regulator->enable_count--; + return ret; + } return 0; } @@ -4923,6 +4936,7 @@ static void regulator_dev_release(struct device *dev) { struct regulator_dev *rdev = dev_get_drvdata(dev); + debugfs_remove_recursive(rdev->debugfs); kfree(rdev->constraints); of_node_put(rdev->dev.of_node); kfree(rdev); @@ -5392,15 +5406,20 @@ regulator_register(const struct regulator_desc *regulator_desc, regulator_remove_coupling(rdev); mutex_unlock(®ulator_list_mutex); wash: + regulator_put(rdev->supply); kfree(rdev->coupling_desc.coupled_rdevs); mutex_lock(®ulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(®ulator_list_mutex); + put_device(&rdev->dev); + rdev = NULL; clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); + if (rdev && rdev->dev.of_node) + of_node_put(rdev->dev.of_node); + kfree(rdev); kfree(config); - put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); @@ -5429,7 +5448,6 @@ void regulator_unregister(struct regulator_dev *rdev) mutex_lock(®ulator_list_mutex); - debugfs_remove_recursive(rdev->debugfs); WARN_ON(rdev->open_count); regulator_remove_coupling(rdev); unset_regulator_supplies(rdev); diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index e01b32d1fa17d..00828f5baa972 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -498,6 +498,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c) chip->chip_irq = i2c->irq; + ret = da9211_regulator_init(chip); + if (ret < 0) { + dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret); + return ret; + } + if (chip->chip_irq != 0) { ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL, da9211_irq_handler, @@ -512,11 +518,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c) dev_warn(chip->dev, "No IRQ configured\n"); } - ret = da9211_regulator_init(chip); - - if (ret < 0) - dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret); - return ret; } diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 0a19500d3725e..44a8e500fb304 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -791,7 +791,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client, ((pfuze_chip->chip_id == PFUZE3000) ? 
"3000" : "3001")))); memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators, - sizeof(pfuze_chip->regulator_descs)); + regulator_num * sizeof(struct pfuze_regulator)); ret = pfuze_parse_regulators_dt(pfuze_chip); if (ret) diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c index 7f9d66ac37ff8..3c41b71a1f529 100644 --- a/drivers/regulator/qcom_rpm-regulator.c +++ b/drivers/regulator/qcom_rpm-regulator.c @@ -802,6 +802,12 @@ static const struct rpm_regulator_data rpm_pm8018_regulators[] = { }; static const struct rpm_regulator_data rpm_pm8058_regulators[] = { + { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" }, + { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" }, + { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" }, + { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" }, + { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" }, + { "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" }, { "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" }, { "l2", QCOM_RPM_PM8058_LDO2, &pm8058_pldo, "vdd_l2_l11_l12" }, @@ -829,12 +835,6 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = { { "l24", QCOM_RPM_PM8058_LDO24, &pm8058_nldo, "vdd_l23_l24_l25" }, { "l25", QCOM_RPM_PM8058_LDO25, &pm8058_nldo, "vdd_l23_l24_l25" }, - { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" }, - { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" }, - { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" }, - { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" }, - { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" }, - { "lvs0", QCOM_RPM_PM8058_LVS0, &pm8058_switch, "vdd_l0_l1_lvs" }, { "lvs1", QCOM_RPM_PM8058_LVS1, &pm8058_switch, "vdd_l0_l1_lvs" }, @@ -843,6 +843,12 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = { }; static const struct rpm_regulator_data rpm_pm8901_regulators[] = { + { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" }, + { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" }, + { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" }, + { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" }, + { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" }, + { "l0", QCOM_RPM_PM8901_LDO0, &pm8901_nldo, "vdd_l0" }, { "l1", QCOM_RPM_PM8901_LDO1, &pm8901_pldo, "vdd_l1" }, { "l2", QCOM_RPM_PM8901_LDO2, &pm8901_pldo, "vdd_l2" }, @@ -851,12 +857,6 @@ static const struct rpm_regulator_data rpm_pm8901_regulators[] = { { "l5", QCOM_RPM_PM8901_LDO5, &pm8901_pldo, "vdd_l5" }, { "l6", QCOM_RPM_PM8901_LDO6, &pm8901_pldo, "vdd_l6" }, - { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" }, - { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" }, - { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" }, - { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" }, - { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" }, - { "lvs0", QCOM_RPM_PM8901_LVS0, &pm8901_switch, "lvs0_in" }, { "lvs1", QCOM_RPM_PM8901_LVS1, &pm8901_switch, "lvs1_in" }, { "lvs2", QCOM_RPM_PM8901_LVS2, &pm8901_switch, "lvs2_in" }, diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index 75a941fb3c2bd..1b2eee95ad3f9 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -457,6 +457,8 @@ static int slg51000_i2c_probe(struct i2c_client *client) chip->cs_gpiod = cs_gpiod; } + usleep_range(10000, 11000); + i2c_set_clientdata(client, chip); chip->chip_irq = client->irq; chip->dev = dev; diff --git 
a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index 430265c404d65..f3856750944f4 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c @@ -67,6 +67,7 @@ struct twlreg_info { #define TWL6030_CFG_STATE_SLEEP 0x03 #define TWL6030_CFG_STATE_GRP_SHIFT 5 #define TWL6030_CFG_STATE_APP_SHIFT 2 +#define TWL6030_CFG_STATE_MASK 0x03 #define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT) #define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\ TWL6030_CFG_STATE_APP_SHIFT) @@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev) if (grp < 0) return grp; grp &= P1_GRP_6030; + val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); + val = TWL6030_CFG_STATE_APP(val); } else { + val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); + val &= TWL6030_CFG_STATE_MASK; grp = 1; } - val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); - val = TWL6030_CFG_STATE_APP(val); - return grp && (val == TWL6030_CFG_STATE_ON); } @@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev) val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); - switch (TWL6030_CFG_STATE_APP(val)) { + if (info->features & TWL6032_SUBCLASS) + val &= TWL6030_CFG_STATE_MASK; + else + val = TWL6030_CFG_STATE_APP(val); + + switch (val) { case TWL6030_CFG_STATE_ON: return REGULATOR_STATUS_NORMAL; @@ -530,6 +537,7 @@ static const struct twlreg_info TWL6030_INFO_##label = { \ #define TWL6032_ADJUSTABLE_LDO(label, offset) \ static const struct twlreg_info TWL6032_INFO_##label = { \ .base = offset, \ + .features = TWL6032_SUBCLASS, \ .desc = { \ .name = #label, \ .id = TWL6032_REG_##label, \ @@ -562,6 +570,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \ #define TWL6032_ADJUSTABLE_SMPS(label, offset) \ static const struct twlreg_info TWLSMPS_INFO_##label = { \ .base = offset, \ + .features = TWL6032_SUBCLASS, \ .desc = { \ .name = #label, \ .id = TWL6032_REG_##label, \ diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 0678b417707ef..1a0d6eb9425bb 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -365,6 +365,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; @@ -472,6 +473,7 @@ static int adsp_probe(struct platform_device *pdev) detach_active_pds: adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count); free_rproc: + device_init_wakeup(adsp->dev, false); rproc_free(rproc); return ret; @@ -487,6 +489,8 @@ static int adsp_remove(struct platform_device *pdev) qcom_remove_sysmon_subdev(adsp->sysmon); qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); + adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + device_init_wakeup(adsp->dev, false); rproc_free(adsp->rproc); return 0; diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c index a26221a6f6c22..c348ea35e47c3 100644 --- a/drivers/remoteproc/qcom_sysmon.c +++ b/drivers/remoteproc/qcom_sysmon.c @@ -625,7 +625,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, if (sysmon->shutdown_irq != -ENODATA) { dev_err(sysmon->dev, "failed to retrieve shutdown-ack IRQ\n"); - return ERR_PTR(sysmon->shutdown_irq); + ret = sysmon->shutdown_irq; + kfree(sysmon); + return ERR_PTR(ret); } } else { ret = 
devm_request_threaded_irq(sysmon->dev, @@ -636,6 +638,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, if (ret) { dev_err(sysmon->dev, "failed to acquire shutdown-ack IRQ\n"); + kfree(sysmon); return ERR_PTR(ret); } } diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 369a97f3eca99..cc55ff0128cf2 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1741,12 +1741,18 @@ static void rproc_crash_handler_work(struct work_struct *work) mutex_lock(&rproc->lock); - if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) { + if (rproc->state == RPROC_CRASHED) { /* handle only the first crash detected */ mutex_unlock(&rproc->lock); return; } + if (rproc->state == RPROC_OFFLINE) { + /* Don't recover if the remote processor was stopped */ + mutex_unlock(&rproc->lock); + goto out; + } + rproc->state = RPROC_CRASHED; dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt, rproc->name); @@ -1756,6 +1762,7 @@ static void rproc_crash_handler_work(struct work_struct *work) if (!rproc->recovery_disabled) rproc_trigger_recovery(rproc); +out: pm_relax(rproc->dev.parent); } diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c index 185a333df66c5..d2408725eb2c3 100644 --- a/drivers/reset/reset-imx7.c +++ b/drivers/reset/reset-imx7.c @@ -329,6 +329,7 @@ static int imx8mp_reset_set(struct reset_controller_dev *rcdev, break; case IMX8MP_RESET_PCIE_CTRL_APPS_EN: + case IMX8MP_RESET_PCIEPHY_PERST: value = assert ? 0 : bit; break; } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 4840886532ff7..7cbed0310c09f 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1472,7 +1472,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid) cancel_work_sync(&channel->intent_work); if (channel->rpdev) { - strncpy(chinfo.name, channel->name, sizeof(chinfo.name)); + strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = RPMSG_ADDR_ANY; diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 0b1e853d8c91a..b5167ef93abf9 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1073,7 +1073,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) /* Assign public information to the rpmsg_device */ rpdev = &qsdev->rpdev; - strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE); + strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE); rpdev->src = RPMSG_ADDR_ANY; rpdev->dst = RPMSG_ADDR_ANY; @@ -1304,7 +1304,7 @@ static void qcom_channel_state_worker(struct work_struct *work) spin_unlock_irqrestore(&edge->channels_lock, flags); - strncpy(chinfo.name, channel->name, sizeof(chinfo.name)); + strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = RPMSG_ADDR_ANY; rpmsg_unregister_device(&edge->dev, &chinfo); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 58c6382a2807c..7f560937bf7cb 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr) static int cmos_read_time(struct device *dev, struct rtc_time *t) { + int ret; + /* * If pm_trace abused the RTC for storage, set the timespec to 0, * which tells the caller that this RTC value is unusable. 
@@ -229,29 +231,64 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t) if (!pm_trace_rtc_valid()) return -EIO; - /* REVISIT: if the clock has a "century" register, use - * that instead of the heuristic in mc146818_get_time(). - * That'll make Y3K compatility (year > 2070) easy! - */ - mc146818_get_time(t); + ret = mc146818_get_time(t); + if (ret < 0) { + dev_err_ratelimited(dev, "unable to read current time\n"); + return ret; + } + return 0; } static int cmos_set_time(struct device *dev, struct rtc_time *t) { - /* REVISIT: set the "century" register if available - * - * NOTE: this ignores the issue whereby updating the seconds + /* NOTE: this ignores the issue whereby updating the seconds * takes effect exactly 500ms after we write the register. * (Also queueing and other delays before we get this far.) */ return mc146818_set_time(t); } +struct cmos_read_alarm_callback_param { + struct cmos_rtc *cmos; + struct rtc_time *time; + unsigned char rtc_control; +}; + +static void cmos_read_alarm_callback(unsigned char __always_unused seconds, + void *param_in) +{ + struct cmos_read_alarm_callback_param *p = + (struct cmos_read_alarm_callback_param *)param_in; + struct rtc_time *time = p->time; + + time->tm_sec = CMOS_READ(RTC_SECONDS_ALARM); + time->tm_min = CMOS_READ(RTC_MINUTES_ALARM); + time->tm_hour = CMOS_READ(RTC_HOURS_ALARM); + + if (p->cmos->day_alrm) { + /* ignore upper bits on readback per ACPI spec */ + time->tm_mday = CMOS_READ(p->cmos->day_alrm) & 0x3f; + if (!time->tm_mday) + time->tm_mday = -1; + + if (p->cmos->mon_alrm) { + time->tm_mon = CMOS_READ(p->cmos->mon_alrm); + if (!time->tm_mon) + time->tm_mon = -1; + } + } + + p->rtc_control = CMOS_READ(RTC_CONTROL); +} + static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control; + struct cmos_read_alarm_callback_param p = { + .cmos = cmos, + .time = &t->time, + }; /* This not only a rtc_op, but also called directly */ if (!is_valid_irq(cmos->irq)) @@ -262,28 +299,18 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) * the future. */ - spin_lock_irq(&rtc_lock); - t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM); - t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM); - t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM); - - if (cmos->day_alrm) { - /* ignore upper bits on readback per ACPI spec */ - t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f; - if (!t->time.tm_mday) - t->time.tm_mday = -1; - - if (cmos->mon_alrm) { - t->time.tm_mon = CMOS_READ(cmos->mon_alrm); - if (!t->time.tm_mon) - t->time.tm_mon = -1; - } - } - - rtc_control = CMOS_READ(RTC_CONTROL); - spin_unlock_irq(&rtc_lock); + /* Some Intel chipsets disconnect the alarm registers when the clock + * update is in progress - during this time reads return bogus values + * and writes may fail silently. See for example "7th Generation Intel® + * Processor Family I/O for U/Y Platforms [...] Datasheet", section + * 27.7.1 + * + * Use the mc146818_avoid_UIP() function to avoid this. 
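+ * + * (The RTC update cycle runs once per second and, per the MC146818 + * datasheet, lasts on the order of 2 ms; mc146818_avoid_UIP() only + * runs the callback while UIP is clear and retries if it races with + * the update window.)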
+ */ + if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p)) + return -EIO; - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { if (((unsigned)t->time.tm_sec) < 0x60) t->time.tm_sec = bcd2bin(t->time.tm_sec); else @@ -312,7 +339,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) } } - t->enabled = !!(rtc_control & RTC_AIE); + t->enabled = !!(p.rtc_control & RTC_AIE); t->pending = 0; return 0; @@ -443,10 +470,57 @@ static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +struct cmos_set_alarm_callback_param { + struct cmos_rtc *cmos; + unsigned char mon, mday, hrs, min, sec; + struct rtc_wkalrm *t; +}; + +/* Note: this function may be executed by mc146818_avoid_UIP() more than + * once + */ +static void cmos_set_alarm_callback(unsigned char __always_unused seconds, + void *param_in) +{ + struct cmos_set_alarm_callback_param *p = + (struct cmos_set_alarm_callback_param *)param_in; + + /* next rtc irq must not be from previous alarm setting */ + cmos_irq_disable(p->cmos, RTC_AIE); + + /* update alarm */ + CMOS_WRITE(p->hrs, RTC_HOURS_ALARM); + CMOS_WRITE(p->min, RTC_MINUTES_ALARM); + CMOS_WRITE(p->sec, RTC_SECONDS_ALARM); + + /* the system may support an "enhanced" alarm */ + if (p->cmos->day_alrm) { + CMOS_WRITE(p->mday, p->cmos->day_alrm); + if (p->cmos->mon_alrm) + CMOS_WRITE(p->mon, p->cmos->mon_alrm); + } + + if (use_hpet_alarm()) { + /* + * FIXME the HPET alarm glue currently ignores day_alrm + * and mon_alrm ... + */ + hpet_set_alarm_time(p->t->time.tm_hour, p->t->time.tm_min, + p->t->time.tm_sec); + } + + if (p->t->enabled) + cmos_irq_enable(p->cmos, RTC_AIE); +} + static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char mon, mday, hrs, min, sec, rtc_control; + struct cmos_set_alarm_callback_param p = { + .cmos = cmos, + .t = t + }; + unsigned char rtc_control; int ret; /* This not only a rtc_op, but also called directly */ @@ -457,11 +531,11 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) if (ret < 0) return ret; - mon = t->time.tm_mon + 1; - mday = t->time.tm_mday; - hrs = t->time.tm_hour; - min = t->time.tm_min; - sec = t->time.tm_sec; + p.mon = t->time.tm_mon + 1; + p.mday = t->time.tm_mday; + p.hrs = t->time.tm_hour; + p.min = t->time.tm_min; + p.sec = t->time.tm_sec; spin_lock_irq(&rtc_lock); rtc_control = CMOS_READ(RTC_CONTROL); @@ -469,43 +543,21 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { /* Writing 0xff means "don't care" or "match all". */ - mon = (mon <= 12) ? bin2bcd(mon) : 0xff; - mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff; - hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff; - min = (min < 60) ? bin2bcd(min) : 0xff; - sec = (sec < 60) ? bin2bcd(sec) : 0xff; - } - - spin_lock_irq(&rtc_lock); - - /* next rtc irq must not be from previous alarm setting */ - cmos_irq_disable(cmos, RTC_AIE); - - /* update alarm */ - CMOS_WRITE(hrs, RTC_HOURS_ALARM); - CMOS_WRITE(min, RTC_MINUTES_ALARM); - CMOS_WRITE(sec, RTC_SECONDS_ALARM); - - /* the system may support an "enhanced" alarm */ - if (cmos->day_alrm) { - CMOS_WRITE(mday, cmos->day_alrm); - if (cmos->mon_alrm) - CMOS_WRITE(mon, cmos->mon_alrm); - } - - if (use_hpet_alarm()) { - /* - * FIXME the HPET alarm glue currently ignores day_alrm - * and mon_alrm ...
- */ - hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, - t->time.tm_sec); + p.mon = (p.mon <= 12) ? bin2bcd(p.mon) : 0xff; + p.mday = (p.mday >= 1 && p.mday <= 31) ? bin2bcd(p.mday) : 0xff; + p.hrs = (p.hrs < 24) ? bin2bcd(p.hrs) : 0xff; + p.min = (p.min < 60) ? bin2bcd(p.min) : 0xff; + p.sec = (p.sec < 60) ? bin2bcd(p.sec) : 0xff; } - if (t->enabled) - cmos_irq_enable(cmos, RTC_AIE); - - spin_unlock_irq(&rtc_lock); + /* + * Some Intel chipsets disconnect the alarm registers when the clock + * update is in progress - during this time writes fail silently. + * + * Use mc146818_avoid_UIP() to avoid this. + */ + if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p)) + return -EIO; cmos->alarm_expires = rtc_tm_to_time64(&t->time); @@ -652,11 +704,10 @@ static struct cmos_rtc cmos_rtc; static irqreturn_t cmos_interrupt(int irq, void *p) { - unsigned long flags; u8 irqstat; u8 rtc_control; - spin_lock_irqsave(&rtc_lock, flags); + spin_lock(&rtc_lock); /* When the HPET interrupt handler calls us, the interrupt * status is passed as arg1 instead of the irq number. But @@ -690,7 +741,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); } - spin_unlock_irqrestore(&rtc_lock, flags); + spin_unlock(&rtc_lock); if (is_intr(irqstat)) { rtc_update_irq(p, 1, irqstat); @@ -699,6 +750,168 @@ static irqreturn_t cmos_interrupt(int irq, void *p) return IRQ_NONE; } +#ifdef CONFIG_ACPI + +#include + +static u32 rtc_handler(void *context) +{ + struct device *dev = context; + struct cmos_rtc *cmos = dev_get_drvdata(dev); + unsigned char rtc_control = 0; + unsigned char rtc_intr; + unsigned long flags; + + + /* + * Always update rtc irq when ACPI is used as RTC Alarm. + * Or else, ACPI SCI is enabled during suspend/resume only, + * update rtc irq in that case. + */ + if (cmos_use_acpi_alarm()) + cmos_interrupt(0, (void *)cmos->rtc); + else { + /* Fix me: can we use cmos_interrupt() here as well? */ + spin_lock_irqsave(&rtc_lock, flags); + if (cmos_rtc.suspend_ctrl) + rtc_control = CMOS_READ(RTC_CONTROL); + if (rtc_control & RTC_AIE) { + cmos_rtc.suspend_ctrl &= ~RTC_AIE; + CMOS_WRITE(rtc_control, RTC_CONTROL); + rtc_intr = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, rtc_intr); + } + spin_unlock_irqrestore(&rtc_lock, flags); + } + + pm_wakeup_hard_event(dev); + acpi_clear_event(ACPI_EVENT_RTC); + acpi_disable_event(ACPI_EVENT_RTC, 0); + return ACPI_INTERRUPT_HANDLED; +} + +static void acpi_rtc_event_setup(struct device *dev) +{ + if (acpi_disabled) + return; + + acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); + /* + * After the RTC handler is installed, the Fixed_RTC event should + * be disabled. Only when the RTC alarm is set will it be enabled. 
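+ * + * (rtc_wake_on() and rtc_wake_off() below are the hooks that later + * re-enable and disable the event when the alarm is armed as a + * wakeup source.)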
+ */ + acpi_clear_event(ACPI_EVENT_RTC); + acpi_disable_event(ACPI_EVENT_RTC, 0); +} + +static void acpi_rtc_event_cleanup(void) +{ + if (acpi_disabled) + return; + + acpi_remove_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler); +} + +static void rtc_wake_on(struct device *dev) +{ + acpi_clear_event(ACPI_EVENT_RTC); + acpi_enable_event(ACPI_EVENT_RTC, 0); +} + +static void rtc_wake_off(struct device *dev) +{ + acpi_disable_event(ACPI_EVENT_RTC, 0); +} + +#ifdef CONFIG_X86 +/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ +static void use_acpi_alarm_quirks(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; + + if (!is_hpet_enabled()) + return; + + if (dmi_get_bios_year() < 2015) + return; + + use_acpi_alarm = true; +} +#else +static inline void use_acpi_alarm_quirks(void) { } +#endif + +static void acpi_cmos_wake_setup(struct device *dev) +{ + if (acpi_disabled) + return; + + use_acpi_alarm_quirks(); + + cmos_rtc.wake_on = rtc_wake_on; + cmos_rtc.wake_off = rtc_wake_off; + + /* ACPI tables bug workaround. */ + if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { + dev_dbg(dev, "bogus FADT month_alarm (%d)\n", + acpi_gbl_FADT.month_alarm); + acpi_gbl_FADT.month_alarm = 0; + } + + cmos_rtc.day_alrm = acpi_gbl_FADT.day_alarm; + cmos_rtc.mon_alrm = acpi_gbl_FADT.month_alarm; + cmos_rtc.century = acpi_gbl_FADT.century; + + if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) + dev_info(dev, "RTC can wake from S4\n"); + + /* RTC always wakes from S1/S2/S3, and often S4/STD */ + device_init_wakeup(dev, 1); +} + +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + acpi_event_status rtc_status; + acpi_status status; + + if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) + return; + + status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Could not get RTC status\n"); + } else if (rtc_status & ACPI_EVENT_FLAG_SET) { + unsigned char mask; + *rtc_control &= ~RTC_AIE; + CMOS_WRITE(*rtc_control, RTC_CONTROL); + mask = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, mask); + } +} + +#else /* !CONFIG_ACPI */ + +static inline void acpi_rtc_event_setup(struct device *dev) +{ +} + +static inline void acpi_rtc_event_cleanup(void) +{ +} + +static inline void acpi_cmos_wake_setup(struct device *dev) +{ +} + +static inline void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ +} +#endif /* CONFIG_ACPI */ + #ifdef CONFIG_PNP #define INITSECTION @@ -782,19 +995,27 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) if (info->address_space) address_space = info->address_space; - if (info->rtc_day_alarm && info->rtc_day_alarm < 128) - cmos_rtc.day_alrm = info->rtc_day_alarm; - if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128) - cmos_rtc.mon_alrm = info->rtc_mon_alarm; - if (info->rtc_century && info->rtc_century < 128) - cmos_rtc.century = info->rtc_century; + cmos_rtc.day_alrm = info->rtc_day_alarm; + cmos_rtc.mon_alrm = info->rtc_mon_alarm; + cmos_rtc.century = info->rtc_century; if (info->wake_on && info->wake_off) { cmos_rtc.wake_on = info->wake_on; cmos_rtc.wake_off = info->wake_off; } + } else { + acpi_cmos_wake_setup(dev); } + if (cmos_rtc.day_alrm >= 128) + cmos_rtc.day_alrm = 0; + + if (cmos_rtc.mon_alrm >= 128) + cmos_rtc.mon_alrm = 0; + + if (cmos_rtc.century >= 128) + cmos_rtc.century = 0; + cmos_rtc.dev = dev; dev_set_drvdata(dev, &cmos_rtc); @@ -806,6 
+1027,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); + if (!mc146818_does_rtc_work()) { + dev_warn(dev, "broken or not accessible\n"); + retval = -ENXIO; + goto cleanup1; + } + spin_lock_irq(&rtc_lock); if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) { @@ -876,6 +1103,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) if (rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg)) dev_err(dev, "nvmem registration failed\n"); + /* + * Everything has gone well so far, so by default register a handler for + * the ACPI RTC fixed event. + */ + if (!info) + acpi_rtc_event_setup(dev); + dev_info(dev, "%s%s, %d bytes nvram%s\n", !is_valid_irq(rtc_irq) ? "no alarms" : cmos_rtc.mon_alrm ? "alarms up to one year" : @@ -921,6 +1155,9 @@ static void cmos_do_remove(struct device *dev) hpet_unregister_irq_handler(cmos_interrupt); } + if (!dev_get_platdata(dev)) + acpi_rtc_event_cleanup(); + cmos->rtc = NULL; ports = cmos->iomem; @@ -1054,7 +1291,9 @@ static void cmos_check_wkalrm(struct device *dev) * ACK the rtc irq here */ if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { + local_irq_disable(); cmos_interrupt(0, (void *)cmos->rtc); + local_irq_enable(); return; } @@ -1068,9 +1307,6 @@ static void cmos_check_wkalrm(struct device *dev) } } -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control); - static int __maybe_unused cmos_resume(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -1137,174 +1373,16 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); * predate even PNPBIOS should set up platform_bus devices. */ -#ifdef CONFIG_ACPI - -#include - -static u32 rtc_handler(void *context) -{ - struct device *dev = context; - struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control = 0; - unsigned char rtc_intr; - unsigned long flags; - - - /* - * Always update rtc irq when ACPI is used as RTC Alarm. - * Or else, ACPI SCI is enabled during suspend/resume only, - * update rtc irq in that case. - */ - if (cmos_use_acpi_alarm()) - cmos_interrupt(0, (void *)cmos->rtc); - else { - /* Fix me: can we use cmos_interrupt() here as well? */ - spin_lock_irqsave(&rtc_lock, flags); - if (cmos_rtc.suspend_ctrl) - rtc_control = CMOS_READ(RTC_CONTROL); - if (rtc_control & RTC_AIE) { - cmos_rtc.suspend_ctrl &= ~RTC_AIE; - CMOS_WRITE(rtc_control, RTC_CONTROL); - rtc_intr = CMOS_READ(RTC_INTR_FLAGS); - rtc_update_irq(cmos->rtc, 1, rtc_intr); - } - spin_unlock_irqrestore(&rtc_lock, flags); - } - - pm_wakeup_hard_event(dev); - acpi_clear_event(ACPI_EVENT_RTC); - acpi_disable_event(ACPI_EVENT_RTC, 0); - return ACPI_INTERRUPT_HANDLED; -} - -static inline void rtc_wake_setup(struct device *dev) -{ - acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); - /* - * After the RTC handler is installed, the Fixed_RTC event should - * be disabled. Only when the RTC alarm is set will it be enabled. 
- */ - acpi_clear_event(ACPI_EVENT_RTC); - acpi_disable_event(ACPI_EVENT_RTC, 0); -} - -static void rtc_wake_on(struct device *dev) -{ - acpi_clear_event(ACPI_EVENT_RTC); - acpi_enable_event(ACPI_EVENT_RTC, 0); -} - -static void rtc_wake_off(struct device *dev) -{ - acpi_disable_event(ACPI_EVENT_RTC, 0); -} - -#ifdef CONFIG_X86 -/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ -static void use_acpi_alarm_quirks(void) -{ - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) - return; - - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return; - - if (!is_hpet_enabled()) - return; - - if (dmi_get_bios_year() < 2015) - return; - - use_acpi_alarm = true; -} -#else -static inline void use_acpi_alarm_quirks(void) { } -#endif - -/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find - * its device node and pass extra config data. This helps its driver use - * capabilities that the now-obsolete mc146818 didn't have, and informs it - * that this board's RTC is wakeup-capable (per ACPI spec). - */ -static struct cmos_rtc_board_info acpi_rtc_info; - -static void cmos_wake_setup(struct device *dev) -{ - if (acpi_disabled) - return; - - use_acpi_alarm_quirks(); - - rtc_wake_setup(dev); - acpi_rtc_info.wake_on = rtc_wake_on; - acpi_rtc_info.wake_off = rtc_wake_off; - - /* workaround bug in some ACPI tables */ - if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { - dev_dbg(dev, "bogus FADT month_alarm (%d)\n", - acpi_gbl_FADT.month_alarm); - acpi_gbl_FADT.month_alarm = 0; - } - - acpi_rtc_info.rtc_day_alarm = acpi_gbl_FADT.day_alarm; - acpi_rtc_info.rtc_mon_alarm = acpi_gbl_FADT.month_alarm; - acpi_rtc_info.rtc_century = acpi_gbl_FADT.century; - - /* NOTE: S4_RTC_WAKE is NOT currently useful to Linux */ - if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) - dev_info(dev, "RTC can wake from S4\n"); - - dev->platform_data = &acpi_rtc_info; - - /* RTC always wakes from S1/S2/S3, and often S4/STD */ - device_init_wakeup(dev, 1); -} - -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control) -{ - struct cmos_rtc *cmos = dev_get_drvdata(dev); - acpi_event_status rtc_status; - acpi_status status; - - if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) - return; - - status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); - if (ACPI_FAILURE(status)) { - dev_err(dev, "Could not get RTC status\n"); - } else if (rtc_status & ACPI_EVENT_FLAG_SET) { - unsigned char mask; - *rtc_control &= ~RTC_AIE; - CMOS_WRITE(*rtc_control, RTC_CONTROL); - mask = CMOS_READ(RTC_INTR_FLAGS); - rtc_update_irq(cmos->rtc, 1, mask); - } -} - -#else - -static void cmos_wake_setup(struct device *dev) -{ -} - -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control) -{ -} - -#endif - #ifdef CONFIG_PNP #include static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { - cmos_wake_setup(&pnp->dev); + int irq; if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { - unsigned int irq = 0; + irq = 0; #ifdef CONFIG_X86 /* Some machines contain a PNP entry for the RTC, but * don't define the IRQ. 
It should always be safe to
@@ -1313,13 +1391,11 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
 		if (nr_legacy_irqs())
 			irq = RTC_IRQ;
 #endif
-		return cmos_do_probe(&pnp->dev,
-				pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
 	} else {
-		return cmos_do_probe(&pnp->dev,
-				pnp_get_resource(pnp, IORESOURCE_IO, 0),
-				pnp_irq(pnp, 0));
+		irq = pnp_irq(pnp, 0);
 	}
+
+	return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
 }

 static void cmos_pnp_remove(struct pnp_dev *pnp)
@@ -1406,7 +1482,6 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
 	int irq;

 	cmos_of_init(pdev);
-	cmos_wake_setup(&pdev->dev);

 	if (RTC_IOMAPPED)
 		resource = platform_get_resource(pdev, IORESOURCE_IO, 0);
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index 7025cf3fb9f8d..03267ac657474 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -112,7 +112,7 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
 		return err;

 	century = (dt->tm_year / 100) + 19;
-	err = regmap_write(map, DS1347_CENTURY_REG, century);
+	err = regmap_write(map, DS1347_CENTURY_REG, bin2bcd(century));
 	if (err)
 		return err;
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index b036ff33fbe61..347655d24b5d3 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -9,40 +9,143 @@
 #endif

 /*
- * Returns true if a clock update is in progress
+ * Execute a function while the UIP (Update-in-progress) bit of the RTC is
+ * unset.
+ *
+ * Warning: callback may be executed more than once.
  */
-static inline unsigned char mc146818_is_updating(void)
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+			void *param)
 {
-	unsigned char uip;
+	int i;
 	unsigned long flags;
+	unsigned char seconds;

-	spin_lock_irqsave(&rtc_lock, flags);
-	uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
-	spin_unlock_irqrestore(&rtc_lock, flags);
-	return uip;
+	for (i = 0; i < 10; i++) {
+		spin_lock_irqsave(&rtc_lock, flags);
+
+		/*
+		 * Check whether there is an update in progress during which the
+		 * readout is unspecified. The maximum update time is ~2ms. Poll
+		 * every msec for completion.
+		 *
+		 * Store the second value before checking UIP so a long lasting
+		 * NMI which happens to hit after the UIP check cannot make
+		 * an update cycle invisible.
+		 */
+		seconds = CMOS_READ(RTC_SECONDS);
+
+		if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+			spin_unlock_irqrestore(&rtc_lock, flags);
+			mdelay(1);
+			continue;
+		}
+
+		/* Revalidate the above readout */
+		if (seconds != CMOS_READ(RTC_SECONDS)) {
+			spin_unlock_irqrestore(&rtc_lock, flags);
+			continue;
+		}
+
+		if (callback)
+			callback(seconds, param);
+
+		/*
+		 * Check for the UIP bit again. If it is set now then
+		 * the above values may contain garbage.
+		 */
+		if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+			spin_unlock_irqrestore(&rtc_lock, flags);
+			mdelay(1);
+			continue;
+		}
+
+		/*
+		 * An NMI might have interrupted the above sequence so check
+		 * whether the seconds value has changed which indicates that
+		 * the NMI took longer than the UIP bit was set. Unlikely, but
+		 * possible and there is also virt...
+		 */
+		if (seconds != CMOS_READ(RTC_SECONDS)) {
+			spin_unlock_irqrestore(&rtc_lock, flags);
+			continue;
+		}
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		return true;
+	}
+	return false;
 }
+EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);

-unsigned int mc146818_get_time(struct rtc_time *time)
+/*
+ * If the UIP (Update-in-progress) bit of the RTC is set for more than
+ * 10ms, the RTC is apparently broken or not present.
+ */
+bool mc146818_does_rtc_work(void)
+{
+	int i;
+	unsigned char val;
+	unsigned long flags;
+
+	for (i = 0; i < 10; i++) {
+		spin_lock_irqsave(&rtc_lock, flags);
+		val = CMOS_READ(RTC_FREQ_SELECT);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		if ((val & RTC_UIP) == 0)
+			return true;
+
+		mdelay(1);
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
+
+int mc146818_get_time(struct rtc_time *time)
 {
 	unsigned char ctrl;
 	unsigned long flags;
+	unsigned int iter_count = 0;
 	unsigned char century = 0;
+	bool retry;

 #ifdef CONFIG_MACH_DECSTATION
 	unsigned int real_year;
 #endif

+again:
+	if (iter_count > 10) {
+		memset(time, 0, sizeof(*time));
+		return -EIO;
+	}
+	iter_count++;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+
 	/*
-	 * read RTC once any update in progress is done. The update
-	 * can take just over 2ms. We wait 20ms. There is no need to
-	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
-	 * If you need to know *exactly* when a second has started, enable
-	 * periodic update complete interrupts, (via ioctl) and then
-	 * immediately read /dev/rtc which will block until you get the IRQ.
-	 * Once the read clears, read the RTC time (again via ioctl). Easy.
+	 * Check whether there is an update in progress during which the
+	 * readout is unspecified. The maximum update time is ~2ms. Poll
+	 * every msec for completion.
+	 *
+	 * Store the second value before checking UIP so a long lasting NMI
+	 * which happens to hit after the UIP check cannot make an update
+	 * cycle invisible.
 	 */
-	if (mc146818_is_updating())
-		mdelay(20);
+	time->tm_sec = CMOS_READ(RTC_SECONDS);
+
+	if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+		spin_unlock_irqrestore(&rtc_lock, flags);
+		mdelay(1);
+		goto again;
+	}
+
+	/* Revalidate the above readout */
+	if (time->tm_sec != CMOS_READ(RTC_SECONDS)) {
+		spin_unlock_irqrestore(&rtc_lock, flags);
+		goto again;
+	}

 	/*
 	 * Only the values that we read from the RTC are set. We leave
@@ -50,8 +153,6 @@ unsigned int mc146818_get_time(struct rtc_time *time)
 	 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
 	 * by the RTC when initially set to a non-zero value.
 	 */
-	spin_lock_irqsave(&rtc_lock, flags);
-	time->tm_sec = CMOS_READ(RTC_SECONDS);
 	time->tm_min = CMOS_READ(RTC_MINUTES);
 	time->tm_hour = CMOS_READ(RTC_HOURS);
 	time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
@@ -66,8 +167,24 @@ unsigned int mc146818_get_time(struct rtc_time *time)
 		century = CMOS_READ(acpi_gbl_FADT.century);
 #endif
 	ctrl = CMOS_READ(RTC_CONTROL);
+	/*
+	 * Check for the UIP bit again. If it is set now then
+	 * the above values may contain garbage.
+	 */
+	retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
+	/*
+	 * An NMI might have interrupted the above sequence so check whether
+	 * the seconds value has changed which indicates that the NMI took
+	 * longer than the UIP bit was set. Unlikely, but possible and
+	 * there is also virt...
+ */ + retry |= time->tm_sec != CMOS_READ(RTC_SECONDS); + spin_unlock_irqrestore(&rtc_lock, flags); + if (retry) + goto again; + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { time->tm_sec = bcd2bin(time->tm_sec); @@ -95,7 +212,7 @@ unsigned int mc146818_get_time(struct rtc_time *time) time->tm_mon--; - return RTC_24H; + return 0; } EXPORT_SYMBOL_GPL(mc146818_get_time); @@ -132,7 +249,6 @@ int mc146818_set_time(struct rtc_time *time) if (yrs > 255) /* They are unsigned */ return -EINVAL; - spin_lock_irqsave(&rtc_lock, flags); #ifdef CONFIG_MACH_DECSTATION real_yrs = yrs; leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) || @@ -161,16 +277,16 @@ int mc146818_set_time(struct rtc_time *time) /* These limits and adjustments are independent of * whether the chip is in binary mode or not. */ - if (yrs > 169) { - spin_unlock_irqrestore(&rtc_lock, flags); + if (yrs > 169) return -EINVAL; - } if (yrs >= 100) yrs -= 100; - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) - || RTC_ALWAYS_BCD) { + spin_lock_irqsave(&rtc_lock, flags); + save_control = CMOS_READ(RTC_CONTROL); + spin_unlock_irqrestore(&rtc_lock, flags); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); @@ -180,6 +296,7 @@ int mc146818_set_time(struct rtc_time *time) century = bin2bcd(century); } + spin_lock_irqsave(&rtc_lock, flags); save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c index d349cef09cb7c..48595b00ebb39 100644 --- a/drivers/rtc/rtc-mxc_v2.c +++ b/drivers/rtc/rtc-mxc_v2.c @@ -337,8 +337,10 @@ static int mxc_rtc_probe(struct platform_device *pdev) } pdata->rtc = devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(pdata->rtc)) + if (IS_ERR(pdata->rtc)) { + clk_disable_unprepare(pdata->clk); return PTR_ERR(pdata->rtc); + } pdata->rtc->ops = &mxc_rtc_ops; pdata->rtc->range_max = U32_MAX; diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 62684ca3a665e..449204d84c61d 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -167,10 +167,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) if (ret) return ret; - alrm->time.tm_sec = bcd2bin(buf[0]); - alrm->time.tm_min = bcd2bin(buf[1]); - alrm->time.tm_hour = bcd2bin(buf[2]); - alrm->time.tm_mday = bcd2bin(buf[3]); + alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f); + alrm->time.tm_min = bcd2bin(buf[1] & 0x7f); + alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f); + alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f); ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val); if (ret) @@ -430,7 +430,7 @@ static int pcf85063_clkout_control(struct clk_hw *hw, bool enable) unsigned int buf; int ret; - ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf); + ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf); if (ret < 0) return ret; buf &= PCF85063_REG_CLKO_F_MASK; diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c index 2b69467446548..7be1ca1633fcf 100644 --- a/drivers/rtc/rtc-pic32.c +++ b/drivers/rtc/rtc-pic32.c @@ -324,16 +324,16 @@ static int pic32_rtc_probe(struct platform_device *pdev) spin_lock_init(&pdata->alarm_lock); + pdata->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(pdata->rtc)) + return PTR_ERR(pdata->rtc); + clk_prepare_enable(pdata->clk); pic32_rtc_enable(pdata, 1); device_init_wakeup(&pdev->dev, 1); - pdata->rtc = 
devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(pdata->rtc)) - return PTR_ERR(pdata->rtc); - pdata->rtc->ops = &pic32_rtcops; pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; pdata->rtc->range_max = RTC_TIMESTAMP_END_2099; diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index 0263d996b8a86..cc7f6c4216bc8 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -32,6 +32,14 @@ #define SNVS_LPPGDR_INIT 0x41736166 #define CNTR_TO_SECS_SH 15 +/* The maximum RTC clock cycles that are allowed to pass between two + * consecutive clock counter register reads. If the values are corrupted a + * bigger difference is expected. The RTC frequency is 32kHz. With 320 cycles + * we end at 10ms which should be enough for most cases. If it once takes + * longer than expected we do a retry. + */ +#define MAX_RTC_READ_DIFF_CYCLES 320 + struct snvs_rtc_data { struct rtc_device *rtc; struct regmap *regmap; @@ -56,6 +64,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data) static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) { u64 read1, read2; + s64 diff; unsigned int timeout = 100; /* As expected, the registers might update between the read of the LSB @@ -66,7 +75,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) do { read2 = read1; read1 = rtc_read_lpsrt(data); - } while (read1 != read2 && --timeout); + diff = read1 - read2; + } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); if (!timeout) dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); @@ -78,13 +88,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb) { u32 count1, count2; + s32 diff; unsigned int timeout = 100; regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); do { count2 = count1; regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); - } while (count1 != count2 && --timeout); + diff = count1 - count2; + } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); if (!timeout) { dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); return -ETIMEDOUT; diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c index 0c65448b85eef..7d53f7e2febcc 100644 --- a/drivers/rtc/rtc-st-lpc.c +++ b/drivers/rtc/rtc-st-lpc.c @@ -238,6 +238,7 @@ static int st_rtc_probe(struct platform_device *pdev) rtc->clkrate = clk_get_rate(rtc->clk); if (!rtc->clkrate) { + clk_disable_unprepare(rtc->clk); dev_err(&pdev->dev, "Unable to fetch clock rate\n"); return -EINVAL; } diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index dc78a523a69f2..b6b938aa66158 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device) struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) { struct dasd_eckd_private *alias_priv, *private = base_device->private; - struct alias_pav_group *group = private->pavgroup; struct alias_lcu *lcu = private->lcu; struct dasd_device *alias_device; + struct alias_pav_group *group; unsigned long flags; - if (!group || !lcu) + if (!lcu) return NULL; if (lcu->pav == NO_PAV || lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING)) @@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) } spin_lock_irqsave(&lcu->lock, flags); + group = private->pavgroup; + if (!group) { + spin_unlock_irqrestore(&lcu->lock, flags); + return NULL; 
+ } alias_device = group->next; if (!alias_device) { if (list_empty(&group->aliaslist)) { diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 7749deb614d75..53d22975a32fd 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4627,7 +4627,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, struct dasd_device *basedev; struct req_iterator iter; struct dasd_ccw_req *cqr; - unsigned int first_offs; unsigned int trkcount; unsigned long *idaws; unsigned int size; @@ -4661,7 +4660,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / DASD_RAW_SECTORS_PER_TRACK; trkcount = last_trk - first_trk + 1; - first_offs = 0; if (rq_data_dir(req) == READ) cmd = DASD_ECKD_CCW_READ_TRACK; @@ -4705,13 +4703,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, if (use_prefix) { prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev, - startdev, 1, first_offs + 1, trkcount, 0, 0); + startdev, 1, 0, trkcount, 0, 0); } else { define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0); ccw[-1].flags |= CCW_FLAG_CC; data += sizeof(struct DE_eckd_data); - locate_record_ext(ccw++, data, first_trk, first_offs + 1, + locate_record_ext(ccw++, data, first_trk, 0, trkcount, cmd, basedev, 0, 0); } diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index d06809eac16d3..fb0e8f1cabdcb 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -865,16 +865,9 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) /** * Start transmission of a packet. * Called from generic network device layer. - * - * skb Pointer to buffer containing the packet. - * dev Pointer to interface struct. - * - * returns 0 if packet consumed, !0 if packet rejected. - * Note: If we return !0, then the packet is free'd by - * the generic network layer. 
*/ /* first merge version - leaving both functions separated */ -static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev) { struct ctcm_priv *priv = dev->ml_priv; @@ -917,7 +910,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) } /* unmerged MPC variant of ctcm_tx */ -static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) { int len = 0; struct ctcm_priv *priv = dev->ml_priv; diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 06a322bdced6d..7e743f4717a91 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1518,9 +1518,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) /** * Packet transmit function called by network stack */ -static int -__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, - struct net_device *dev) +static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, + struct net_device *dev) { struct lcs_header *header; int rc = NETDEV_TX_OK; @@ -1581,8 +1580,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, return rc; } -static int -lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lcs_card *card; int rc; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 260860cf3aa18..a2f403c4ec382 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1260,15 +1260,8 @@ static int netiucv_close(struct net_device *dev) /** * Start transmission of a packet. * Called from generic network device layer. - * - * @param skb Pointer to buffer containing the packet. - * @param dev Pointer to interface struct. - * - * @return 0 if packet consumed, !0 if packet rejected. - * Note: If we return !0, then the packet is free'd by - * the generic network layer. 
*/ -static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev) { struct netiucv_priv *privptr = netdev_priv(dev); int rc; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 8401c42db5419..524947bf21b97 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -866,7 +866,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req); struct zfcp_adapter *adapter = req->adapter; struct zfcp_qdio *qdio = adapter->qdio; - int req_id = req->req_id; + unsigned long req_id = req->req_id; zfcp_reqlist_add(adapter->req_list, req); diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 3337b1e80412b..f6f92033132a8 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2014,7 +2014,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) retval = pci_enable_device(pdev); if (retval) { TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device"); - goto out_disable_device; + return -ENODEV; } pci_set_master(pdev); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0f9274960dc6b..30afcbbe1f862 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2504,6 +2504,7 @@ static int __init fcoe_init(void) out_free: mutex_unlock(&fcoe_config_mutex); + fcoe_transport_detach(&fcoe_sw_transport); out_destroy: destroy_workqueue(fcoe_wq); return rc; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index ffef2c8eddc64..68d8027d5108e 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); error = device_register(&ctlr->dev); - if (error) - goto out_del_q2; + if (error) { + destroy_workqueue(ctlr->devloss_work_q); + destroy_workqueue(ctlr->work_q); + put_device(&ctlr->dev); + return NULL; + } return ctlr; -out_del_q2: - destroy_workqueue(ctlr->devloss_work_q); - ctlr->devloss_work_q = NULL; out_del_q: destroy_workqueue(ctlr->work_q); ctlr->work_q = NULL; @@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, fcf->selected = new_fcf->selected; error = device_register(&fcf->dev); - if (error) - goto out_del; + if (error) { + put_device(&fcf->dev); + goto out; + } fcf->state = FCOE_FCF_STATE_CONNECTED; list_add_tail(&fcf->peers, &ctlr->fcfs); return fcf; -out_del: - kfree(fcf); out: return NULL; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 1feca45384c7a..e5b9229310a0f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1408,7 +1408,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) device->linkrate = phy->sas_phy.linkrate; hisi_hba->hw->setup_itct(hisi_hba, sas_dev); - } else + } else if (!port->port_attached) port->id = 0xff; } } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index dfe7e6370d84f..cd41dc061d874 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2738,7 +2738,6 @@ static int slave_configure_v3_hw(struct scsi_device *sdev) struct hisi_hba *hisi_hba = shost_priv(shost); struct device *dev = hisi_hba->dev; int ret = sas_slave_configure(sdev); - unsigned int max_sectors; if (ret) return 
ret; @@ -2756,12 +2755,6 @@ static int slave_configure_v3_hw(struct scsi_device *sdev) } } - /* Set according to IOMMU IOVA caching limit */ - max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue), - (PAGE_SIZE * 32) >> SECTOR_SHIFT); - - blk_queue_max_hw_sectors(sdev->request_queue, max_sectors); - return 0; } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8df70c92911dd..a44a098dbb9c7 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5834,7 +5834,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; - sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); if (sh == NULL) { dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; @@ -8904,7 +8904,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) destroy_workqueue(h->monitor_ctlr_wq); h->monitor_ctlr_wq = NULL; } - kfree(h); + hpda_free_ctlr_info(h); return rc; } @@ -9764,7 +9764,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h) return 0; free_sas_phy: - hpsa_free_sas_phy(hpsa_sas_phy); + sas_phy_free(hpsa_sas_phy->phy); + kfree(hpsa_sas_phy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); free_sas_node: @@ -9800,10 +9801,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); if (rc) - goto free_sas_port; + goto free_sas_rphy; return 0; +free_sas_rphy: + sas_rphy_free(rphy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); device->sas_port = NULL; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index f6d6539c657f0..b793e342ab7c6 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -635,8 +635,13 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost) memset(vhost->async_crq.msgs, 0, PAGE_SIZE); vhost->async_crq.cur = 0; - list_for_each_entry(tgt, &vhost->targets, queue) - ibmvfc_del_tgt(tgt); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (vhost->client_migrated) + tgt->need_login = 1; + else + ibmvfc_del_tgt(tgt); + } + scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -2822,9 +2827,12 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) /* We need to re-setup the interpartition connection */ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); vhost->client_migrated = 1; + + scsi_block_requests(vhost->host); ibmvfc_purge_requests(vhost, DID_REQUEUE); - ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); + wake_up(&vhost->work_wait_q); } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); ibmvfc_purge_requests(vhost, DID_ERROR); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 90e8a538b078b..a5e6fbd86ad45 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -10870,11 +10870,19 @@ static struct notifier_block ipr_notifier = { **/ static int __init ipr_init(void) { + int rc; + ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", IPR_DRIVER_VERSION, IPR_DRIVER_DATE); register_reboot_notifier(&ipr_notifier); - return pci_register_driver(&ipr_driver); + rc = pci_register_driver(&ipr_driver); + if (rc) { + 
unregister_reboot_notifier(&ipr_notifier); + return rc; + } + + return 0; } /** diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index df47557a02a3c..252d7881f99c2 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -558,6 +558,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; + mutex_init(&tcp_sw_conn->sock_lock); + tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto free_conn; @@ -592,11 +594,15 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) { - struct iscsi_session *session = conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct socket *sock = tcp_sw_conn->sock; + /* + * The iscsi transport class will make sure we are not called in + * parallel with start, stop, bind and destroys. However, this can be + * called twice if userspace does a stop then a destroy. + */ if (!sock) return; @@ -604,9 +610,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) iscsi_sw_tcp_conn_restore_callbacks(conn); sock_put(sock->sk); - spin_lock_bh(&session->frwd_lock); + mutex_lock(&tcp_sw_conn->sock_lock); tcp_sw_conn->sock = NULL; - spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&tcp_sw_conn->sock_lock); sockfd_put(sock); } @@ -658,7 +664,6 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading) { - struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; @@ -678,10 +683,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, if (err) goto free_socket; - spin_lock_bh(&session->frwd_lock); + mutex_lock(&tcp_sw_conn->sock_lock); /* bind iSCSI connection and socket */ tcp_sw_conn->sock = sock; - spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&tcp_sw_conn->sock_lock); /* setup Socket parameters */ sk = sock->sk; @@ -717,8 +722,15 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn, break; case ISCSI_PARAM_DATADGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); + + mutex_lock(&tcp_sw_conn->sock_lock); + if (!tcp_sw_conn->sock) { + mutex_unlock(&tcp_sw_conn->sock_lock); + return -ENOTCONN; + } tcp_sw_conn->sendpage = conn->datadgst_en ? 
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage; + mutex_unlock(&tcp_sw_conn->sock_lock); break; case ISCSI_PARAM_MAX_R2T: return iscsi_tcp_set_max_r2t(conn, buf); @@ -733,8 +745,8 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct iscsi_tcp_conn *tcp_conn; struct sockaddr_in6 addr; struct socket *sock; int rc; @@ -744,21 +756,36 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_LOCAL_PORT: spin_lock_bh(&conn->session->frwd_lock); - if (!tcp_sw_conn || !tcp_sw_conn->sock) { + if (!conn->session->leadconn) { spin_unlock_bh(&conn->session->frwd_lock); return -ENOTCONN; } - sock = tcp_sw_conn->sock; - sock_hold(sock->sk); + /* + * The conn has been setup and bound, so just grab a ref + * incase a destroy runs while we are in the net layer. + */ + iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&conn->session->frwd_lock); + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + + mutex_lock(&tcp_sw_conn->sock_lock); + sock = tcp_sw_conn->sock; + if (!sock) { + rc = -ENOTCONN; + goto sock_unlock; + } + if (param == ISCSI_PARAM_LOCAL_PORT) rc = kernel_getsockname(sock, (struct sockaddr *)&addr); else rc = kernel_getpeername(sock, (struct sockaddr *)&addr); - sock_put(sock->sk); +sock_unlock: + mutex_unlock(&tcp_sw_conn->sock_lock); + iscsi_put_conn(conn->cls_conn); if (rc < 0) return rc; @@ -775,7 +802,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); - struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; @@ -785,6 +812,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: + session = tcp_sw_host->session; if (!session) return -ENOTCONN; @@ -796,17 +824,21 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, } tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; - sock = tcp_sw_conn->sock; - if (!sock) { - spin_unlock_bh(&session->frwd_lock); - return -ENOTCONN; - } - sock_hold(sock->sk); + /* + * The conn has been setup and bound, so just grab a ref + * incase a destroy runs while we are in the net layer. + */ + iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&session->frwd_lock); - rc = kernel_getsockname(sock, - (struct sockaddr *)&addr); - sock_put(sock->sk); + mutex_lock(&tcp_sw_conn->sock_lock); + sock = tcp_sw_conn->sock; + if (!sock) + rc = -ENOTCONN; + else + rc = kernel_getsockname(sock, (struct sockaddr *)&addr); + mutex_unlock(&tcp_sw_conn->sock_lock); + iscsi_put_conn(conn->cls_conn); if (rc < 0) return rc; @@ -875,12 +907,14 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, if (!cls_session) goto remove_host; session = cls_session->dd_data; - tcp_sw_host = iscsi_host_priv(shost); - tcp_sw_host->session = session; shost->can_queue = session->scsi_cmds_max; if (iscsi_tcp_r2tpool_alloc(session)) goto remove_session; + + /* We are now fully setup so expose the session to sysfs. 
*/ + tcp_sw_host = iscsi_host_priv(shost); + tcp_sw_host->session = session; return cls_session; remove_session: diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 791453195099c..1731956326e2b 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -28,6 +28,8 @@ struct iscsi_sw_tcp_send { struct iscsi_sw_tcp_conn { struct socket *sock; + /* Taken when accessing the sock from the netlink/sysfs interface */ + struct mutex sock_lock; struct iscsi_sw_tcp_send out; /* old values for socket callbacks */ diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 8d6bcc19359ff..51485d0251f2d 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -85,7 +85,7 @@ static int smp_execute_task_sg(struct domain_device *dev, res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { - del_timer(&task->slow_task->timer); + del_timer_sync(&task->slow_task->timer); pr_notice("executing SMP task failed:%d\n", res); break; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 134e4ee5dc481..17200b453cbbb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6670,7 +6670,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* Allocate device driver memory */ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); if (rc) - return -ENOMEM; + goto out_destroy_workqueue; /* IF Type 2 ports get initialized now. */ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= @@ -7076,6 +7076,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) lpfc_destroy_bootstrap_mbox(phba); out_free_mem: lpfc_mem_free(phba); +out_destroy_workqueue: + destroy_workqueue(phba->wq); + phba->wq = NULL; return rc; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 13022a42fd6f4..7838c7911adde 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -5198,7 +5198,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance) if (!fusion->log_to_span) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - kfree(instance->ctrl_context); return -ENOMEM; } } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 3153f164554aa..c1b76cda60dbc 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2822,23 +2822,22 @@ static int _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; - int dma_mask; if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma || - dma_get_required_mask(&pdev->dev) <= 32) - dma_mask = 32; + dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) + ioc->dma_mask = 32; /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) - dma_mask = 63; + ioc->dma_mask = 63; else - dma_mask = 64; + ioc->dma_mask = 64; - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask))) + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask))) return -ENODEV; - if (dma_mask > 32) { + if (ioc->dma_mask > 32) { ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); } else { @@ -2848,7 +2847,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) 
si_meminfo(&s); ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", - dma_mask, convert_to_kb(s.totalram)); + ioc->dma_mask, convert_to_kb(s.totalram)); return 0; } @@ -4902,10 +4901,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) dma_pool_free(ioc->pcie_sgl_dma_pool, ioc->pcie_sg_lookup[i].pcie_sgl, ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->pcie_sg_lookup[i].pcie_sgl = NULL; } dma_pool_destroy(ioc->pcie_sgl_dma_pool); } - if (ioc->config_page) { dexitprintk(ioc, ioc_info(ioc, "config_page(0x%p): free\n", @@ -4960,6 +4959,89 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz) return 0; } +/** + * _base_reduce_hba_queue_depth- Retry with reduced queue depth + * @ioc: Adapter object + * + * Return: 0 for success, non-zero for failure. + **/ +static inline int +_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc) +{ + int reduce_sz = 64; + + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +/** + * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory + * for pcie sgl pools. + * @ioc: Adapter object + * @sz: DMA Pool size + * @ct: Chain tracker + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ct; + + ioc->pcie_sgl_dma_pool = + dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, + ioc->page_size, 0); + if (!ioc->pcie_sgl_dma_pool) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n"); + return -ENOMEM; + } + + ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; + ioc->chains_per_prp_buffer = + min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->pcie_sg_lookup[i].pcie_sgl = + dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, + &ioc->pcie_sg_lookup[i].pcie_sgl_dma); + if (!ioc->pcie_sg_lookup[i].pcie_sgl) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + + if (!mpt3sas_check_same_4gb_region( + (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) { + ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n", + ioc->pcie_sg_lookup[i].pcie_sgl, + (unsigned long long) + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + + for (j = 0; j < ioc->chains_per_prp_buffer; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + ct->chain_buffer = + ioc->pcie_sg_lookup[i].pcie_sgl + + (j * ioc->chain_segment_sz); + ct->chain_buffer_dma = + ioc->pcie_sg_lookup[i].pcie_sgl_dma + + (j * ioc->chain_segment_sz); + } + } + dinitprintk(ioc, ioc_info(ioc, + "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); + dinitprintk(ioc, ioc_info(ioc, + "Number of chains can fit in a PRP page(%d)\n", + ioc->chains_per_prp_buffer)); + return 0; +} + /** * base_alloc_rdpq_dma_pool - Allocating DMA'able memory * for reply queues. 
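The guard in _base_allocate_pcie_sgl_pool() above exists because the NVMe PRP buffers carved out of each pool element must not straddle a 4 GiB boundary; mpt3sas_check_same_4gb_region() amounts to comparing the upper 32 address bits of the first and last byte of the buffer. A minimal sketch of that predicate, with a hypothetical name and plain u64 addresses rather than the driver's long-cast virtual address:

	#include <linux/types.h>

	/* Sketch: true if [start, start + sz) stays inside one 4 GiB region (sz >= 1). */
	static inline bool same_4gb_region(u64 start, u32 sz)
	{
		return (start >> 32) == ((start + sz - 1) >> 32);
	}

When the check fails, the allocator above sets ioc->use_32bit_dma and returns -EAGAIN, and the try_32bit_dma path added below tears down the pools and re-runs every allocation under a 32-bit DMA mask, falling back to a reduced queue depth if the mask was already 32 bit.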
@@ -5058,7 +5140,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) unsigned short sg_tablesize; u16 sge_size; int i, j; - int ret = 0; + int ret = 0, rc = 0; struct chain_tracker *ct; dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -5357,6 +5439,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) * be required for NVMe PRP's, only each set of NVMe blocks will be * contiguous, so a new set is allocated for each possible I/O. */ + ioc->chains_per_prp_buffer = 0; if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { nvme_blocks_needed = @@ -5371,43 +5454,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) goto out; } sz = nvme_blocks_needed * ioc->page_size; - ioc->pcie_sgl_dma_pool = - dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0); - if (!ioc->pcie_sgl_dma_pool) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n"); - goto out; - } - - ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; - ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer, - ioc->chains_needed_per_io); - - for (i = 0; i < ioc->scsiio_depth; i++) { - ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( - ioc->pcie_sgl_dma_pool, GFP_KERNEL, - &ioc->pcie_sg_lookup[i].pcie_sgl_dma); - if (!ioc->pcie_sg_lookup[i].pcie_sgl) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); - goto out; - } - for (j = 0; j < ioc->chains_per_prp_buffer; j++) { - ct = &ioc->chain_lookup[i].chains_per_smid[j]; - ct->chain_buffer = - ioc->pcie_sg_lookup[i].pcie_sgl + - (j * ioc->chain_segment_sz); - ct->chain_buffer_dma = - ioc->pcie_sg_lookup[i].pcie_sgl_dma + - (j * ioc->chain_segment_sz); - } - } - - dinitprintk(ioc, - ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->scsiio_depth, sz, - (sz * ioc->scsiio_depth) / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n", - ioc->chains_per_prp_buffer)); + rc = _base_allocate_pcie_sgl_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; total_sz += sz * ioc->scsiio_depth; } @@ -5577,6 +5628,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->shost->sg_tablesize); return 0; +try_32bit_dma: + _base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + /* Change dma coherent mask to 32 bit and reallocate */ + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; + out: return -ENOMEM; } @@ -7239,6 +7303,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->rdpq_array_enable_assigned = 0; ioc->use_32bit_dma = false; + ioc->dma_mask = 64; if (ioc->is_aero_ioc) ioc->base_readl = &_base_readl_aero; else diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index bc8beb10f3fc3..823bbe64a477f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1257,6 +1257,7 @@ struct MPT3SAS_ADAPTER { u16 thresh_hold; u8 high_iops_queues; u32 drv_support_bitmap; + u32 dma_mask; bool enable_sdev_max_qd; bool use_32bit_dma; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8418b59b3743b..c3a5978b0efac 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3501,6 +3501,7 @@ static struct fw_event_work 
*dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) fw_event = list_first_entry(&ioc->fw_event_list, struct fw_event_work, list); list_del_init(&fw_event->list); + fw_event_work_put(fw_event); } spin_unlock_irqrestore(&ioc->fw_event_lock, flags); @@ -3559,7 +3560,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) if (cancel_work_sync(&fw_event->work)) fw_event_work_put(fw_event); - fw_event_work_put(fw_event); } ioc->fw_events_cleanup = 0; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 6ec5b7f33dfd7..b58f4d9c296a3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -712,6 +712,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, if ((sas_rphy_add(rphy))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); + sas_rphy_free(rphy); + rphy = NULL; } if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index e64457f53da86..f48ef47546f4d 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1917,6 +1917,27 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled) fc_vport_setlink(vn_port); } + /* Set symbolic node name */ + if (base_qedf->pdev->device == QL45xxx) + snprintf(fc_host_symbolic_name(vn_port->host), 256, + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); + + if (base_qedf->pdev->device == QL41xxx) + snprintf(fc_host_symbolic_name(vn_port->host), 256, + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); + + /* Set supported speed */ + fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds; + + /* Set speed */ + vn_port->link_speed = n_port->link_speed; + + /* Set port type */ + fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV; + + /* Set maxframe size */ + fc_host_maxframe_size(vn_port->host) = n_port->mfs; + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", vn_port); @@ -3671,11 +3692,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) err1: scsi_host_put(lport->host); err0: - if (qedf) { - QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); - - clear_bit(QEDF_PROBING, &qedf->flags); - } return rc; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index d0407f44de78d..61b9dc511d904 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -3262,11 +3262,34 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .bsg_timeout = qla24xx_bsg_timeout, }; +static uint +qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds) +{ + uint supported_speeds = FC_PORTSPEED_UNKNOWN; + + if (speeds & FDMI_PORT_SPEED_64GB) + supported_speeds |= FC_PORTSPEED_64GBIT; + if (speeds & FDMI_PORT_SPEED_32GB) + supported_speeds |= FC_PORTSPEED_32GBIT; + if (speeds & FDMI_PORT_SPEED_16GB) + supported_speeds |= FC_PORTSPEED_16GBIT; + if (speeds & FDMI_PORT_SPEED_8GB) + supported_speeds |= FC_PORTSPEED_8GBIT; + if (speeds & FDMI_PORT_SPEED_4GB) + supported_speeds |= FC_PORTSPEED_4GBIT; + if (speeds & FDMI_PORT_SPEED_2GB) + supported_speeds |= FC_PORTSPEED_2GBIT; + if (speeds & FDMI_PORT_SPEED_1GB) + supported_speeds |= FC_PORTSPEED_1GBIT; + + return supported_speeds; +} + void qla2x00_init_host_attr(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - u32 speeds = FC_PORTSPEED_UNKNOWN; + u32 speeds = 0, fdmi_speed = 0; fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; 
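qla2x00_get_host_supported_speeds() added above translates the FDMI speed bits reported by the firmware into scsi_transport_fc's FC_PORTSPEED flags one bit at a time. The same mapping can be written table-driven; a sketch reusing the driver's existing constants (helper name and table are hypothetical):

	/* Sketch: table-driven FDMI -> FC_PORTSPEED translation. */
	static const struct {
		uint fdmi_bit;
		uint fc_bit;
	} qla_speed_map[] = {
		{ FDMI_PORT_SPEED_64GB, FC_PORTSPEED_64GBIT },
		{ FDMI_PORT_SPEED_32GB, FC_PORTSPEED_32GBIT },
		{ FDMI_PORT_SPEED_16GB, FC_PORTSPEED_16GBIT },
		{ FDMI_PORT_SPEED_8GB,  FC_PORTSPEED_8GBIT },
		{ FDMI_PORT_SPEED_4GB,  FC_PORTSPEED_4GBIT },
		{ FDMI_PORT_SPEED_2GB,  FC_PORTSPEED_2GBIT },
		{ FDMI_PORT_SPEED_1GB,  FC_PORTSPEED_1GBIT },
	};

	static uint fdmi_to_fc_portspeed(uint speeds)
	{
		uint supported = FC_PORTSPEED_UNKNOWN;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(qla_speed_map); i++)
			if (speeds & qla_speed_map[i].fdmi_bit)
				supported |= qla_speed_map[i].fc_bit;
		return supported;
	}

Both forms are equivalent; the table variant only buys less repetition if further speeds are added later.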
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); @@ -3276,7 +3299,8 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; - speeds = qla25xx_fdmi_port_speed_capability(ha); + fdmi_speed = qla25xx_fdmi_port_speed_capability(ha); + speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed); fc_host_supported_speeds(vha->host) = speeds; } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ba823e8eb902b..ecb30c2738b8b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6817,14 +6817,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) if (ha->flags.msix_enabled) { if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { - if (IS_QLA2071(ha)) { - /* 4 ports Baker: Enable Interrupt Handshake */ - icb->msix_atio = 0; - icb->firmware_options_2 |= cpu_to_le32(BIT_26); - } else { - icb->msix_atio = cpu_to_le16(msix->entry); - icb->firmware_options_2 &= cpu_to_le32(~BIT_26); - } + icb->msix_atio = cpu_to_le16(msix->entry); + icb->firmware_options_2 &= cpu_to_le32(~BIT_26); ql_dbg(ql_dbg_init, vha, 0xf072, "Registering ICB vector 0x%x for atio que.\n", msix->entry); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 5eb959b5f7010..7cfc6db817634 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1878,6 +1878,13 @@ static int resp_readcap16(struct scsi_cmnd *scp, arr[14] |= 0x40; } + /* + * Since the scsi_debug READ CAPACITY implementation always reports the + * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. + */ + if (devip->zmodel == BLK_ZONED_HM) + arr[12] |= 1 << 4; + arr[15] = sdebug_lowest_aligned & 0xff; if (have_dif_prot) { @@ -3612,7 +3619,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return illegal_condition_result; } - lrdp = kzalloc(lbdof_blen, GFP_ATOMIC); + lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN); if (lrdp == NULL) return SCSI_MLQUEUE_HOST_BUSY; if (sdebug_verbose) @@ -4268,7 +4275,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (ret) return ret; - arr = kcalloc(lb_size, vnum, GFP_ATOMIC); + arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -4339,7 +4346,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD), max_zones); - arr = kzalloc(alloc_len, GFP_ATOMIC); + arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -7079,8 +7086,12 @@ static int sdebug_add_host_helper(int per_host_idx) dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); error = device_register(&sdbg_host->dev); - if (error) + if (error) { + spin_lock(&sdebug_host_list_lock); + list_del(&sdbg_host->host_list); + spin_unlock(&sdebug_host_list_lock); goto clean; + } ++sdebug_num_hosts; return 0; @@ -7092,7 +7103,10 @@ static int sdebug_add_host_helper(int per_host_idx) kfree(sdbg_devinfo->zstate); kfree(sdbg_devinfo); } - kfree(sdbg_host); + if (sdbg_host->dev.release) + put_device(&sdbg_host->dev); + else + kfree(sdbg_host); pr_warn("%s: failed, errno=%d\n", __func__, -error); return error; } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 
f11f51e2465f5..0c4bc42b55c20 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -306,19 +306,11 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) if (rtn == BLK_EH_DONE) { /* - * Set the command to complete first in order to prevent a real - * completion from releasing the command while error handling - * is using it. If the command was already completed, then the - * lower level driver beat the timeout handler, and it is safe - * to return without escalating error recovery. - * - * If timeout handling lost the race to a real completion, the - * block layer may ignore that due to a fake timeout injection, - * so return RESET_TIMER to allow error handling another shot - * at this command. + * If scsi_done() has already set SCMD_STATE_COMPLETE, do not + * modify *scmd. */ if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) - return BLK_EH_RESET_TIMER; + return BLK_EH_DONE; if (scsi_abort_command(scmd) != SUCCESS) { set_host_byte(scmd, DID_TIME_OUT); scsi_eh_scmd_add(scmd); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 8e474b1452495..6f7c4d41c51de 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1129,8 +1129,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, * that no LUN is present, so don't add sdev in these cases. * Two specific examples are: * 1) NetApp targets: return PQ=1, PDT=0x1f - * 2) IBM/2145 targets: return PQ=1, PDT=0 - * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" + * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" * in the UFI 1.0 spec (we cannot rely on reserved bits). * * References: @@ -1144,8 +1143,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, * PDT=00h Direct-access device (floppy) * PDT=1Fh none (no FDD connected to the requested logical unit) */ - if (((result[0] >> 5) == 1 || - (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) && + if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && + (result[0] & 0x1f) == 0x1f && !scsi_is_wlun(lun)) { SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, "scsi scan: peripheral device type" diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 42db9c52208e6..6cc4d0792e3d0 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -815,6 +815,14 @@ store_state_field(struct device *dev, struct device_attribute *attr, } mutex_lock(&sdev->state_mutex); + switch (sdev->sdev_state) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + break; + default: + mutex_unlock(&sdev->state_mutex); + return -EINVAL; + } if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { ret = 0; } else { diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ef7cd7520e7c7..092bd6a3d64a1 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1674,6 +1674,13 @@ static const char *iscsi_session_state_name(int state) return name; } +static char *iscsi_session_target_state_name[] = { + [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND", + [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED", + [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED", + [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING", +}; + int iscsi_session_chkready(struct iscsi_cls_session *session) { unsigned long flags; @@ -1805,9 +1812,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data) if ((scan_data->channel == SCAN_WILD_CARD || scan_data->channel == 0) && (scan_data->id == SCAN_WILD_CARD || - scan_data->id == id)) + 
scan_data->id == id)) { scsi_scan_target(&session->dev, 0, id, scan_data->lun, scan_data->rescan); + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_SCANNED; + spin_unlock_irqrestore(&session->lock, flags); + } } user_scan_exit: @@ -1996,31 +2007,41 @@ static void __iscsi_unbind_session(struct work_struct *work) struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; unsigned int target_id; + bool remove_target = true; ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); /* Prevent new scans and make sure scanning is not in progress */ mutex_lock(&ihost->mutex); spin_lock_irqsave(&session->lock, flags); - if (session->target_id == ISCSI_MAX_TARGET) { + if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) { + remove_target = false; + } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - goto unbind_session_exit; + ISCSI_DBG_TRANS_SESSION(session, + "Skipping target unbinding: Session is unbound/unbinding.\n"); + return; } + session->target_state = ISCSI_SESSION_TARGET_UNBINDING; target_id = session->target_id; session->target_id = ISCSI_MAX_TARGET; spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - scsi_remove_target(&session->dev); + if (remove_target) + scsi_remove_target(&session->dev); if (session->ida_used) ida_simple_remove(&iscsi_sess_ida, target_id); -unbind_session_exit: iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); + + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_UNBOUND; + spin_unlock_irqrestore(&session->lock, flags); } static void __iscsi_destroy_session(struct work_struct *work) @@ -2089,6 +2110,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) session->ida_used = true; } else session->target_id = target_id; + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_ALLOCATED; + spin_unlock_irqrestore(&session->lock, flags); dev_set_name(&session->dev, "session%u", session->sid); err = device_add(&session->dev); @@ -4343,6 +4367,19 @@ iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); +static ssize_t +show_priv_session_target_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); + + return sysfs_emit(buf, "%s\n", + iscsi_session_target_state_name[session->target_state]); +} + +static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO, + show_priv_session_target_state, NULL); + static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) @@ -4445,6 +4482,7 @@ static struct attribute *iscsi_session_attrs[] = { &dev_attr_sess_boot_target.attr, &dev_attr_priv_sess_recovery_tmo.attr, &dev_attr_priv_sess_state.attr, + &dev_attr_priv_sess_target_state.attr, &dev_attr_priv_sess_creator.attr, &dev_attr_sess_chap_out_idx.attr, &dev_attr_sess_chap_in_idx.attr, @@ -4558,6 +4596,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, return S_IRUGO | S_IWUSR; else if (attr == &dev_attr_priv_sess_state.attr) return S_IRUGO; + else if (attr == &dev_attr_priv_sess_target_state.attr) + return S_IRUGO; 
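For reference, the target-state machine these hunks wire up, condensed from the transport changes above (a summary, not text from the patch):

/*
 * ISCSI_SESSION_TARGET_UNBOUND   -> ALLOCATED   iscsi_add_session()
 * ISCSI_SESSION_TARGET_ALLOCATED -> SCANNED     iscsi_user_scan_session()
 * ALLOCATED or SCANNED           -> UNBINDING   __iscsi_unbind_session()
 * ISCSI_SESSION_TARGET_UNBINDING -> UNBOUND     __iscsi_unbind_session(),
 *                                               after target removal
 */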
else if (attr == &dev_attr_priv_sess_creator.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_id.attr) diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 4a96fb05731d2..c6256fdc24b10 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -716,12 +716,17 @@ int sas_phy_add(struct sas_phy *phy) int error; error = device_add(&phy->dev); - if (!error) { - transport_add_device(&phy->dev); - transport_configure_device(&phy->dev); + if (error) + return error; + + error = transport_add_device(&phy->dev); + if (error) { + device_del(&phy->dev); + return error; } + transport_configure_device(&phy->dev); - return error; + return 0; } EXPORT_SYMBOL(sas_phy_add); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bd068d3bb455d..58f66176bcb28 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1074,6 +1074,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) struct bio *bio = rq->bio; u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + unsigned int nr_bytes = blk_rq_bytes(rq); blk_status_t ret; if (sdkp->device->no_write_same) @@ -1110,7 +1111,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) */ rq->__data_len = sdp->sector_size; ret = scsi_alloc_sgtables(cmd); - rq->__data_len = blk_rq_bytes(rq); + rq->__data_len = nr_bytes; return ret; } diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index e9ccfb97773f1..7cf871323b2c4 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -318,6 +318,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) ret); put_device(&snic->shost->shost_gendev); + spin_lock_irqsave(snic->shost->host_lock, flags); + list_del(&tgt->list); + spin_unlock_irqrestore(snic->shost->host_lock, flags); kfree(tgt); tgt = NULL; diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index d4f10c0d813cf..a3bce11ed4b4b 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -668,16 +668,17 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) return 0; case PASSTHRU_CMD: if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { - struct st_drvver ver; + const struct st_drvver ver = { + .major = ST_VER_MAJOR, + .minor = ST_VER_MINOR, + .oem = ST_OEM, + .build = ST_BUILD_VER, + .signature[0] = PASSTHRU_SIGNATURE, + .console_id = host->max_id - 1, + .host_no = hba->host->host_no, + }; size_t cp_len = sizeof(ver); - ver.major = ST_VER_MAJOR; - ver.minor = ST_VER_MINOR; - ver.oem = ST_OEM; - ver.build = ST_BUILD_VER; - ver.signature[0] = PASSTHRU_SIGNATURE; - ver.console_id = host->max_id - 1; - ver.host_no = hba->host->host_no; cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); cmd->result = sizeof(ver) == cp_len ? DID_OK << 16 | COMMAND_COMPLETE << 8 : diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 7ac1090d4379c..3fa8a0c94bdc1 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -356,16 +356,21 @@ enum storvsc_request_type { }; /* - * SRB status codes and masks; a subset of the codes used here. + * SRB status codes and masks. In the 8-bit field, the two high order bits + * are flags, while the remaining 6 bits are an integer status code. The + * definitions here include only the subset of the integer status codes that + * are tested for in this driver. 
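+ *
+ * Worked example (value assumed for illustration, not from a trace):
+ * srb_status 0xc4 decomposes as SRB_STATUS_AUTOSENSE_VALID (0x80) |
+ * SRB_STATUS_QUEUE_FROZEN (0x40) | SRB_STATUS_ERROR (0x04), so
+ * SRB_STATUS(0xc4) == (0xc4 & ~0xc0) == 0x04 and the new switch
+ * statement takes the SRB_STATUS_ERROR case.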
*/ - #define SRB_STATUS_AUTOSENSE_VALID 0x80 #define SRB_STATUS_QUEUE_FROZEN 0x40 -#define SRB_STATUS_INVALID_LUN 0x20 -#define SRB_STATUS_SUCCESS 0x01 -#define SRB_STATUS_ABORTED 0x02 -#define SRB_STATUS_ERROR 0x04 -#define SRB_STATUS_DATA_OVERRUN 0x12 + +/* SRB status integer codes */ +#define SRB_STATUS_SUCCESS 0x01 +#define SRB_STATUS_ABORTED 0x02 +#define SRB_STATUS_ERROR 0x04 +#define SRB_STATUS_INVALID_REQUEST 0x06 +#define SRB_STATUS_DATA_OVERRUN 0x12 +#define SRB_STATUS_INVALID_LUN 0x20 #define SRB_STATUS(status) \ (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) @@ -995,38 +1000,25 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, void (*process_err_fn)(struct work_struct *work); struct hv_host_device *host_dev = shost_priv(host); - /* - * In some situations, Hyper-V sets multiple bits in the - * srb_status, such as ABORTED and ERROR. So process them - * individually, with the most specific bits first. - */ - - if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) { - set_host_byte(scmnd, DID_NO_CONNECT); - process_err_fn = storvsc_remove_lun; - goto do_work; - } + switch (SRB_STATUS(vm_srb->srb_status)) { + case SRB_STATUS_ERROR: + case SRB_STATUS_ABORTED: + case SRB_STATUS_INVALID_REQUEST: + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) { + /* Check for capacity change */ + if ((asc == 0x2a) && (ascq == 0x9)) { + process_err_fn = storvsc_device_scan; + /* Retry the I/O that triggered this. */ + set_host_byte(scmnd, DID_REQUEUE); + goto do_work; + } - if (vm_srb->srb_status & SRB_STATUS_ABORTED) { - if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID && - /* Capacity data has changed */ - (asc == 0x2a) && (ascq == 0x9)) { - process_err_fn = storvsc_device_scan; /* - * Retry the I/O that triggered this. + * Otherwise, let upper layer deal with the + * error when sense message is present */ - set_host_byte(scmnd, DID_REQUEUE); - goto do_work; - } - } - - if (vm_srb->srb_status & SRB_STATUS_ERROR) { - /* - * Let upper layer deal with error when - * sense message is present. 
- */ - if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) return; + } /* * If there is an error; offline the device since all @@ -1049,6 +1041,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, default: set_host_byte(scmnd, DID_ERROR); } + return; + + case SRB_STATUS_INVALID_LUN: + set_host_byte(scmnd, DID_NO_CONNECT); + process_err_fn = storvsc_remove_lun; + goto do_work; + } return; diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c index f8c08fb9891d7..e0ffef6e93865 100644 --- a/drivers/siox/siox-core.c +++ b/drivers/siox/siox-core.c @@ -835,6 +835,8 @@ static struct siox_device *siox_device_add(struct siox_master *smaster, err_device_register: /* don't care to make the buffer smaller again */ + put_device(&sdevice->dev); + sdevice = NULL; err_buf_alloc: siox_master_unlock(smaster); diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c index 75f87b3d8b953..73a2aa3629572 100644 --- a/drivers/slimbus/stream.c +++ b/drivers/slimbus/stream.c @@ -67,10 +67,10 @@ static const int slim_presence_rate_table[] = { 384000, 768000, 0, /* Reserved */ - 110250, - 220500, - 441000, - 882000, + 11025, + 22050, + 44100, + 88200, 176400, 352800, 705600, diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index c6ec7d95bcfcc..722fd54e537cf 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -681,13 +681,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev) const struct of_device_id *of_id = NULL; struct device_node *dn; void __iomem *base; - int ret, i; + int ret, i, s; /* AON ctrl registers */ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); if (IS_ERR(base)) { pr_err("error mapping AON_CTRL\n"); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto aon_err; } ctrl.aon_ctrl_base = base; @@ -697,8 +698,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev) /* Assume standard offset */ ctrl.aon_sram = ctrl.aon_ctrl_base + AON_CTRL_SYSTEM_DATA_RAM_OFS; + s = 0; } else { ctrl.aon_sram = base; + s = 1; } writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC); @@ -708,7 +711,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev) (const void **)&ddr_phy_data); if (IS_ERR(base)) { pr_err("error mapping DDR PHY\n"); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto ddr_phy_err; } ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot; ctrl.pll_status_offset = ddr_phy_data->pll_status_offset; @@ -728,17 +732,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev) for_each_matching_node(dn, ddr_shimphy_dt_ids) { i = ctrl.num_memc; if (i >= MAX_NUM_MEMC) { + of_node_put(dn); pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC); break; } base = of_io_request_and_map(dn, 0, dn->full_name); if (IS_ERR(base)) { + of_node_put(dn); if (!ctrl.support_warm_boot) break; pr_err("error mapping DDR SHIMPHY %d\n", i); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto ddr_shimphy_err; } ctrl.memcs[i].ddr_shimphy_base = base; ctrl.num_memc++; @@ -749,14 +756,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev) for_each_matching_node(dn, brcmstb_memc_of_match) { base = of_iomap(dn, 0); if (!base) { + of_node_put(dn); pr_err("error mapping DDR Sequencer %d\n", i); - return -ENOMEM; + ret = -ENOMEM; + goto brcmstb_memc_err; } of_id = of_match_node(brcmstb_memc_of_match, dn); if (!of_id) { iounmap(base); - return -EINVAL; + of_node_put(dn); + ret = -EINVAL; + goto brcmstb_memc_err; } ddr_seq_data = of_id->data; @@ -776,21 +787,24 @@ static int 
brcmstb_pm_probe(struct platform_device *pdev) dn = of_find_matching_node(NULL, sram_dt_ids); if (!dn) { pr_err("SRAM not found\n"); - return -EINVAL; + ret = -EINVAL; + goto brcmstb_memc_err; } ret = brcmstb_init_sram(dn); of_node_put(dn); if (ret) { pr_err("error setting up SRAM for PM\n"); - return ret; + goto brcmstb_memc_err; } ctrl.pdev = pdev; ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL); - if (!ctrl.s3_params) - return -ENOMEM; + if (!ctrl.s3_params) { + ret = -ENOMEM; + goto s3_params_err; + } ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params, sizeof(*ctrl.s3_params), DMA_TO_DEVICE); @@ -810,7 +824,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev) out: kfree(ctrl.s3_params); - +s3_params_err: + iounmap(ctrl.boot_sram); +brcmstb_memc_err: + for (i--; i >= 0; i--) + iounmap(ctrl.memcs[i].ddr_ctrl); +ddr_shimphy_err: + for (i = 0; i < ctrl.num_memc; i++) + iounmap(ctrl.memcs[i].ddr_shimphy_base); + + iounmap(ctrl.memcs[0].ddr_phy_base); +ddr_phy_err: + iounmap(ctrl.aon_ctrl_base); + if (s) + iounmap(ctrl.aon_sram); +aon_err: pr_warn("PM: initialization failed with code %d\n", ret); return ret; diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig index 4df32bc4c7a6e..c5d46152d4680 100644 --- a/drivers/soc/fsl/Kconfig +++ b/drivers/soc/fsl/Kconfig @@ -24,6 +24,7 @@ config FSL_MC_DPIO tristate "QorIQ DPAA2 DPIO driver" depends on FSL_MC_BUS select SOC_BUS + select FSL_GUTS help Driver for the DPAA2 DPIO object. A DPIO provides queue and buffer management facilities for software to interact with diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index d0cf969a8fb5f..479d60abbd14f 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -63,6 +63,7 @@ config QCOM_GSBI config QCOM_LLCC tristate "Qualcomm Technologies, Inc. LLCC driver" depends on ARCH_QCOM || COMPILE_TEST + select REGMAP_MMIO help Qualcomm Technologies, Inc. platform specific Last Level Cache Controller(LLCC) driver for platforms such as, diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index f736d208362c9..660ee3aea447a 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -15,13 +15,18 @@ #include #include -struct apr { +enum { + PR_TYPE_APR = 0, +}; + +struct packet_router { struct rpmsg_endpoint *ch; struct device *dev; spinlock_t svcs_lock; spinlock_t rx_lock; struct idr svcs_idr; int dest_domain_id; + int type; struct pdr_handle *pdr; struct workqueue_struct *rxwq; struct work_struct rx_work; @@ -44,21 +49,21 @@ struct apr_rx_buf { */ int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt) { - struct apr *apr = dev_get_drvdata(adev->dev.parent); + struct packet_router *apr = dev_get_drvdata(adev->dev.parent); struct apr_hdr *hdr; unsigned long flags; int ret; - spin_lock_irqsave(&adev->lock, flags); + spin_lock_irqsave(&adev->svc.lock, flags); hdr = &pkt->hdr; hdr->src_domain = APR_DOMAIN_APPS; - hdr->src_svc = adev->svc_id; + hdr->src_svc = adev->svc.id; hdr->dest_domain = adev->domain_id; - hdr->dest_svc = adev->svc_id; + hdr->dest_svc = adev->svc.id; ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size); - spin_unlock_irqrestore(&adev->lock, flags); + spin_unlock_irqrestore(&adev->svc.lock, flags); return ret ? 
ret : hdr->pkt_size; } @@ -74,7 +79,7 @@ static void apr_dev_release(struct device *dev) static int apr_callback(struct rpmsg_device *rpdev, void *buf, int len, void *priv, u32 addr) { - struct apr *apr = dev_get_drvdata(&rpdev->dev); + struct packet_router *apr = dev_get_drvdata(&rpdev->dev); struct apr_rx_buf *abuf; unsigned long flags; @@ -100,11 +105,11 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf, return 0; } - -static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) +static int apr_do_rx_callback(struct packet_router *apr, struct apr_rx_buf *abuf) { uint16_t hdr_size, msg_type, ver, svc_id; - struct apr_device *svc = NULL; + struct pkt_router_svc *svc; + struct apr_device *adev; struct apr_driver *adrv = NULL; struct apr_resp_pkt resp; struct apr_hdr *hdr; @@ -145,12 +150,15 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) svc_id = hdr->dest_svc; spin_lock_irqsave(&apr->svcs_lock, flags); svc = idr_find(&apr->svcs_idr, svc_id); - if (svc && svc->dev.driver) - adrv = to_apr_driver(svc->dev.driver); + if (svc && svc->dev->driver) { + adev = svc_to_apr_device(svc); + adrv = to_apr_driver(adev->dev.driver); + } spin_unlock_irqrestore(&apr->svcs_lock, flags); - if (!adrv) { - dev_err(apr->dev, "APR: service is not registered\n"); + if (!adrv || !adev) { + dev_err(apr->dev, "APR: service is not registered (%d)\n", + svc_id); return -EINVAL; } @@ -164,20 +172,26 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) if (resp.payload_size > 0) resp.payload = buf + hdr_size; - adrv->callback(svc, &resp); + adrv->callback(adev, &resp); return 0; } static void apr_rxwq(struct work_struct *work) { - struct apr *apr = container_of(work, struct apr, rx_work); + struct packet_router *apr = container_of(work, struct packet_router, rx_work); struct apr_rx_buf *abuf, *b; unsigned long flags; if (!list_empty(&apr->rx_list)) { list_for_each_entry_safe(abuf, b, &apr->rx_list, node) { - apr_do_rx_callback(apr, abuf); + switch (apr->type) { + case PR_TYPE_APR: + apr_do_rx_callback(apr, abuf); + break; + default: + break; + } spin_lock_irqsave(&apr->rx_lock, flags); list_del(&abuf->node); spin_unlock_irqrestore(&apr->rx_lock, flags); @@ -201,7 +215,7 @@ static int apr_device_match(struct device *dev, struct device_driver *drv) while (id->domain_id != 0 || id->svc_id != 0) { if (id->domain_id == adev->domain_id && - id->svc_id == adev->svc_id) + id->svc_id == adev->svc.id) return 1; id++; } @@ -221,14 +235,14 @@ static int apr_device_remove(struct device *dev) { struct apr_device *adev = to_apr_device(dev); struct apr_driver *adrv; - struct apr *apr = dev_get_drvdata(adev->dev.parent); + struct packet_router *apr = dev_get_drvdata(adev->dev.parent); if (dev->driver) { adrv = to_apr_driver(dev->driver); if (adrv->remove) adrv->remove(adev); spin_lock(&apr->svcs_lock); - idr_remove(&apr->svcs_idr, adev->svc_id); + idr_remove(&apr->svcs_idr, adev->svc.id); spin_unlock(&apr->svcs_lock); } @@ -257,28 +271,39 @@ struct bus_type aprbus = { EXPORT_SYMBOL_GPL(aprbus); static int apr_add_device(struct device *dev, struct device_node *np, - const struct apr_device_id *id) + u32 svc_id, u32 domain_id) { - struct apr *apr = dev_get_drvdata(dev); + struct packet_router *apr = dev_get_drvdata(dev); struct apr_device *adev = NULL; + struct pkt_router_svc *svc; int ret; adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; - spin_lock_init(&adev->lock); + adev->svc_id = svc_id; + svc = &adev->svc; + + svc->id = svc_id; + svc->pr 
= apr; + svc->priv = adev; + svc->dev = dev; + spin_lock_init(&svc->lock); + + adev->domain_id = domain_id; - adev->svc_id = id->svc_id; - adev->domain_id = id->domain_id; - adev->version = id->svc_version; if (np) snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np); - else - strscpy(adev->name, id->name, APR_NAME_SIZE); - dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, - id->domain_id, id->svc_id); + switch (apr->type) { + case PR_TYPE_APR: + dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, + domain_id, svc_id); + break; + default: + break; + } adev->dev.bus = &aprbus; adev->dev.parent = dev; @@ -287,12 +312,20 @@ static int apr_add_device(struct device *dev, struct device_node *np, adev->dev.driver = NULL; spin_lock(&apr->svcs_lock); - idr_alloc(&apr->svcs_idr, adev, id->svc_id, - id->svc_id + 1, GFP_ATOMIC); + ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC); spin_unlock(&apr->svcs_lock); + if (ret < 0) { + dev_err(dev, "idr_alloc failed: %d\n", ret); + goto out; + } - of_property_read_string_index(np, "qcom,protection-domain", - 1, &adev->service_path); + /* Protection domain is optional, it does not exist on older platforms */ + ret = of_property_read_string_index(np, "qcom,protection-domain", + 1, &adev->service_path); + if (ret < 0 && ret != -EINVAL) { + dev_err(dev, "Failed to read second value of qcom,protection-domain\n"); + goto out; + } dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev)); @@ -302,13 +335,14 @@ static int apr_add_device(struct device *dev, struct device_node *np, put_device(&adev->dev); } +out: return ret; } static int of_apr_add_pd_lookups(struct device *dev) { const char *service_name, *service_path; - struct apr *apr = dev_get_drvdata(dev); + struct packet_router *apr = dev_get_drvdata(dev); struct device_node *node; struct pdr_service *pds; int ret; @@ -340,13 +374,14 @@ static int of_apr_add_pd_lookups(struct device *dev) static void of_register_apr_devices(struct device *dev, const char *svc_path) { - struct apr *apr = dev_get_drvdata(dev); + struct packet_router *apr = dev_get_drvdata(dev); struct device_node *node; const char *service_path; int ret; for_each_child_of_node(dev->of_node, node) { - struct apr_device_id id = { {0} }; + u32 svc_id; + u32 domain_id; /* * This function is called with svc_path NULL during @@ -376,13 +411,13 @@ static void of_register_apr_devices(struct device *dev, const char *svc_path) continue; } - if (of_property_read_u32(node, "reg", &id.svc_id)) + if (of_property_read_u32(node, "reg", &svc_id)) continue; - id.domain_id = apr->dest_domain_id; + domain_id = apr->dest_domain_id; - if (apr_add_device(dev, node, &id)) - dev_err(dev, "Failed to add apr %d svc\n", id.svc_id); + if (apr_add_device(dev, node, svc_id, domain_id)) + dev_err(dev, "Failed to add apr %d svc\n", svc_id); } } @@ -402,7 +437,7 @@ static int apr_remove_device(struct device *dev, void *svc_path) static void apr_pd_status(int state, char *svc_path, void *priv) { - struct apr *apr = (struct apr *)priv; + struct packet_router *apr = (struct packet_router *)priv; switch (state) { case SERVREG_SERVICE_STATE_UP: @@ -417,16 +452,20 @@ static void apr_pd_status(int state, char *svc_path, void *priv) static int apr_probe(struct rpmsg_device *rpdev) { struct device *dev = &rpdev->dev; - struct apr *apr; + struct packet_router *apr; int ret; apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL); if (!apr) return -ENOMEM; - ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", &apr->dest_domain_id); + ret = 
of_property_read_u32(dev->of_node, "qcom,domain", &apr->dest_domain_id); + if (ret) /* try deprecated apr-domain property */ + ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", + &apr->dest_domain_id); + apr->type = PR_TYPE_APR; if (ret) { - dev_err(dev, "APR Domain ID not specified in DT\n"); + dev_err(dev, "Domain ID not specified in DT\n"); return ret; } @@ -469,7 +508,7 @@ static int apr_probe(struct rpmsg_device *rpdev) static void apr_remove(struct rpmsg_device *rpdev) { - struct apr *apr = dev_get_drvdata(&rpdev->dev); + struct packet_router *apr = dev_get_drvdata(&rpdev->dev); pdr_handle_release(apr->pdr); device_for_each_child(&rpdev->dev, NULL, apr_remove_device); @@ -506,20 +545,20 @@ void apr_driver_unregister(struct apr_driver *drv) } EXPORT_SYMBOL_GPL(apr_driver_unregister); -static const struct of_device_id apr_of_match[] = { +static const struct of_device_id pkt_router_of_match[] = { { .compatible = "qcom,apr"}, { .compatible = "qcom,apr-v2"}, {} }; -MODULE_DEVICE_TABLE(of, apr_of_match); +MODULE_DEVICE_TABLE(of, pkt_router_of_match); -static struct rpmsg_driver apr_driver = { +static struct rpmsg_driver packet_router_driver = { .probe = apr_probe, .remove = apr_remove, .callback = apr_callback, .drv = { .name = "qcom,apr", - .of_match_table = apr_of_match, + .of_match_table = pkt_router_of_match, }, }; @@ -529,7 +568,7 @@ static int __init apr_init(void) ret = bus_register(&aprbus); if (!ret) - ret = register_rpmsg_driver(&apr_driver); + ret = register_rpmsg_driver(&packet_router_driver); else bus_unregister(&aprbus); @@ -539,7 +578,7 @@ static int __init apr_init(void) static void __exit apr_exit(void) { bus_unregister(&aprbus); - unregister_rpmsg_driver(&apr_driver); + unregister_rpmsg_driver(&packet_router_driver); } subsys_initcall(apr_init); diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c index 6298561bc29c9..fac0414c37314 100644 --- a/drivers/soc/qcom/cpr.c +++ b/drivers/soc/qcom/cpr.c @@ -1743,12 +1743,16 @@ static int cpr_probe(struct platform_device *pdev) ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); if (ret) - return ret; + goto err_remove_genpd; platform_set_drvdata(pdev, drv); cpr_debugfs_init(drv); return 0; + +err_remove_genpd: + pm_genpd_remove(&drv->pd); + return ret; } static int cpr_remove(struct platform_device *pdev) diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 2e06f48d683d1..c60fe98f03e37 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -476,7 +476,7 @@ static int qcom_llcc_probe(struct platform_device *pdev) if (ret) goto err; - drv_data->ecc_irq = platform_get_irq(pdev, 0); + drv_data->ecc_irq = platform_get_irq_optional(pdev, 0); if (drv_data->ecc_irq >= 0) { llcc_edac = platform_device_register_data(&pdev->dev, "qcom_llcc_edac", -1, drv_data, diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c index d2b558438deb7..41e9294071960 100644 --- a/drivers/soc/qcom/smem_state.c +++ b/drivers/soc/qcom/smem_state.c @@ -136,6 +136,7 @@ static void qcom_smem_state_release(struct kref *ref) struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount); list_del(&state->list); + of_node_put(state->of_node); kfree(state); } @@ -169,7 +170,7 @@ struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, kref_init(&state->refcount); - state->of_node = of_node; + state->of_node = of_node_get(of_node); state->ops = *ops; state->priv = priv; diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c 
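The qcom_smsm_probe() hunks below route every early failure through a common out_put label so the reference taken on local_node is always dropped. A minimal standalone sketch of that unwind idiom (function and step names invented for illustration):

#include <stdio.h>

static void put_node(const char *name)
{
	printf("put %s\n", name);	/* stands in for of_node_put() */
}

static int probe_sketch(int failing_step)
{
	const char *local_node = "local";	/* reference we must release */
	int ret = 0;
	int step;

	for (step = 1; step <= 3; step++) {
		if (step == failing_step) {
			ret = -1;
			goto out_put;	/* no early return may skip the put */
		}
	}
	put_node(local_node);	/* success path releases it too */
	return 0;

out_put:
	put_node(local_node);	/* single cleanup point for all errors */
	return ret;
}

int main(void)
{
	probe_sketch(2);
	return 0;
}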
index 6564f15c53190..acba67dfbc859 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -511,7 +511,7 @@ static int qcom_smsm_probe(struct platform_device *pdev) for (id = 0; id < smsm->num_hosts; id++) { ret = smsm_parse_ipc(smsm, id); if (ret < 0) - return ret; + goto out_put; } /* Acquire the main SMSM state vector */ @@ -519,13 +519,14 @@ static int qcom_smsm_probe(struct platform_device *pdev) smsm->num_entries * sizeof(u32)); if (ret < 0 && ret != -EEXIST) { dev_err(&pdev->dev, "unable to allocate shared state entry\n"); - return ret; + goto out_put; } states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL); if (IS_ERR(states)) { dev_err(&pdev->dev, "Unable to acquire shared state entry\n"); - return PTR_ERR(states); + ret = PTR_ERR(states); + goto out_put; } /* Acquire the list of interrupt mask vectors */ @@ -533,13 +534,14 @@ static int qcom_smsm_probe(struct platform_device *pdev) ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size); if (ret < 0 && ret != -EEXIST) { dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n"); - return ret; + goto out_put; } intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL); if (IS_ERR(intr_mask)) { dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n"); - return PTR_ERR(intr_mask); + ret = PTR_ERR(intr_mask); + goto out_put; } /* Setup the reference to the local state bits */ @@ -550,7 +552,8 @@ static int qcom_smsm_probe(struct platform_device *pdev) smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm); if (IS_ERR(smsm->state)) { dev_err(smsm->dev, "failed to register qcom_smem_state\n"); - return PTR_ERR(smsm->state); + ret = PTR_ERR(smsm->state); + goto out_put; } /* Register handlers for remote processor entries of interest. 
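 * (For illustration: such entries typically correspond to remote hosts
 * like "modem" or "adsp"; the names are assumed, not from this patch.)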
*/ @@ -580,16 +583,19 @@ static int qcom_smsm_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, smsm); + of_node_put(local_node); return 0; unwind_interfaces: + of_node_put(node); for (id = 0; id < smsm->num_entries; id++) if (smsm->entries[id].domain) irq_domain_remove(smsm->entries[id].domain); qcom_smem_state_unregister(smsm->state); - +out_put: + of_node_put(local_node); return ret; } diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c index d4c7bd59429ec..443e38e9f30a2 100644 --- a/drivers/soc/sunxi/sunxi_sram.c +++ b/drivers/soc/sunxi/sunxi_sram.c @@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = { static struct sunxi_sram_desc sun50i_a64_sram_c = { .data = SUNXI_SRAM_DATA("C", 0x4, 24, 1, - SUNXI_SRAM_MAP(0, 1, "cpu"), - SUNXI_SRAM_MAP(1, 0, "de2")), + SUNXI_SRAM_MAP(1, 0, "cpu"), + SUNXI_SRAM_MAP(0, 1, "de2")), }; static const struct of_device_id sunxi_sram_dt_ids[] = { @@ -254,6 +254,7 @@ int sunxi_sram_claim(struct device *dev) writel(val | ((device << sram_data->offset) & mask), base + sram_data->reg); + sram_desc->claimed = true; spin_unlock(&sram_lock); return 0; @@ -318,12 +319,11 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = { .writeable_reg = sunxi_sram_regmap_accessible_reg, }; -static int sunxi_sram_probe(struct platform_device *pdev) +static int __init sunxi_sram_probe(struct platform_device *pdev) { - struct resource *res; - struct dentry *d; struct regmap *emac_clock; const struct sunxi_sramc_variant *variant; + struct device *dev = &pdev->dev; sram_dev = &pdev->dev; @@ -331,18 +331,10 @@ static int sunxi_sram_probe(struct platform_device *pdev) if (!variant) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); - of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); - - d = debugfs_create_file("sram", S_IRUGO, NULL, NULL, - &sunxi_sram_fops); - if (!d) - return -ENOMEM; - if (variant->has_emac_clock) { emac_clock = devm_regmap_init_mmio(&pdev->dev, base, &sunxi_sram_emac_clock_regmap); @@ -351,6 +343,10 @@ static int sunxi_sram_probe(struct platform_device *pdev) return PTR_ERR(emac_clock); } + of_platform_populate(dev->of_node, NULL, NULL, dev); + + debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops); + return 0; } @@ -396,9 +392,8 @@ static struct platform_driver sunxi_sram_driver = { .name = "sunxi-sram", .of_match_table = sunxi_sram_dt_match, }, - .probe = sunxi_sram_probe, }; -module_platform_driver(sunxi_sram_driver); +builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe); MODULE_AUTHOR("Maxime Ripard "); MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver"); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 976dee0364700..676807c5a2154 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -136,7 +136,6 @@ config SOC_TEGRA_FUSE def_bool y depends on ARCH_TEGRA select SOC_BUS - select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC config SOC_TEGRA_FLOWCTRL bool diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 53e36d4328d1e..20c84741639e1 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -67,7 +67,7 @@ static DEFINE_MUTEX(knav_dev_lock); * Newest followed by older ones. Search is done from start of the array * until a firmware file is found. 
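 * For example, if a hypothetical newer build "ks2_qmss_pdsp_acc48_v2.bin"
 * were prepended, it would be probed first and the older file used only
 * as a fallback (the _v2 name is illustrative, not a shipped firmware).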
*/ -const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"}; +static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"}; static bool device_ready; bool knav_qmss_device_ready(void) @@ -1782,9 +1782,9 @@ static int knav_queue_probe(struct platform_device *pdev) INIT_LIST_HEAD(&kdev->pdsps); pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); dev_err(dev, "Failed to enable QMSS\n"); return ret; } diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c index 5376f3d22f31e..1228a0cba1320 100644 --- a/drivers/soc/ti/smartreflex.c +++ b/drivers/soc/ti/smartreflex.c @@ -942,6 +942,7 @@ static int omap_sr_probe(struct platform_device *pdev) err_debugfs: debugfs_remove_recursive(sr_info->dbg_dir); err_list_del: + pm_runtime_disable(&pdev->dev); list_del(&sr_info->node); pm_runtime_put_sync(&pdev->dev); diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c index a9472e0e5d61c..27d6e25a01153 100644 --- a/drivers/soc/ux500/ux500-soc-id.c +++ b/drivers/soc/ux500/ux500-soc-id.c @@ -167,20 +167,18 @@ ATTRIBUTE_GROUPS(ux500_soc); static const char *db8500_read_soc_id(struct device_node *backupram) { void __iomem *base; - void __iomem *uid; const char *retstr; + u32 uid[5]; base = of_iomap(backupram, 0); if (!base) return NULL; - uid = base + 0x1fc0; + memcpy_fromio(uid, base + 0x1fc0, sizeof(uid)); /* Throw these device-specific numbers into the entropy pool */ - add_device_randomness(uid, 0x14); + add_device_randomness(uid, sizeof(uid)); retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x", - readl((u32 *)uid+0), - readl((u32 *)uid+1), readl((u32 *)uid+2), - readl((u32 *)uid+3), readl((u32 *)uid+4)); + uid[0], uid[1], uid[2], uid[3], uid[4]); iounmap(base); return retstr; } diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index c6d421a4b91b6..a3247692ddc07 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -501,9 +501,12 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns, return SDW_CMD_IGNORED; } - /* fill response */ - for (i = 0; i < count; i++) - msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, cdns->response_buf[i]); + if (msg->flags == SDW_MSG_FLAG_READ) { + /* fill response */ + for (i = 0; i < count; i++) + msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, + cdns->response_buf[i]); + } return SDW_CMD_OK; } diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 824d9f900aca7..4487fbb8f2e61 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -1140,8 +1140,8 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = { .prepare = intel_prepare, .hw_free = intel_hw_free, .shutdown = intel_shutdown, - .set_sdw_stream = intel_pcm_set_sdw_stream, - .get_sdw_stream = intel_get_sdw_stream, + .set_stream = intel_pcm_set_sdw_stream, + .get_stream = intel_get_sdw_stream, }; static const struct snd_soc_dai_ops intel_pdm_dai_ops = { @@ -1150,8 +1150,8 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = { .prepare = intel_prepare, .hw_free = intel_hw_free, .shutdown = intel_shutdown, - .set_sdw_stream = intel_pdm_set_sdw_stream, - .get_sdw_stream = intel_get_sdw_stream, + .set_stream = intel_pdm_set_sdw_stream, + .get_stream = intel_get_sdw_stream, }; static const struct snd_soc_component_driver dai_component = { @@ -1470,7 +1470,6 @@ int intel_master_startup(struct 
platform_device *pdev) ret = intel_register_dai(sdw); if (ret) { dev_err(dev, "DAI registration failed: %d\n", ret); - snd_soc_unregister_component(dev); goto err_interrupt; } diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index 6d22df01f3547..ac73258792e6c 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -649,8 +649,8 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream, ctrl->sruntime[dai->id] = sruntime; for_each_rtd_codec_dais(rtd, i, codec_dai) { - ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime, - substream->stream); + ret = snd_soc_dai_set_stream(codec_dai, sruntime, + substream->stream); if (ret < 0 && ret != -ENOTSUPP) { dev_err(dai->dev, "Failed to set sdw stream on %s", codec_dai->name); @@ -676,8 +676,8 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = { .hw_free = qcom_swrm_hw_free, .startup = qcom_swrm_startup, .shutdown = qcom_swrm_shutdown, - .set_sdw_stream = qcom_swrm_set_sdw_stream, - .get_sdw_stream = qcom_swrm_get_sdw_stream, + .set_stream = qcom_swrm_set_sdw_stream, + .get_stream = qcom_swrm_get_sdw_stream, }; static const struct snd_soc_component_driver qcom_swrm_dai_component = { diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 304ff2ee7d75a..79554d28f08aa 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -1860,7 +1860,7 @@ static int set_stream(struct snd_pcm_substream *substream, /* Set stream pointer on all DAIs */ for_each_rtd_dais(rtd, i, dai) { - ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream); + ret = snd_soc_dai_set_stream(dai, sdw_stream, substream->stream); if (ret < 0) { dev_err(rtd->dev, "failed to set stream pointer on dai %s", dai->name); break; @@ -1931,7 +1931,7 @@ void sdw_shutdown_stream(void *sdw_substream) /* Find stream from first CPU DAI */ dai = asoc_rtd_to_cpu(rtd, 0); - sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + sdw_stream = snd_soc_dai_get_stream(dai, substream->stream); if (IS_ERR(sdw_stream)) { dev_err(rtd->dev, "no stream found for DAI %s", dai->name); diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c index bc9d5eab3c589..8f6a1af144562 100644 --- a/drivers/spi/spi-dw-bt1.c +++ b/drivers/spi/spi-dw-bt1.c @@ -293,8 +293,10 @@ static int dw_spi_bt1_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = dw_spi_add_host(&pdev->dev, dws); - if (ret) + if (ret) { + pm_runtime_disable(&pdev->dev); goto err_disable_clk; + } platform_set_drvdata(pdev, dwsbt1); diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index c33866f747dbe..aa116cee1fd8d 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -353,7 +353,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws) * will be adjusted at the final stage of the IRQ-based SPI transfer * execution so not to lose the leftover of the incoming data. 
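 *
 * Worked example (numbers assumed for illustration): with fifo_len = 32
 * and tx_len = 10, level = min(32 / 2, 10) = 10, so TXFTLR is set to 10
 * and RXFTLR to 10 - 1 = 9. Widening the min_t() type below from u16 to
 * unsigned int keeps a tx_len above 65535 from being truncated before
 * the comparison.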
*/ - level = min_t(u16, dws->fifo_len / 2, dws->tx_len); + level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len); dw_writel(dws, DW_SPI_TXFTLR, level); dw_writel(dws, DW_SPI_RXFTLR, level - 1); diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c index a09831c62192a..32ac8f9068e87 100644 --- a/drivers/spi/spi-dw-dma.c +++ b/drivers/spi/spi-dw-dma.c @@ -127,12 +127,15 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws) dw_spi_dma_sg_burst_init(dws); + pci_dev_put(dma_dev); + return 0; free_rxchan: dma_release_channel(dws->rxchan); dws->rxchan = NULL; err_exit: + pci_dev_put(dma_dev); return -EBUSY; } diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 0e3bc0b0a5265..74b3b6ca15efb 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -434,8 +434,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx, unsigned int pre, post; unsigned int fin = spi_imx->spi_clk; - if (unlikely(fspi > fin)) - return 0; + fspi = min(fspi, fin); post = fls(fin) - fls(fspi); if (fin > fspi << post) diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index e4cb52e1fe261..6974a1c947aad 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -537,7 +537,7 @@ static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw, struct clk_divider *divider = to_clk_divider(hw); struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider); - if (!spicc->master->cur_msg || !spicc->master->busy) + if (!spicc->master->cur_msg) return 0; return clk_divider_ops.recalc_rate(hw, parent_rate); @@ -549,7 +549,7 @@ static int meson_spicc_pow2_determine_rate(struct clk_hw *hw, struct clk_divider *divider = to_clk_divider(hw); struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider); - if (!spicc->master->cur_msg || !spicc->master->busy) + if (!spicc->master->cur_msg) return -EINVAL; return clk_divider_ops.determine_rate(hw, req); @@ -561,7 +561,7 @@ static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_divider *divider = to_clk_divider(hw); struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider); - if (!spicc->master->cur_msg || !spicc->master->busy) + if (!spicc->master->cur_msg) return -EINVAL; return clk_divider_ops.set_rate(hw, rate, parent_rate); diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c index b4b9b7309b5e9..351b0ef52bbc8 100644 --- a/drivers/spi/spi-mt7621.c +++ b/drivers/spi/spi-mt7621.c @@ -340,11 +340,9 @@ static int mt7621_spi_probe(struct platform_device *pdev) return PTR_ERR(base); clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) { - dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n", - status); - return PTR_ERR(clk); - } + if (IS_ERR(clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(clk), + "unable to get SYS clock\n"); status = clk_prepare_enable(clk); if (status) diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index 0d0cd061d3563..7c992d1f4abd9 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c @@ -414,6 +414,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev) return status; err_fck: + pm_runtime_disable(&pdev->dev); clk_disable_unprepare(spi100k->fck); err_ick: clk_disable_unprepare(spi100k->ick); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index d39dec6d1c91e..f3877eeb3da65 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1199,8 +1199,10 @@ static int spi_qup_pm_resume_runtime(struct device *device) return ret; ret 
= clk_prepare_enable(controller->cclk); - if (ret) + if (ret) { + clk_disable_unprepare(controller->iclk); return ret; + } /* Disable clocks auto gaiting */ config = readl_relaxed(controller->base + QUP_CONFIG); @@ -1246,14 +1248,25 @@ static int spi_qup_resume(struct device *device) return ret; ret = clk_prepare_enable(controller->cclk); - if (ret) + if (ret) { + clk_disable_unprepare(controller->iclk); return ret; + } ret = spi_qup_set_state(controller, QUP_STATE_RESET); if (ret) - return ret; + goto disable_clk; - return spi_master_resume(master); + ret = spi_master_resume(master); + if (ret) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(controller->cclk); + clk_disable_unprepare(controller->iclk); + return ret; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index dfa7c91e13aa5..d435df1b715bb 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -84,6 +84,7 @@ #define S3C64XX_SPI_ST_TX_FIFORDY (1<<0) #define S3C64XX_SPI_PACKET_CNT_EN (1<<16) +#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0) #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4) #define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3) @@ -660,6 +661,13 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master, return 0; } +static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi) +{ + struct spi_controller *ctlr = spi->controller; + + return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX; +} + static int s3c64xx_spi_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer) @@ -1135,6 +1143,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer; master->prepare_message = s3c64xx_spi_prepare_message; master->transfer_one = s3c64xx_spi_transfer_one; + master->max_transfer_size = s3c64xx_spi_max_transfer_size; master->num_chipselect = sci->num_cs; master->dma_alignment = 8; master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index a6dfc8fef20cd..9ec37cf10c010 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -443,7 +443,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, u32 div, mbrdiv; /* Ensure spi->clk_rate is even */ - div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); + div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if @@ -941,6 +941,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL * 10, 1); + ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE); if (__ratelimit(&rs)) dev_dbg_ratelimited(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6ea7b286c80c2..857a1399850c3 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -946,6 +946,8 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, if (sgt->orig_nents) { dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); sg_free_table(sgt); + sgt->orig_nents = 0; + sgt->nents = 0; } } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 859910ec8d9f6..aee960a7d7f91 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -376,12 +376,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { /* read requests */ case 
SPI_IOC_RD_MODE: - retval = put_user(spi->mode & SPI_MODE_MASK, - (__u8 __user *)arg); - break; case SPI_IOC_RD_MODE32: - retval = put_user(spi->mode & SPI_MODE_MASK, - (__u32 __user *)arg); + tmp = spi->mode; + + { + struct spi_controller *ctlr = spi->controller; + + if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods && + ctlr->cs_gpiods[spi->chip_select]) + tmp &= ~SPI_CS_HIGH; + } + + if (cmd == SPI_IOC_RD_MODE) + retval = put_user(tmp & SPI_MODE_MASK, + (__u8 __user *)arg); + else + retval = put_user(tmp & SPI_MODE_MASK, + (__u32 __user *)arg); break; case SPI_IOC_RD_LSB_FIRST: retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0, @@ -581,7 +592,6 @@ static int spidev_open(struct inode *inode, struct file *filp) if (!spidev->tx_buffer) { spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL); if (!spidev->tx_buffer) { - dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); status = -ENOMEM; goto err_find_dev; } @@ -590,7 +600,6 @@ static int spidev_open(struct inode *inode, struct file *filp) if (!spidev->rx_buffer) { spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL); if (!spidev->rx_buffer) { - dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); status = -ENOMEM; goto err_alloc_rx_buf; } diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index bbbd311eda030..e6de2aeece8d3 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -887,7 +887,8 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb) * version 5, there is more than one APID mapped to each PPID. * The owner field for each of these mappings specifies the EE which is * allowed to write to the APID. The owner of the last (highest) APID - * for a given PPID will receive interrupts from the PPID. + * which has the IRQ owner bit set for a given PPID will receive + * interrupts from the PPID. 
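+ *
+ * Example (numbers assumed for illustration): if PPID 0x123 is mapped
+ * by APID 4 without the IRQ owner bit and again by APID 7 with it,
+ * interrupts from that PPID are delivered to the owner of APID 7.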
*/ for (i = 0; ; i++, apidd++) { offset = pmic_arb->ver_ops->apid_map_offset(i); @@ -910,16 +911,16 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb) apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID; prev_apidd = &pmic_arb->apid_data[apid]; - if (valid && is_irq_ee && - prev_apidd->write_ee == pmic_arb->ee) { + if (!valid || apidd->write_ee == pmic_arb->ee) { + /* First PPID mapping or one for this EE */ + pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID; + } else if (valid && is_irq_ee && + prev_apidd->write_ee == pmic_arb->ee) { /* * Duplicate PPID mapping after the one for this EE; * override the irq owner */ prev_apidd->irq_ee = apidd->irq_ee; - } else if (!valid || is_irq_ee) { - /* First PPID mapping or duplicate for another EE */ - pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID; } apidd->ppid = ppid; diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c index 6de8ab97d346c..d6934b6c436d1 100644 --- a/drivers/staging/comedi/drivers/adv_pci1760.c +++ b/drivers/staging/comedi/drivers/adv_pci1760.c @@ -59,7 +59,7 @@ #define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */ #define PCI1760_CMD_SET_DO 0x01 /* Set output state */ #define PCI1760_CMD_GET_DO 0x02 /* Read output status */ -#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */ +#define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */ #define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */ #define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */ #define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */ diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c index a9576f92efaa4..08443f4aa045d 100644 --- a/drivers/staging/greybus/audio_helper.c +++ b/drivers/staging/greybus/audio_helper.c @@ -3,7 +3,6 @@ * Greybus Audio Sound SoC helper APIs */ -#include #include #include #include @@ -116,10 +115,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm, { int i; struct snd_soc_dapm_widget *w, *next_w; -#ifdef CONFIG_DEBUG_FS - struct dentry *parent = dapm->debugfs_dapm; - struct dentry *debugfs_w = NULL; -#endif mutex_lock(&dapm->card->dapm_mutex); for (i = 0; i < num; i++) { @@ -139,12 +134,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm, continue; } widget++; -#ifdef CONFIG_DEBUG_FS - if (!parent) - debugfs_w = debugfs_lookup(w->name, parent); - debugfs_remove(debugfs_w); - debugfs_w = NULL; -#endif gbaudio_dapm_free_widget(w); } mutex_unlock(&dapm->card->dapm_mutex); diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c index b68304da288b5..7be44ff2c943a 100644 --- a/drivers/staging/iio/accel/adis16203.c +++ b/drivers/staging/iio/accel/adis16203.c @@ -318,3 +318,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16203"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c index 5064adce5f583..dbbbf81207f9c 100644 --- a/drivers/staging/iio/accel/adis16240.c +++ b/drivers/staging/iio/accel/adis16240.c @@ -445,3 +445,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16240"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/staging/media/meson/vdec/vdec.c 
b/drivers/staging/media/meson/vdec/vdec.c index 5ccb3846c8797..7a818ca15b37d 100644 --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c @@ -1109,6 +1109,7 @@ static int vdec_probe(struct platform_device *pdev) err_vdev_release: video_device_release(vdev); + v4l2_device_unregister(&core->v4l2_dev); return ret; } @@ -1117,6 +1118,7 @@ static int vdec_remove(struct platform_device *pdev) struct amvdec_core *core = platform_get_drvdata(pdev); video_unregister_device(core->vdev_dec); + v4l2_device_unregister(&core->v4l2_dev); return 0; } diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c index 9530e580e57a2..afced435c9070 100644 --- a/drivers/staging/media/meson/vdec/vdec_hevc.c +++ b/drivers/staging/media/meson/vdec/vdec_hevc.c @@ -167,8 +167,12 @@ static int vdec_hevc_start(struct amvdec_session *sess) clk_set_rate(core->vdec_hevc_clk, 666666666); ret = clk_prepare_enable(core->vdec_hevc_clk); - if (ret) + if (ret) { + if (core->platform->revision == VDEC_REVISION_G12A || + core->platform->revision == VDEC_REVISION_SM1) + clk_disable_unprepare(core->vdec_hevcf_clk); return ret; + } if (core->platform->revision == VDEC_REVISION_SM1) regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c index 0c934ca5adaa3..8936f5a81680c 100644 --- a/drivers/staging/media/rkisp1/rkisp1-capture.c +++ b/drivers/staging/media/rkisp1/rkisp1-capture.c @@ -1258,11 +1258,12 @@ static int rkisp1_capture_link_validate(struct media_link *link) struct rkisp1_capture *cap = video_get_drvdata(vdev); const struct rkisp1_capture_fmt_cfg *fmt = rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat); - struct v4l2_subdev_format sd_fmt; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = link->source->index, + }; int ret; - sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; - sd_fmt.pad = link->source->index; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt); if (ret) return ret; diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c index 4dcc342ac2b27..76f17dd7670f7 100644 --- a/drivers/staging/media/rkisp1/rkisp1-resizer.c +++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c @@ -500,6 +500,10 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd, sink_fmt->height = RKISP1_DEFAULT_HEIGHT; sink_fmt->field = V4L2_FIELD_NONE; sink_fmt->code = RKISP1_DEF_FMT; + sink_fmt->colorspace = V4L2_COLORSPACE_SRGB; + sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK); sink_crop->width = RKISP1_DEFAULT_WIDTH; diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c index 7013f7ce36781..ddccd97a359fa 100644 --- a/drivers/staging/media/rkvdec/rkvdec-h264.c +++ b/drivers/staging/media/rkvdec/rkvdec-h264.c @@ -1124,8 +1124,8 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx) schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000)); - writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN); - writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E); + writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN); + writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E); writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND); writel(1, rkvdec->regs + 
RKVDEC_REG_PREF_CHR_CACHE_COMMAND); diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index 1dd833757c4ee..28de90edf4cc5 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -399,6 +399,8 @@ static int cedrus_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; + platform_set_drvdata(pdev, dev); + dev->vfd = cedrus_video_device; dev->dev = &pdev->dev; dev->pdev = pdev; @@ -469,8 +471,6 @@ static int cedrus_probe(struct platform_device *pdev) goto err_m2m_mc; } - platform_set_drvdata(pdev, dev); - return 0; err_m2m_mc: diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c index a19c85c57fca5..dc5d432a09e8e 100644 --- a/drivers/staging/media/tegra-video/csi.c +++ b/drivers/staging/media/tegra-video/csi.c @@ -420,7 +420,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi, chan->csi = csi; chan->csi_port_num = port_num; chan->numlanes = lanes; - chan->of_node = node; + chan->of_node = of_node_get(node); chan->numpads = num_pads; if (num_pads & 0x2) { chan->pads[0].flags = MEDIA_PAD_FL_SINK; @@ -435,6 +435,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi, chan->mipi = tegra_mipi_request(csi->dev, node); if (IS_ERR(chan->mipi)) { ret = PTR_ERR(chan->mipi); + chan->mipi = NULL; dev_err(csi->dev, "failed to get mipi device: %d\n", ret); } @@ -620,6 +621,7 @@ static void tegra_csi_channels_cleanup(struct tegra_csi *csi) media_entity_cleanup(&subdev->entity); } + of_node_put(chan->of_node); list_del(&chan->list); kfree(chan); } diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h index c65ff73b1cdc2..be4ea81e00316 100644 --- a/drivers/staging/media/tegra-video/csi.h +++ b/drivers/staging/media/tegra-video/csi.h @@ -50,7 +50,7 @@ struct tegra_csi; * @framerate: active framerate for TPG * @h_blank: horizontal blanking for TPG active format * @v_blank: vertical blanking for TPG active format - * @mipi: mipi device for corresponding csi channel pads + * @mipi: mipi device for corresponding csi channel pads, or NULL if not applicable (TPG, error) * @pixel_rate: active pixel rate from the sensor on this channel */ struct tegra_csi_channel { diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 63752233e551f..404794503fb6d 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1490,9 +1490,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb, hdrlen += 4; } - rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); ieee->stats.rx_packets++; ieee->stats.rx_bytes += skb->len; + rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); return 1; } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index b6fee7230ce05..3871437f4708e 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -954,9 +954,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, #endif if (ieee->iw_mode == IW_MODE_MONITOR) { + unsigned int len = skb->len; + ieee80211_monitor_rx(ieee, skb, rx_stats); stats->rx_packets++; - stats->rx_bytes += skb->len; + stats->rx_bytes += len; return 1; } diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c index 2abe205e34535..cee05385f8727 100644 --- a/drivers/staging/rtl8723bs/core/rtw_cmd.c +++ 
b/drivers/staging/rtl8723bs/core/rtw_cmd.c @@ -165,8 +165,6 @@ No irqsave is necessary. int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) { - int res = 0; - init_completion(&pcmdpriv->cmd_queue_comp); init_completion(&pcmdpriv->terminate_cmdthread_comp); @@ -178,18 +176,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ); - if (!pcmdpriv->cmd_allocated_buf) { - res = -ENOMEM; - goto exit; - } + if (!pcmdpriv->cmd_allocated_buf) + return -ENOMEM; pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1)); pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4); if (!pcmdpriv->rsp_allocated_buf) { - res = -ENOMEM; - goto exit; + kfree(pcmdpriv->cmd_allocated_buf); + return -ENOMEM; } pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3); @@ -199,8 +195,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) pcmdpriv->rsp_cnt = 0; mutex_init(&pcmdpriv->sctx_mutex); -exit: - return res; + + return 0; } static void c2h_wk_callback(_workitem * work); diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h index fefc664eefcf0..f5e1ae5f5ee27 100644 --- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h +++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h @@ -82,7 +82,7 @@ struct vchiq_service_params_kernel { struct vchiq_instance; -extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance); +extern int vchiq_initialise(struct vchiq_instance **pinstance); extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance); extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance); extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance, diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h index 0784c5002417d..77d8fb1801739 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h @@ -89,10 +89,10 @@ extern struct vchiq_arm_state* vchiq_platform_get_arm_state(struct vchiq_state *state); -extern enum vchiq_status +extern int vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, enum USE_TYPE_E use_type); -extern enum vchiq_status +extern int vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service); diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 09ab6d6f2429b..343f0de031546 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -564,7 +564,7 @@ static int device_init_rd0_ring(struct vnt_private *priv) kfree(desc->rd_info); err_free_desc: - while (--i) { + while (i--) { desc = &priv->aRD0Ring[i]; device_free_rx_buf(priv, desc); kfree(desc->rd_info); @@ -610,7 +610,7 @@ static int device_init_rd1_ring(struct vnt_private *priv) kfree(desc->rd_info); err_free_desc: - while (--i) { + while (i--) { desc = &priv->aRD1Ring[i]; device_free_rx_buf(priv, desc); kfree(desc->rd_info); @@ -675,7 +675,7 @@ static int device_init_td0_ring(struct vnt_private *priv) return 0; err_free_desc: - while (--i) { + while (i--) { desc = &priv->apTD0Rings[i]; kfree(desc->td_info); } @@ -715,7 +715,7 @@ static int device_init_td1_ring(struct vnt_private *priv) 
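The vt6655 hunks above (one more follows) change the allocation-failure unwind from while (--i) to while (i--). The difference is subtle enough to be worth a standalone example; alloc_all() below is hypothetical, not driver code:

    #include <stdlib.h>

    #define N 4

    /* Allocate N slots; on failure at slot i, release slots [0, i). */
    static int alloc_all(void *slots[N])
    {
        int i;

        for (i = 0; i < N; i++) {
            slots[i] = malloc(16);
            if (!slots[i])
                goto err_free;
        }
        return 0;

    err_free:
        /*
         * while (i--) visits i-1 down to 0 inclusive.  The old
         * while (--i) form never freed slots[0], and if the very
         * first allocation failed (i == 0) it walked off the end
         * of the array via slots[-1].
         */
        while (i--)
            free(slots[i]);
        return -1;
    }

    int main(void)
    {
        void *slots[N];
        int i;

        if (!alloc_all(slots))
            for (i = 0; i < N; i++)
                free(slots[i]);
        return 0;
    }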
return 0; err_free_desc: - while (--i) { + while (i--) { desc = &priv->apTD1Rings[i]; kfree(desc->td_info); } diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 16d5a4e117a27..5ae5d94c5b931 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -394,6 +394,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host ret = device_register(&tl_hba->dev); if (ret) { pr_err("device_register() failed for tl_hba->dev: %d\n", ret); + put_device(&tl_hba->dev); return -ENODEV; } @@ -1072,7 +1073,7 @@ static struct se_wwn *tcm_loop_make_scsi_hba( */ ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); if (ret) - goto out; + return ERR_PTR(ret); sh = tl_hba->sh; tcm_loop_hba_no_cnt++; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 7143d03f0e027..18fbbe510d018 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -340,7 +340,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, len += sg->length; } - iov_iter_bvec(&iter, READ, bvec, sgl_nents, len); + iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); if (is_write) ret = vfs_iter_write(fd, &iter, &pos, 0); else @@ -477,7 +477,7 @@ fd_execute_write_same(struct se_cmd *cmd) len += se_dev->dev_attrib.block_size; } - iov_iter_bvec(&iter, READ, bvec, nolb, len); + iov_iter_bvec(&iter, WRITE, bvec, nolb, len); ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); kfree(bvec); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e4513ef091593..3efd5a3bd69d1 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -82,8 +82,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, { struct se_session *sess = se_cmd->se_sess; - assert_spin_locked(&sess->sess_cmd_lock); - WARN_ON_ONCE(!irqs_disabled()); + lockdep_assert_held(&sess->sess_cmd_lock); + /* * If command already reached CMD_T_COMPLETE state within * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index 031806468af48..60ffc54da0033 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -80,7 +80,7 @@ static int optee_register_device(const uuid_t *device_uuid) rc = device_register(&optee_device->dev); if (rc) { pr_err("device registration failed, err: %d\n", rc); - kfree(optee_device); + put_device(&optee_device->dev); } return rc; diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 499fccba3d74b..6fb4400333fb4 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "tee_private.h" diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index 0f4cabd2a8c62..6be16e0598b68 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -65,8 +65,14 @@ static int imx8mm_tmu_get_temp(void *data, int *temp) u32 val; val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK; + + /* + * Do not validate against the V bit (bit 31) due to errata + * ERR051272: TMU: Bit 31 of registers TMU_TSCR/TMU_TRITSR/TMU_TRATSR invalid + */ + *temp = val * 1000; - if (*temp < VER1_TEMP_LOW_LIMIT) + if (*temp < VER1_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT) return -EAGAIN; return 0; diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 
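The tcm_loop and optee hunks above make the same correction: once device_register() has been called, a failure must be answered with put_device(), which drops the reference and lets the release callback free the object; a bare kfree() (or nothing) leaks or corrupts the kobject. A kernel-style sketch of that contract, with hypothetical my_* names:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_obj {
        struct device dev;
    };

    static void my_release(struct device *dev)
    {
        kfree(container_of(dev, struct my_obj, dev));
    }

    static int my_add(struct my_obj *obj)
    {
        int ret;

        obj->dev.release = my_release;
        dev_set_name(&obj->dev, "my_obj");

        ret = device_register(&obj->dev);
        if (ret) {
            /*
             * device_register() initialized the embedded kobject even
             * though registration failed; put_device() drops that
             * reference so my_release() runs.  kfree(obj) here would
             * bypass the release path.
             */
            put_device(&obj->dev);
            return ret;
        }
        return 0;
    }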
b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c index a337600d5bc4c..6952f4e237e1f 100644 --- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c @@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone, int trip, int *temp) { struct int34x_thermal_zone *d = zone->devdata; - int i; + int i, ret = 0; if (d->override_ops && d->override_ops->get_trip_temp) return d->override_ops->get_trip_temp(zone, trip, temp); + mutex_lock(&d->trip_mutex); + if (trip < d->aux_trip_nr) *temp = d->aux_trips[trip]; else if (trip == d->crt_trip_id) @@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone, } } if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT) - return -EINVAL; + ret = -EINVAL; } - return 0; + mutex_unlock(&d->trip_mutex); + + return ret; } static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, @@ -77,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, enum thermal_trip_type *type) { struct int34x_thermal_zone *d = zone->devdata; - int i; + int i, ret = 0; if (d->override_ops && d->override_ops->get_trip_type) return d->override_ops->get_trip_type(zone, trip, type); + mutex_lock(&d->trip_mutex); + if (trip < d->aux_trip_nr) *type = THERMAL_TRIP_PASSIVE; else if (trip == d->crt_trip_id) @@ -99,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, } } if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT) - return -EINVAL; + ret = -EINVAL; } - return 0; + mutex_unlock(&d->trip_mutex); + + return ret; } static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, @@ -174,6 +182,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone) int trip_cnt = int34x_zone->aux_trip_nr; int i; + mutex_lock(&int34x_zone->trip_mutex); + int34x_zone->crt_trip_id = -1; if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT", &int34x_zone->crt_temp)) @@ -201,6 +211,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone) int34x_zone->act_trips[i].valid = true; } + mutex_unlock(&int34x_zone->trip_mutex); + return trip_cnt; } EXPORT_SYMBOL_GPL(int340x_thermal_read_trips); @@ -224,6 +236,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, if (!int34x_thermal_zone) return ERR_PTR(-ENOMEM); + mutex_init(&int34x_thermal_zone->trip_mutex); + int34x_thermal_zone->adev = adev; int34x_thermal_zone->override_ops = override_ops; @@ -275,6 +289,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); kfree(int34x_thermal_zone->aux_trips); err_trip_alloc: + mutex_destroy(&int34x_thermal_zone->trip_mutex); kfree(int34x_thermal_zone); return ERR_PTR(ret); } @@ -286,6 +301,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone thermal_zone_device_unregister(int34x_thermal_zone->zone); acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); kfree(int34x_thermal_zone->aux_trips); + mutex_destroy(&int34x_thermal_zone->trip_mutex); kfree(int34x_thermal_zone); } EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h index 3b4971df1b33b..8f9872afd0d3c 100644 --- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h +++ 
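The int340x changes above put one trip_mutex around every reader and updater of the trip tables, so int340x_thermal_read_trips() cannot race the get_trip_* callbacks; note the single-unlock style (set ret, fall through). Reduced to a sketch with a hypothetical trip_table:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct trip_table {
        struct mutex lock;
        int temp[8];
        int nr;
    };

    /* Read one entry under the lock; one unlock path for all outcomes. */
    static int trip_get(struct trip_table *t, int idx, int *temp)
    {
        int ret = 0;

        mutex_lock(&t->lock);
        if (idx < t->nr)
            *temp = t->temp[idx];
        else
            ret = -EINVAL;
        mutex_unlock(&t->lock);

        return ret;
    }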
b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h @@ -32,6 +32,7 @@ struct int34x_thermal_zone { struct thermal_zone_device_ops *override_ops; void *priv_data; struct acpi_lpat_conversion_table *lpat_table; + struct mutex trip_mutex; }; struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *, diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index b0eb5ece9243b..fb04470d7d4bb 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -531,9 +531,7 @@ static int start_power_clamp(void) get_online_cpus(); /* prefer BSP */ - control_cpu = 0; - if (!cpu_online(control_cpu)) - control_cpu = smp_processor_id(); + control_cpu = cpumask_first(cpu_online_mask); clamping = true; schedule_delayed_work(&poll_pkg_cstate_work, 0); diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index 4ffa2e2c01457..9b8ba429a3049 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -522,7 +522,7 @@ static const struct tsens_ops ops_8939 = { struct tsens_plat_data data_8939 = { .num_sensors = 10, .ops = &ops_8939, - .hw_ids = (unsigned int []){ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10 }, + .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 }, .feat = &tsens_v0_1_feat, .fields = tsens_v0_1_regfields, diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 82c46b200c347..b2fb3397310e4 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -2300,6 +2300,18 @@ struct tb *icm_probe(struct tb_nhi *nhi) icm->rtd3_veto = icm_icl_rtd3_veto; tb->cm_ops = &icm_icl_ops; break; + + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: + icm->is_supported = icm_tgl_is_supported; + icm->get_mode = icm_ar_get_mode; + icm->driver_ready = icm_tr_driver_ready; + icm->device_connected = icm_tr_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + tb->cm_ops = &icm_tr_ops; + break; } if (!icm->is_supported || !icm->is_supported(tb)) { diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index 4e0861d750720..7ad6d3f0583b3 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -55,6 +55,8 @@ extern const struct tb_nhi_ops icl_nhi_ops; * need for the PCI quirk anymore as we will use ICM also on Apple * hardware. */ +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134 +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index c4b157c29af7a..e881b72833dcf 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2046,6 +2046,7 @@ int tb_switch_configure(struct tb_switch *sw) * additional capabilities. 
*/ sw->config.cmuv = USB4_VERSION_1_0; + sw->config.plug_events_delay = 0xa; /* Enumerate the switch */ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, @@ -2412,6 +2413,26 @@ void tb_switch_unconfigure_link(struct tb_switch *sw) tb_lc_unconfigure_port(down); } +static int tb_switch_port_hotplug_enable(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_switch_is_icm(sw)) + return 0; + + tb_switch_for_each_port(sw, port) { + int res; + + if (!port->cap_usb4) + continue; + + res = usb4_port_hotplug_enable(port); + if (res) + return res; + } + return 0; +} + /** * tb_switch_add() - Add a switch to the domain * @sw: Switch to add @@ -2479,6 +2500,10 @@ int tb_switch_add(struct tb_switch *sw) return ret; } + ret = tb_switch_port_hotplug_enable(sw); + if (ret) + return ret; + ret = device_add(&sw->dev); if (ret) { dev_err(&sw->dev, "failed to add device: %d\n", ret); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 8ea360b0ff773..266f3bf8ff5c6 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -979,6 +979,7 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port); int usb4_port_unlock(struct tb_port *port); +int usb4_port_hotplug_enable(struct tb_port *port); int usb4_port_configure(struct tb_port *port); void usb4_port_unconfigure(struct tb_port *port); int usb4_port_configure_xdomain(struct tb_port *port); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index e7d9529822fab..26868e2f9d0bd 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -285,6 +285,7 @@ struct tb_regs_port_header { #define ADP_CS_5 0x05 #define ADP_CS_5_LCA_MASK GENMASK(28, 22) #define ADP_CS_5_LCA_SHIFT 22 +#define ADP_CS_5_DHP BIT(31) /* TMU adapter registers */ #define TMU_ADP_CS_3 0x03 diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 829b6ccdd5d4f..011ab5fed85b7 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -956,7 +956,7 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel, return; } else if (!ret) { /* Use maximum link rate if the link valid is not set */ - ret = usb4_usb3_port_max_link_rate(tunnel->src_port); + ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); if (ret < 0) { tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); return; diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index c05ec6fad77f6..0b3a77ade04d9 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -854,6 +854,26 @@ int usb4_port_unlock(struct tb_port *port) return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); } +/** + * usb4_port_hotplug_enable() - Enables hotplug for a port + * @port: USB4 port to operate on + * + * Enables hot plug events on a given port. This is only intended + * to be used on lane, DP-IN, and DP-OUT adapters. 
+ */ +int usb4_port_hotplug_enable(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1); + if (ret) + return ret; + + val &= ~ADP_CS_5_DHP; + return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); +} + static int usb4_port_set_configured(struct tb_port *port, bool configured) { int ret; diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 7948660e042fd..6f387a4fd96a4 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock); static struct xencons_info *vtermno_to_xencons(int vtermno) { - struct xencons_info *entry, *n, *ret = NULL; + struct xencons_info *entry, *ret = NULL; + unsigned long flags; - if (list_empty(&xenconsoles)) - return NULL; + spin_lock_irqsave(&xencons_lock, flags); + if (list_empty(&xenconsoles)) { + spin_unlock_irqrestore(&xencons_lock, flags); + return NULL; + } - list_for_each_entry_safe(entry, n, &xenconsoles, list) { + list_for_each_entry(entry, &xenconsoles, list) { if (entry->vtermno == vtermno) { ret = entry; break; } } + spin_unlock_irqrestore(&xencons_lock, flags); return ret; } @@ -223,7 +228,7 @@ static int xen_hvm_console_init(void) { int r; uint64_t v = 0; - unsigned long gfn; + unsigned long gfn, flags; struct xencons_info *info; if (!xen_hvm_domain()) @@ -258,9 +263,9 @@ static int xen_hvm_console_init(void) goto err; info->vtermno = HVC_COOKIE; - spin_lock(&xencons_lock); + spin_lock_irqsave(&xencons_lock, flags); list_add_tail(&info->list, &xenconsoles); - spin_unlock(&xencons_lock); + spin_unlock_irqrestore(&xencons_lock, flags); return 0; err: @@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno) static int xen_pv_console_init(void) { struct xencons_info *info; + unsigned long flags; if (!xen_pv_domain()) return -ENODEV; @@ -299,9 +305,9 @@ static int xen_pv_console_init(void) /* already configured */ return 0; } - spin_lock(&xencons_lock); + spin_lock_irqsave(&xencons_lock, flags); xencons_info_pv_init(info, HVC_COOKIE); - spin_unlock(&xencons_lock); + spin_unlock_irqrestore(&xencons_lock, flags); return 0; } @@ -309,6 +315,7 @@ static int xen_pv_console_init(void) static int xen_initial_domain_console_init(void) { struct xencons_info *info; + unsigned long flags; if (!xen_initial_domain()) return -ENODEV; @@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void) info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false); info->vtermno = HVC_COOKIE; - spin_lock(&xencons_lock); + spin_lock_irqsave(&xencons_lock, flags); list_add_tail(&info->list, &xenconsoles); - spin_unlock(&xencons_lock); + spin_unlock_irqrestore(&xencons_lock, flags); return 0; } @@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info) static int xen_console_remove(struct xencons_info *info) { + unsigned long flags; + xencons_disconnect_backend(info); - spin_lock(&xencons_lock); + spin_lock_irqsave(&xencons_lock, flags); list_del(&info->list); - spin_unlock(&xencons_lock); + spin_unlock_irqrestore(&xencons_lock, flags); if (info->xbdev != NULL) xencons_free(info); else { @@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev, { int ret, devid; struct xencons_info *info; + unsigned long flags; devid = dev->nodename[strlen(dev->nodename) - 1] - '0'; if (devid == 0) @@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev, ret = xencons_connect_backend(dev, info); if (ret < 0) goto error; - spin_lock(&xencons_lock); + 
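usb4_port_hotplug_enable() above is a plain read-modify-write: clearing the DHP bit (bit 31 of ADP_CS_5, per the tb_regs.h hunk) is what re-enables hot-plug event delivery for the port. A standalone model of that update; the register value below is invented for the demo:

    #include <stdint.h>
    #include <stdio.h>

    #define ADP_CS_5_DHP (UINT32_C(1) << 31)

    static uint32_t reg_clear_bits(uint32_t val, uint32_t mask)
    {
        return val & ~mask;    /* read-modify-write, minus the write-back */
    }

    int main(void)
    {
        uint32_t adp_cs_5 = 0x80001234;    /* hot plug currently disabled */

        adp_cs_5 = reg_clear_bits(adp_cs_5, ADP_CS_5_DHP);
        printf("ADP_CS_5 = %#x\n", adp_cs_5);  /* 0x1234: events enabled */
        return 0;
    }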
spin_lock_irqsave(&xencons_lock, flags); list_add_tail(&info->list, &xenconsoles); - spin_unlock(&xencons_lock); + spin_unlock_irqrestore(&xencons_lock, flags); return 0; @@ -583,10 +593,12 @@ static int __init xen_hvc_init(void) info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256); if (IS_ERR(info->hvc)) { + unsigned long flags; + r = PTR_ERR(info->hvc); - spin_lock(&xencons_lock); + spin_lock_irqsave(&xencons_lock, flags); list_del(&info->list); - spin_unlock(&xencons_lock); + spin_unlock_irqrestore(&xencons_lock, flags); if (info->irq) unbind_from_irqhandler(info->irq, NULL); kfree(info); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index cb5ed4155a8d2..f5063499f9cf6 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1419,7 +1419,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control), - GFP_KERNEL); + GFP_ATOMIC); unsigned long flags; if (ctrl == NULL) return NULL; @@ -2200,11 +2200,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm) { struct gsm_dlci *dlci; - timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); - init_waitqueue_head(&gsm->event); - spin_lock_init(&gsm->control_lock); - spin_lock_init(&gsm->tx_lock); - if (gsm->encoding == 0) gsm->receive = gsm0_receive; else @@ -2306,6 +2301,10 @@ static struct gsm_mux *gsm_alloc_mux(void) mutex_init(&gsm->mutex); kref_init(&gsm->ref); INIT_LIST_HEAD(&gsm->tx_list); + timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); + init_waitqueue_head(&gsm->event); + spin_lock_init(&gsm->control_lock); + spin_lock_init(&gsm->tx_lock); gsm->t1 = T1; gsm->t2 = T2; diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 98ce484f1089d..0a7e9491b4d14 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -310,10 +310,9 @@ static void serial8250_backup_timeout(struct timer_list *t) jiffies + uart_poll_timeout(&up->port) + HZ / 5); } -static int univ8250_setup_irq(struct uart_8250_port *up) +static void univ8250_setup_timer(struct uart_8250_port *up) { struct uart_port *port = &up->port; - int retval = 0; /* * The above check will only give an accurate result the first time @@ -332,12 +331,18 @@ static int univ8250_setup_irq(struct uart_8250_port *up) * hardware interrupt, we use a timer-based system. The original * driver used to do this with IRQ0. 
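The hvc_xen hunks above extend xencons_lock over the whole list walk and switch every taker to the irqsave variants, since the lock is now used from contexts where interrupts may already be disabled. The lookup shape, as a kernel-style sketch:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct entry {
        struct list_head list;
        int id;
    };

    static LIST_HEAD(entries);
    static DEFINE_SPINLOCK(entries_lock);

    /* Walk the list only with the lock held, as vtermno_to_xencons()
     * now does; irqsave keeps it safe whatever context calls us. */
    static struct entry *find_entry(int id)
    {
        struct entry *e, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&entries_lock, flags);
        list_for_each_entry(e, &entries, list) {
            if (e->id == id) {
                ret = e;
                break;
            }
        }
        spin_unlock_irqrestore(&entries_lock, flags);

        return ret;
    }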
*/ - if (!port->irq) { + if (!port->irq) mod_timer(&up->timer, jiffies + uart_poll_timeout(port)); - } else - retval = serial_link_irq_chain(up); +} - return retval; +static int univ8250_setup_irq(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + + if (port->irq) + return serial_link_irq_chain(up); + + return 0; } static void univ8250_release_irq(struct uart_8250_port *up) @@ -393,6 +398,7 @@ static struct uart_ops univ8250_port_ops; static const struct uart_8250_ops univ8250_driver_ops = { .setup_irq = univ8250_setup_irq, .release_irq = univ8250_release_irq, + .setup_timer = univ8250_setup_timer, }; static struct uart_8250_port serial8250_ports[UART_NR]; @@ -766,6 +772,7 @@ void serial8250_suspend_port(int line) if (!console_suspend_enabled && uart_console(port) && port->type != PORT_8250) { unsigned char canary = 0xa5; + serial_out(up, UART_SCR, canary); if (serial_in(up, UART_SCR) == canary) up->canary = canary; diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index b3c3f7e5851ab..33ce4b218d9ef 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -46,19 +46,39 @@ static void __dma_rx_complete(void *param) struct uart_8250_dma *dma = p->dma; struct tty_port *tty_port = &p->port.state->port; struct dma_tx_state state; + enum dma_status dma_status; int count; - dma->rx_running = 0; - dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + /* + * New DMA Rx can be started during the completion handler before it + * could acquire port's lock and it might still be ongoing. Don't do + * anything in such a case. + */ + dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + if (dma_status == DMA_IN_PROGRESS) + return; count = dma->rx_size - state.residue; tty_insert_flip_string(tty_port, dma->rx_buf, count); p->port.icount.rx += count; + dma->rx_running = 0; tty_flip_buffer_push(tty_port); } +static void dma_rx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + unsigned long flags; + + spin_lock_irqsave(&p->port.lock, flags); + if (dma->rx_running) + __dma_rx_complete(p); + spin_unlock_irqrestore(&p->port.lock, flags); +} + int serial8250_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; @@ -130,7 +150,7 @@ int serial8250_rx_dma(struct uart_8250_port *p) return -EBUSY; dma->rx_running = 1; - desc->callback = __dma_rx_complete; + desc->callback = dma_rx_complete; desc->callback_param = p; dma->rx_cookie = dmaengine_submit(desc); diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index dfb730b7ea2ae..1349c161c1922 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -268,8 +268,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port struct dw_dma_slave *rx_param, *tx_param; struct device *dev = port->port.dev; - if (!lpss->dma_param.dma_dev) + if (!lpss->dma_param.dma_dev) { + dma = port->dma; + if (dma) + goto out_configuration_only; + return 0; + } rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL); if (!rx_param) return -ENOMEM; *rx_param = lpss->dma_param; - dma->rxconf.src_maxburst = lpss->dma_maxburst; - *tx_param = lpss->dma_param; - dma->txconf.dst_maxburst = lpss->dma_maxburst; dma->fn = lpss8250_dma_filter; dma->rx_param = rx_param; dma->tx_param = tx_param; port->dma = dma; +
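The 8250_dma.c change above splits Rx completion into a locked wrapper plus a worker, and the worker bails out while dmaengine still reports DMA_IN_PROGRESS, so a completion racing a fresh submission degrades to a no-op. The skeleton of that pattern (kernel-style sketch, details elided):

    #include <linux/serial_8250.h>
    #include <linux/spinlock.h>

    /* Runs with p->port.lock held: read the residue, push bytes to the
     * tty layer, clear rx_running (all elided here). */
    static void my_rx_complete_locked(struct uart_8250_port *p)
    {
        /* dmaengine_tx_status() + tty_insert_flip_string() ... */
    }

    /* Raw dmaengine callback: touch driver state only under the port
     * lock, and only if an Rx transfer is still accounted as running. */
    static void my_rx_complete(void *param)
    {
        struct uart_8250_port *p = param;
        unsigned long flags;

        spin_lock_irqsave(&p->port.lock, flags);
        if (p->dma->rx_running)
            my_rx_complete_locked(p);
        spin_unlock_irqrestore(&p->port.lock, flags);
    }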
+out_configuration_only: + dma->rxconf.src_maxburst = lpss->dma_maxburst; + dma->txconf.dst_maxburst = lpss->dma_maxburst; + return 0; } diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 537bee8d2258a..483fff3a95c9e 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -157,7 +157,11 @@ static u32 uart_read(struct uart_8250_port *up, u32 reg) return readl(up->port.membase + (reg << up->port.regshift)); } -static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +/* + * Called on runtime PM resume path from omap8250_restore_regs(), and + * omap8250_set_mctrl(). + */ +static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = up->port.private_data; @@ -181,6 +185,20 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) } } +static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + int err; + + err = pm_runtime_resume_and_get(port->dev); + if (err) + return; + + __omap8250_set_mctrl(port, mctrl); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); +} + /* * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460) * The access to uart register after MDR1 Access @@ -193,27 +211,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) static void omap_8250_mdr1_errataset(struct uart_8250_port *up, struct omap8250_priv *priv) { - u8 timeout = 255; - serial_out(up, UART_OMAP_MDR1, priv->mdr1); udelay(2); serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); - /* - * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and - * TX_FIFO_E bit is 1. - */ - while (UART_LSR_THRE != (serial_in(up, UART_LSR) & - (UART_LSR_THRE | UART_LSR_DR))) { - timeout--; - if (!timeout) { - /* Should *never* happen. 
we warn and carry on */ - dev_crit(up->port.dev, "Errata i202: timedout %x\n", - serial_in(up, UART_LSR)); - break; - } - udelay(1); - } } static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud, @@ -292,6 +293,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) { struct omap8250_priv *priv = up->port.private_data; struct uart_8250_dma *dma = up->dma; + u8 mcr = serial8250_in_MCR(up); if (dma && dma->tx_running) { /* @@ -308,7 +310,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_EFR, UART_EFR_ECB); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); - serial8250_out_MCR(up, UART_MCR_TCRTLR); + serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR); serial_out(up, UART_FCR, up->fcr); omap8250_update_scr(up, priv); @@ -324,7 +326,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_LCR, 0); /* drop TCR + TLR access, we setup XON/XOFF later */ - serial8250_out_MCR(up, up->mcr); + serial8250_out_MCR(up, mcr); + serial_out(up, UART_IER, up->ier); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); @@ -341,7 +344,10 @@ static void omap8250_restore_regs(struct uart_8250_port *up) omap8250_update_mdr1(up, priv); - up->port.ops->set_mctrl(&up->port, up->port.mctrl); + __omap8250_set_mctrl(&up->port, up->port.mctrl); + + if (up->port.rs485.flags & SER_RS485_ENABLED) + serial8250_em485_stop_tx(up); } /* @@ -680,7 +686,6 @@ static int omap_8250_startup(struct uart_port *port) pm_runtime_get_sync(port->dev); - up->mcr = 0; serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_LCR, UART_LCR_WLEN8); @@ -1471,9 +1476,15 @@ static int omap8250_probe(struct platform_device *pdev) static int omap8250_remove(struct platform_device *pdev) { struct omap8250_priv *priv = platform_get_drvdata(pdev); + int err; + + err = pm_runtime_resume_and_get(&pdev->dev); + if (err) + return err; pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); + flush_work(&priv->qos_work); pm_runtime_disable(&pdev->dev); serial8250_unregister_port(priv->line); cpu_latency_qos_remove_request(&priv->pm_qos_request); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index df10cc606582b..b6656898699d1 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1531,7 +1531,6 @@ static int pci_fintek_init(struct pci_dev *dev) resource_size_t bar_data[3]; u8 config_base; struct serial_private *priv = pci_get_drvdata(dev); - struct uart_8250_port *port; if (!(pci_resource_flags(dev, 5) & IORESOURCE_IO) || !(pci_resource_flags(dev, 4) & IORESOURCE_IO) || @@ -1578,13 +1577,7 @@ static int pci_fintek_init(struct pci_dev *dev) pci_write_config_byte(dev, config_base + 0x06, dev->irq); - if (priv) { - /* re-apply RS232/485 mode when - * pciserial_resume_ports() - */ - port = serial8250_get_port(priv->line[i]); - pci_fintek_rs485_config(&port->port, NULL); - } else { + if (!priv) { /* First init without port data * force init to RS232 Mode */ diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 9d60418e4adb1..1f231fcda657b 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -592,7 +592,7 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put); static int serial8250_em485_init(struct uart_8250_port *p) { if (p->em485) - return 0; + goto deassert_rts; p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC); if (!p->em485) @@ -608,7 +608,9 @@ static int 
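The omap8250 hunks above bracket register access with runtime-PM calls, because set_mctrl() and remove() can run while the port is runtime-suspended and its registers are unclocked. The callback shape (sketch; __my_set_mctrl() stands in for the real register writes):

    #include <linux/pm_runtime.h>
    #include <linux/serial_core.h>

    static void __my_set_mctrl(struct uart_port *port, unsigned int mctrl)
    {
        /* the actual register writes; hypothetical stand-in */
    }

    static void my_set_mctrl(struct uart_port *port, unsigned int mctrl)
    {
        /* returns 0 on success; on failure the usage count is dropped */
        if (pm_runtime_resume_and_get(port->dev))
            return;

        __my_set_mctrl(port, mctrl);

        pm_runtime_mark_last_busy(port->dev);
        pm_runtime_put_autosuspend(port->dev);
    }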
serial8250_em485_init(struct uart_8250_port *p) p->em485->active_timer = NULL; p->em485->tx_stopped = true; - p->rs485_stop_tx(p); +deassert_rts: + if (p->em485->tx_stopped) + p->rs485_stop_tx(p); return 0; } @@ -659,13 +661,6 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485) rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; } - /* clamp the delays to [0, 100ms] */ - rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U); - rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U); - - memset(rs485->padding, 0, sizeof(rs485->padding)); - port->rs485 = *rs485; - gpiod_set_value(port->rs485_term_gpio, rs485->flags & SER_RS485_TERMINATE_BUS); @@ -673,15 +668,8 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485) * Both serial8250_em485_init() and serial8250_em485_destroy() * are idempotent. */ - if (rs485->flags & SER_RS485_ENABLED) { - int ret = serial8250_em485_init(up); - - if (ret) { - rs485->flags &= ~SER_RS485_ENABLED; - port->rs485.flags &= ~SER_RS485_ENABLED; - } - return ret; - } + if (rs485->flags & SER_RS485_ENABLED) + return serial8250_em485_init(up); serial8250_em485_destroy(up); return 0; @@ -1021,7 +1009,8 @@ static void autoconfig_16550a(struct uart_8250_port *up) up->port.type = PORT_16550A; up->capabilities |= UART_CAP_FIFO; - if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS)) + if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS) && + !(up->port.flags & UPF_FULL_PROBE)) return; /* @@ -1880,10 +1869,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status); static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) { switch (iir & 0x3f) { - case UART_IIR_RX_TIMEOUT: - serial8250_rx_dma_flush(up); + case UART_IIR_RDI: + if (!up->dma->rx_running) + break; fallthrough; case UART_IIR_RLSI: + case UART_IIR_RX_TIMEOUT: + serial8250_rx_dma_flush(up); return true; } return up->dma->rx_dma(up); @@ -2030,6 +2022,9 @@ EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl); static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl) { + if (port->rs485.flags & SER_RS485_ENABLED) + return; + if (port->set_mctrl) port->set_mctrl(port, mctrl); else @@ -2276,6 +2271,10 @@ int serial8250_do_startup(struct uart_port *port) if (port->irq && (up->port.flags & UPF_SHARE_IRQ)) up->port.irqflags |= IRQF_SHARED; + retval = up->ops->setup_irq(up); + if (retval) + goto out; + if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; @@ -2318,9 +2317,7 @@ int serial8250_do_startup(struct uart_port *port) } } - retval = up->ops->setup_irq(up); - if (retval) - goto out; + up->ops->setup_timer(up); /* * Now, initialize the UART @@ -3159,9 +3156,6 @@ static void serial8250_config_port(struct uart_port *port, int flags) if (flags & UART_CONFIG_TYPE) autoconfig(up); - if (port->rs485.flags & SER_RS485_ENABLED) - port->rs485_config(port, &port->rs485); - /* if access method is AU, it is a 16550 with a quirk */ if (port->type == PORT_16550A && port->iotype == UPIO_AU) up->bugs |= UART_BUG_NOMSR; @@ -3286,8 +3280,13 @@ static void serial8250_console_restore(struct uart_8250_port *up) unsigned int baud, quot, frac = 0; termios.c_cflag = port->cons->cflag; - if (port->state->port.tty && termios.c_cflag == 0) + termios.c_ispeed = port->cons->ispeed; + termios.c_ospeed = port->cons->ospeed; + if (port->state->port.tty && termios.c_cflag == 0) { termios.c_cflag = port->state->port.tty->termios.c_cflag; + termios.c_ispeed = port->state->port.tty->termios.c_ispeed; + termios.c_ospeed = 
port->state->port.tty->termios.c_ospeed; + } baud = serial8250_get_baud_rate(port, &termios, NULL); quot = serial8250_get_divisor(port, baud, &frac); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index 603137da47363..136f2b1460f91 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -119,7 +119,7 @@ config SERIAL_8250_CONSOLE config SERIAL_8250_GSC tristate - depends on SERIAL_8250 && GSC + depends on SERIAL_8250 && PARISC default SERIAL_8250 config SERIAL_8250_DMA diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 0e487ce091ac9..d91f76b1d353e 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -199,9 +199,8 @@ static void altera_uart_set_termios(struct uart_port *port, */ } -static void altera_uart_rx_chars(struct altera_uart *pp) +static void altera_uart_rx_chars(struct uart_port *port) { - struct uart_port *port = &pp->port; unsigned char ch, flag; unsigned short status; @@ -248,9 +247,8 @@ static void altera_uart_rx_chars(struct altera_uart *pp) spin_lock(&port->lock); } -static void altera_uart_tx_chars(struct altera_uart *pp) +static void altera_uart_tx_chars(struct uart_port *port) { - struct uart_port *port = &pp->port; struct circ_buf *xmit = &port->state->xmit; if (port->x_char) { @@ -274,26 +272,25 @@ static void altera_uart_tx_chars(struct altera_uart *pp) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - if (xmit->head == xmit->tail) { - pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; - altera_uart_update_ctrl_reg(pp); - } + if (uart_circ_empty(xmit)) + altera_uart_stop_tx(port); } static irqreturn_t altera_uart_interrupt(int irq, void *data) { struct uart_port *port = data; struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned long flags; unsigned int isr; isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr; - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); if (isr & ALTERA_UART_STATUS_RRDY_MSK) - altera_uart_rx_chars(pp); + altera_uart_rx_chars(port); if (isr & ALTERA_UART_STATUS_TRDY_MSK) - altera_uart_tx_chars(pp); - spin_unlock(&port->lock); + altera_uart_tx_chars(port); + spin_unlock_irqrestore(&port->lock, flags); return IRQ_RETVAL(isr); } diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 9900ee3f90683..348d4b2a391a3 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1048,6 +1048,9 @@ static void pl011_dma_rx_callback(void *data) */ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) { + if (!uap->using_rx_dma) + return; + /* FIXME. 
Just disable the DMA enable */ uap->dmacr &= ~UART011_RXDMAE; pl011_write(uap->dmacr, uap, REG_DMACR); @@ -1757,8 +1760,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) static void pl011_unthrottle_rx(struct uart_port *port) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + unsigned long flags; - pl011_enable_interrupts(uap); + spin_lock_irqsave(&uap->port.lock, flags); + + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; + + pl011_write(uap->im, uap, REG_IMSC); + + spin_unlock_irqrestore(&uap->port.lock, flags); } static int pl011_startup(struct uart_port *port) diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index c2be7cf913992..fcbaff8941930 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -593,6 +593,11 @@ static int ar933x_config_rs485(struct uart_port *port, dev_err(port->dev, "RS485 needs rts-gpio\n"); return 1; } + + if (rs485conf->flags & SER_RS485_ENABLED) + gpiod_set_value(up->rts_gpiod, + !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND)); + port->rs485 = *rs485conf; return 0; } diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 602065bfc9bb8..02fd0e79c8f70 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -295,20 +295,16 @@ static int atmel_config_rs485(struct uart_port *port, mode = atmel_uart_readl(port, ATMEL_US_MR); - /* Resetting serial mode to RS232 (0x0) */ - mode &= ~ATMEL_US_USMODE; - - port->rs485 = *rs485conf; - if (rs485conf->flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); - if (port->rs485.flags & SER_RS485_RX_DURING_TX) + if (rs485conf->flags & SER_RS485_RX_DURING_TX) atmel_port->tx_done_mask = ATMEL_US_TXRDY; else atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; atmel_uart_writel(port, ATMEL_US_TTGR, rs485conf->delay_rts_after_send); + mode &= ~ATMEL_US_USMODE; mode |= ATMEL_US_USMODE_RS485; } else { dev_dbg(port->dev, "Setting UART to RS232\n"); @@ -2637,13 +2633,7 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud, else if (mr == ATMEL_US_PAR_ODD) *parity = 'o'; - /* - * The serial core only rounds down when matching this to a - * supported baud rate. Make sure we don't end up slightly - * lower than one of those, as it would make us fall through - * to a much lower baud rate than we really want. 
- */ - *baud = port->uartclk / (16 * (quot - 1)); + *baud = port->uartclk / (16 * quot); } static int __init atmel_console_setup(struct console *co, char *options) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index a2c4eab0b4703..223695947b654 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1725,6 +1725,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport) if (sport->lpuart_dma_rx_use) { del_timer_sync(&sport->lpuart_timer); lpuart_dma_rx_free(&sport->port); + sport->lpuart_dma_rx_use = false; } if (sport->lpuart_dma_tx_use) { @@ -1733,6 +1734,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport) sport->dma_tx_in_progress = false; dmaengine_terminate_all(sport->dma_tx_chan); } + sport->lpuart_dma_tx_use = false; } if (sport->dma_tx_chan) @@ -2584,6 +2586,7 @@ static int lpuart_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct lpuart_port *sport; struct resource *res; + irq_handler_t handler; int ret; sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); @@ -2656,21 +2659,12 @@ static int lpuart_probe(struct platform_device *pdev) if (lpuart_is_32(sport)) { lpuart_reg.cons = LPUART32_CONSOLE; - ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0, - DRIVER_NAME, sport); + handler = lpuart32_int; } else { lpuart_reg.cons = LPUART_CONSOLE; - ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0, - DRIVER_NAME, sport); + handler = lpuart_int; } - if (ret) - goto failed_irq_request; - - ret = uart_add_one_port(&lpuart_reg, &sport->port); - if (ret) - goto failed_attach_port; - ret = uart_get_rs485_mode(&sport->port); if (ret) goto failed_get_rs485; @@ -2682,13 +2676,21 @@ static int lpuart_probe(struct platform_device *pdev) sport->port.rs485.delay_rts_after_send) dev_err(&pdev->dev, "driver doesn't support RTS delays\n"); - sport->port.rs485_config(&sport->port, &sport->port.rs485); + ret = uart_add_one_port(&lpuart_reg, &sport->port); + if (ret) + goto failed_attach_port; + + ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0, + DRIVER_NAME, sport); + if (ret) + goto failed_irq_request; return 0; +failed_irq_request: + uart_remove_one_port(&lpuart_reg, &sport->port); failed_get_rs485: failed_attach_port: -failed_irq_request: lpuart_disable_clks(sport); return ret; } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index bfbca711bbf9b..164597e2e0044 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -398,8 +398,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2) { *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); - sport->port.mctrl |= TIOCM_RTS; - mctrl_gpio_set(sport->gpios, sport->port.mctrl); + mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS); } /* called with port.lock taken and irqs caller dependent */ @@ -408,8 +407,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2) *ucr2 &= ~UCR2_CTSC; *ucr2 |= UCR2_CTS; - sport->port.mctrl &= ~TIOCM_RTS; - mctrl_gpio_set(sport->gpios, sport->port.mctrl); + mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); } static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec) @@ -2381,8 +2379,6 @@ static int imx_uart_probe(struct platform_device *pdev) dev_err(&pdev->dev, "low-active RTS not possible when receiver is off, enabling receiver\n"); - imx_uart_rs485_config(&sport->port, &sport->port.rs485); - /* Disable interrupts before requesting them */ ucr1 = imx_uart_readl(sport, UCR1); ucr1 
&= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN); @@ -2630,6 +2626,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = { .suspend_noirq = imx_uart_suspend_noirq, .resume_noirq = imx_uart_resume_noirq, .freeze_noirq = imx_uart_suspend_noirq, + .thaw_noirq = imx_uart_resume_noirq, .restore_noirq = imx_uart_resume_noirq, .suspend = imx_uart_suspend, .resume = imx_uart_resume, diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index cd30da0ef0834..b5b61e598b533 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -212,7 +212,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) break; default: - return -ENXIO; + rc = -ENXIO; + goto out_kfree_brd; } rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 351ad0b020291..cb68a1028090a 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -711,6 +711,7 @@ static void pch_request_dma(struct uart_port *port) if (!chan) { dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n", __func__); + pci_dev_put(dma_dev); return; } priv->chan_tx = chan; @@ -727,6 +728,7 @@ static void pch_request_dma(struct uart_port *port) __func__); dma_release_channel(priv->chan_tx); priv->chan_tx = NULL; + pci_dev_put(dma_dev); return; } @@ -734,6 +736,8 @@ static void pch_request_dma(struct uart_port *port) priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize, &priv->rx_buf_dma, GFP_KERNEL); priv->chan_rx = chan; + + pci_dev_put(dma_dev); } static void pch_dma_rx_complete(void *arg) @@ -765,7 +769,7 @@ static void pch_dma_tx_complete(void *arg) } xmit->tail &= UART_XMIT_SIZE - 1; async_tx_ack(priv->desc_tx); - dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE); + dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE); priv->tx_dma_use = 0; priv->nent = 0; priv->orig_nent = 0; diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 0d85b55ea8233..f50ffc8076d8b 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -866,9 +866,10 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev) return IRQ_HANDLED; } -static void get_tx_fifo_size(struct qcom_geni_serial_port *port) +static int setup_fifos(struct qcom_geni_serial_port *port) { struct uart_port *uport; + u32 old_rx_fifo_depth = port->rx_fifo_depth; uport = &port->uport; port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se); @@ -876,6 +877,16 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port) port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se); uport->fifosize = (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE; + + if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) { + port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo, + port->rx_fifo_depth * sizeof(u32), + GFP_KERNEL); + if (!port->rx_fifo) + return -ENOMEM; + } + + return 0; } @@ -890,6 +901,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport) u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT; u32 proto; u32 pin_swap; + int ret; proto = geni_se_read_proto(&port->se); if (proto != GENI_SE_UART) { @@ -899,7 +911,9 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport) qcom_geni_serial_stop_rx(uport); - get_tx_fifo_size(port); + ret = setup_fifos(port); + if (ret) + return ret; 
writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT); diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index c2be22c3b7d1b..62377c831894d 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -520,7 +520,7 @@ static void tegra_uart_tx_dma_complete(void *args) count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); spin_lock_irqsave(&tup->uport.lock, flags); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(&tup->uport, count); tup->tx_in_progress = 0; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&tup->uport); @@ -608,18 +608,18 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u) static void tegra_uart_stop_tx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); - struct circ_buf *xmit = &tup->uport.state->xmit; struct dma_tx_state state; unsigned int count; if (tup->tx_in_progress != TEGRA_UART_TX_DMA) return; - dmaengine_terminate_all(tup->tx_dma_chan); + dmaengine_pause(tup->tx_dma_chan); dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); + dmaengine_terminate_all(tup->tx_dma_chan); count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(&tup->uport, count); tup->tx_in_progress = 0; } @@ -759,8 +759,9 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup) return; } - dmaengine_terminate_all(tup->rx_dma_chan); + dmaengine_pause(tup->rx_dma_chan); dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); + dmaengine_terminate_all(tup->rx_dma_chan); tegra_uart_rx_buffer_push(tup, state.residue); tup->rx_dma_active = false; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index b578f7090b637..40fff38588d4f 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -42,6 +42,11 @@ static struct lock_class_key port_lock_key; #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) +/* + * Max time with active RTS before/after data is sent. 
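The serial-tegra hunks above reorder DMA teardown to pause, then read status, then terminate: dmaengine_terminate_all() frees the descriptor, and with it any chance of an exact residue, so the byte count must be sampled first. As a helper-shaped sketch (error handling elided):

    #include <linux/dmaengine.h>

    static unsigned int stop_and_count(struct dma_chan *chan,
                                       dma_cookie_t cookie,
                                       unsigned int bytes_requested)
    {
        struct dma_tx_state state;

        dmaengine_pause(chan);                      /* freeze the transfer */
        dmaengine_tx_status(chan, cookie, &state);  /* residue still valid */
        dmaengine_terminate_all(chan);              /* now safe to kill it */

        return bytes_requested - state.residue;
    }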
+ */ +#define RS485_MAX_RTS_DELAY 100 /* msecs */ + static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, struct ktermios *old_termios); static void uart_wait_until_sent(struct tty_struct *tty, int timeout); @@ -144,15 +149,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) unsigned long flags; unsigned int old; - if (port->rs485.flags & SER_RS485_ENABLED) { - set &= ~TIOCM_RTS; - clear &= ~TIOCM_RTS; - } - spin_lock_irqsave(&port->lock, flags); old = port->mctrl; port->mctrl = (old & ~clear) | set; - if (old != port->mctrl) + if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED)) port->ops->set_mctrl(port, port->mctrl); spin_unlock_irqrestore(&port->lock, flags); } @@ -1326,8 +1326,41 @@ static int uart_set_rs485_config(struct uart_port *port, if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user))) return -EFAULT; + /* pick sane settings if the user hasn't */ + if (!(rs485.flags & SER_RS485_RTS_ON_SEND) == + !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) { + dev_warn_ratelimited(port->dev, + "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n", + port->name, port->line); + rs485.flags |= SER_RS485_RTS_ON_SEND; + rs485.flags &= ~SER_RS485_RTS_AFTER_SEND; + } + + if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) { + rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY; + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay before sending clamped to %u ms\n", + port->name, port->line, rs485.delay_rts_before_send); + } + + if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) { + rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY; + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay after sending clamped to %u ms\n", + port->name, port->line, rs485.delay_rts_after_send); + } + /* Return clean padding area to userspace */ + memset(rs485.padding, 0, sizeof(rs485.padding)); + spin_lock_irqsave(&port->lock, flags); ret = port->rs485_config(port, &rs485); + if (!ret) { + port->rs485 = rs485; + + /* Reset RTS and other mctrl lines when disabling RS485 */ + if (!(rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); + } spin_unlock_irqrestore(&port->lock, flags); if (ret) return ret; @@ -2221,7 +2254,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) spin_lock_irq(&uport->lock); ops->stop_tx(uport); - ops->set_mctrl(uport, 0); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, 0); ops->stop_rx(uport); spin_unlock_irq(&uport->lock); @@ -2302,7 +2336,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) uart_change_pm(state, UART_PM_STATE_ON); spin_lock_irq(&uport->lock); - ops->set_mctrl(uport, 0); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, 0); spin_unlock_irq(&uport->lock); if (console_suspend_enabled || !uart_console(uport)) { /* Protected by port mutex for now */ @@ -2313,7 +2348,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (tty) uart_change_speed(tty, state, NULL); spin_lock_irq(&uport->lock); - ops->set_mctrl(uport, uport->mctrl); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, uport->mctrl); + else + uport->rs485_config(uport, &uport->rs485); ops->start_tx(uport); spin_unlock_irq(&uport->lock); tty_port_set_initialized(port, 1); @@ -2411,10 +2449,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, */ spin_lock_irqsave(&port->lock, flags); port->mctrl &= TIOCM_DTR; - if (port->rs485.flags & 
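uart_set_rs485_config() above now normalizes the user-supplied struct before any driver sees it: exactly one of RTS_ON_SEND/RTS_AFTER_SEND, both delays clamped to RS485_MAX_RTS_DELAY, padding zeroed. A standalone rendition; the flag values are redefined locally so the demo builds without kernel headers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SER_RS485_ENABLED        (1 << 0)
    #define SER_RS485_RTS_ON_SEND    (1 << 1)
    #define SER_RS485_RTS_AFTER_SEND (1 << 2)
    #define RS485_MAX_RTS_DELAY      100    /* msecs */

    struct rs485_conf {
        uint32_t flags;
        uint32_t delay_rts_before_send;
        uint32_t delay_rts_after_send;
        uint32_t padding[5];
    };

    static void rs485_sanitize(struct rs485_conf *c)
    {
        /* exactly one of RTS_ON_SEND / RTS_AFTER_SEND must be set */
        if (!(c->flags & SER_RS485_RTS_ON_SEND) ==
            !(c->flags & SER_RS485_RTS_AFTER_SEND)) {
            c->flags |= SER_RS485_RTS_ON_SEND;
            c->flags &= ~SER_RS485_RTS_AFTER_SEND;
        }

        if (c->delay_rts_before_send > RS485_MAX_RTS_DELAY)
            c->delay_rts_before_send = RS485_MAX_RTS_DELAY;
        if (c->delay_rts_after_send > RS485_MAX_RTS_DELAY)
            c->delay_rts_after_send = RS485_MAX_RTS_DELAY;

        /* return a clean padding area to userspace */
        memset(c->padding, 0, sizeof(c->padding));
    }

    int main(void)
    {
        struct rs485_conf c = { .flags = SER_RS485_ENABLED,
                                .delay_rts_after_send = 5000 };

        rs485_sanitize(&c);
        printf("flags=%#x after=%u\n", (unsigned)c.flags,
               (unsigned)c.delay_rts_after_send);
        return 0;
    }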
SER_RS485_ENABLED && - !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND)) - port->mctrl |= TIOCM_RTS; - port->ops->set_mctrl(port, port->mctrl); + if (!(port->rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); + else + port->rs485_config(port, &port->rs485); spin_unlock_irqrestore(&port->lock, flags); /* diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index bab551f469631..451c7233623f0 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -1137,7 +1137,13 @@ static int __init sunsab_init(void) } } - return platform_driver_register(&sab_driver); + err = platform_driver_register(&sab_driver); + if (err) { + kfree(sunsab_ports); + sunsab_ports = NULL; + } + + return err; } static void __exit sunsab_exit(void) diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c index aaf8748a61479..31ae705aa38b7 100644 --- a/drivers/tty/serial/tegra-tcu.c +++ b/drivers/tty/serial/tegra-tcu.c @@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port) break; tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(port, count); } uart_write_wakeup(port); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index b5a8afbc452ba..f7dfa123907ab 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -375,6 +375,8 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id) isrstatus &= ~CDNS_UART_IXR_TXEMPTY; } + isrstatus &= port->read_status_mask; + isrstatus &= ~port->ignore_status_mask; /* * Skip RX processing if RX is disabled as RXEMPTY will never be set * as read bytes will not be removed from the FIFO. diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 1850bacdb5b0e..f566eb1839dc5 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -386,10 +386,6 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) uni_mode = use_unicode(inode); attr = use_attributes(inode); - ret = -ENXIO; - vc = vcs_vc(inode, &viewed); - if (!vc) - goto unlock_out; ret = -EINVAL; if (pos < 0) @@ -407,6 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) unsigned int this_round, skip = 0; int size; + ret = -ENXIO; + vc = vcs_vc(inode, &viewed); + if (!vc) + goto unlock_out; + /* Check whether we are above size each round, * as copy_to_user at the end of this loop * could sleep. diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index ec7f66f4555a6..92751737bbea4 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -110,8 +110,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info) * remember the state so we can allow user space to enable it later. */ + spin_lock(&priv->lock); if (!test_and_set_bit(0, &priv->flags)) disable_irq_nosync(irq); + spin_unlock(&priv->lock); return IRQ_HANDLED; } @@ -125,20 +127,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) * in the interrupt controller, but keep track of the * state to prevent per-irq depth damage. * - * Serialize this operation to support multiple tasks. + * Serialize this operation to support multiple tasks and concurrency + * with irq handler on SMP systems. 
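tegra-tcu above (and serial-tegra earlier) drop the open-coded tail arithmetic for uart_xmit_advance(), which wraps the circular-buffer tail and keeps the tx byte accounting in one helper. A userspace analogue of what the helper owns:

    #include <stdio.h>

    #define XMIT_SIZE 4096    /* power of two, like UART_XMIT_SIZE */

    struct ring {
        unsigned int tail;
        unsigned long total_tx;
    };

    /* Analogue of uart_xmit_advance(): advance the tail modulo the
     * buffer size and account the transmitted bytes in one place,
     * instead of scattering (tail + count) & (UART_XMIT_SIZE - 1). */
    static void xmit_advance(struct ring *r, unsigned int count)
    {
        r->tail = (r->tail + count) & (XMIT_SIZE - 1);
        r->total_tx += count;
    }

    int main(void)
    {
        struct ring r = { .tail = 4090 };

        xmit_advance(&r, 10);    /* wraps: (4090 + 10) % 4096 == 4 */
        printf("tail=%u total=%lu\n", r.tail, r.total_tx);
        return 0;
    }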
*/ spin_lock_irqsave(&priv->lock, flags); if (irq_on) { if (test_and_clear_bit(0, &priv->flags)) enable_irq(dev_info->irq); - spin_unlock_irqrestore(&priv->lock, flags); } else { - if (!test_and_set_bit(0, &priv->flags)) { - spin_unlock_irqrestore(&priv->lock, flags); - disable_irq(dev_info->irq); - } + if (!test_and_set_bit(0, &priv->flags)) + disable_irq_nosync(dev_info->irq); } + spin_unlock_irqrestore(&priv->lock, flags); return 0; } diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index 6eeb7ed8e91f3..8fe7420de033d 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -97,13 +97,23 @@ static int cdns3_core_init_role(struct cdns3 *cdns) * can be restricted later depending on strap pin configuration. */ if (dr_mode == USB_DR_MODE_UNKNOWN) { - if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) && - IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) - dr_mode = USB_DR_MODE_OTG; - else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST)) - dr_mode = USB_DR_MODE_HOST; - else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) - dr_mode = USB_DR_MODE_PERIPHERAL; + if (cdns->version == CDNSP_CONTROLLER_V2) { + if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) && + IS_ENABLED(CONFIG_USB_CDNSP_GADGET)) + dr_mode = USB_DR_MODE_OTG; + else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST)) + dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET)) + dr_mode = USB_DR_MODE_PERIPHERAL; + } else { + if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) && + IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_OTG; + else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST)) + dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_PERIPHERAL; + } } /* diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 3176f924293a1..0d87871499eaa 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -55,7 +55,9 @@ struct cdns3_platform_data { * @otg_res: the resource for otg * @otg_v0_regs: pointer to base of v0 otg registers * @otg_v1_regs: pointer to base of v1 otg registers + * @otg_cdnsp_regs: pointer to base of CDNSP otg registers * @otg_regs: pointer to base of otg registers + * @otg_irq_regs: pointer to interrupt registers * @otg_irq: irq number for otg controller * @dev_irq: irq number for device controller * @wakeup_irq: irq number for wakeup event, it is optional @@ -86,9 +88,12 @@ struct cdns3 { struct resource otg_res; struct cdns3_otg_legacy_regs *otg_v0_regs; struct cdns3_otg_regs *otg_v1_regs; + struct cdnsp_otg_regs *otg_cdnsp_regs; struct cdns3_otg_common_regs *otg_regs; + struct cdns3_otg_irq_regs *otg_irq_regs; #define CDNS3_CONTROLLER_V0 0 #define CDNS3_CONTROLLER_V1 1 +#define CDNSP_CONTROLLER_V2 2 u32 version; bool phyrst_a_enable; diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c index 38ccd29e4cdef..95863d44e3e09 100644 --- a/drivers/usb/cdns3/drd.c +++ b/drivers/usb/cdns3/drd.c @@ -2,13 +2,12 @@ /* * Cadence USBSS DRD Driver. * - * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2018-2020 Cadence. 
* Copyright (C) 2019 Texas Instruments * * Author: Pawel Laszczak * Roger Quadros * - * */ #include #include @@ -28,8 +27,9 @@ * * Returns 0 on success otherwise negative errno */ -int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) +static int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) { + u32 __iomem *override_reg; u32 reg; switch (mode) { @@ -39,11 +39,24 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) break; case USB_DR_MODE_OTG: dev_dbg(cdns->dev, "Set controller to OTG mode\n"); - if (cdns->version == CDNS3_CONTROLLER_V1) { - reg = readl(&cdns->otg_v1_regs->override); + + if (cdns->version == CDNSP_CONTROLLER_V2) + override_reg = &cdns->otg_cdnsp_regs->override; + else if (cdns->version == CDNS3_CONTROLLER_V1) + override_reg = &cdns->otg_v1_regs->override; + else + override_reg = &cdns->otg_v0_regs->ctrl1; + + reg = readl(override_reg); + + if (cdns->version != CDNS3_CONTROLLER_V0) reg |= OVERRIDE_IDPULLUP; - writel(reg, &cdns->otg_v1_regs->override); + else + reg |= OVERRIDE_IDPULLUP_V0; + writel(reg, override_reg); + + if (cdns->version == CDNS3_CONTROLLER_V1) { /* * Enable work around feature built into the * controller to address issue with RX Sensitivity @@ -55,10 +68,6 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) reg |= PHYRST_CFG_PHYRST_A_ENABLE; writel(reg, &cdns->otg_v1_regs->phyrst_cfg); } - } else { - reg = readl(&cdns->otg_v0_regs->ctrl1); - reg |= OVERRIDE_IDPULLUP_V0; - writel(reg, &cdns->otg_v0_regs->ctrl1); } /* @@ -123,7 +132,7 @@ bool cdns3_is_device(struct cdns3 *cdns) */ static void cdns3_otg_disable_irq(struct cdns3 *cdns) { - writel(0, &cdns->otg_regs->ien); + writel(0, &cdns->otg_irq_regs->ien); } /** @@ -133,7 +142,7 @@ static void cdns3_otg_disable_irq(struct cdns3 *cdns) static void cdns3_otg_enable_irq(struct cdns3 *cdns) { writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT | - OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien); + OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien); } /** @@ -144,16 +153,21 @@ static void cdns3_otg_enable_irq(struct cdns3 *cdns) */ int cdns3_drd_host_on(struct cdns3 *cdns) { - u32 val; + u32 val, ready_bit; int ret; /* Enable host mode. 
*/ writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS, &cdns->otg_regs->cmd); + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_XHCI_READY; + else + ready_bit = OTGSTS_CDNS3_XHCI_READY; + dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n"); ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, - val & OTGSTS_XHCI_READY, 1, 100000); + val & ready_bit, 1, 100000); if (ret) dev_err(cdns->dev, "timeout waiting for xhci_ready\n"); @@ -189,17 +203,22 @@ void cdns3_drd_host_off(struct cdns3 *cdns) */ int cdns3_drd_gadget_on(struct cdns3 *cdns) { - int ret, val; u32 reg = OTGCMD_OTG_DIS; + u32 ready_bit; + int ret, val; /* switch OTG core */ writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd); dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n"); + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_DEV_READY; + else + ready_bit = OTGSTS_CDNS3_DEV_READY; + ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, - val & OTGSTS_DEV_READY, - 1, 100000); + val & ready_bit, 1, 100000); if (ret) { dev_err(cdns->dev, "timeout waiting for dev_ready\n"); return ret; @@ -244,7 +263,7 @@ static int cdns3_init_otg_mode(struct cdns3 *cdns) cdns3_otg_disable_irq(cdns); /* clear all interrupts */ - writel(~0, &cdns->otg_regs->ivect); + writel(~0, &cdns->otg_irq_regs->ivect); ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG); if (ret) @@ -313,7 +332,7 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data) if (cdns->in_lpm) return ret; - reg = readl(&cdns->otg_regs->ivect); + reg = readl(&cdns->otg_irq_regs->ivect); if (!reg) return IRQ_NONE; @@ -332,7 +351,7 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data) ret = IRQ_WAKE_THREAD; } - writel(~0, &cdns->otg_regs->ivect); + writel(~0, &cdns->otg_irq_regs->ivect); return ret; } @@ -347,28 +366,43 @@ int cdns3_drd_init(struct cdns3 *cdns) return PTR_ERR(regs); /* Detection of DRD version. Controller has been released - * in two versions. Both are similar, but they have same changes - * in register maps. - * The first register in old version is command register and it's read - * only, so driver should read 0 from it. On the other hand, in v1 - * the first register contains device ID number which is not set to 0. - * Driver uses this fact to detect the proper version of + * in three versions. All are very similar and are software compatible, + * but they have some changes in register maps. + * The first register in the oldest version is the command register and + * it's read only. The driver should read 0 from it. On the other hand, + * in v1 and v2 the first register contains the device ID number, which + * is not set to 0. The driver uses this fact to detect the proper version of * controller.
*/ cdns->otg_v0_regs = regs; if (!readl(&cdns->otg_v0_regs->cmd)) { cdns->version = CDNS3_CONTROLLER_V0; cdns->otg_v1_regs = NULL; + cdns->otg_cdnsp_regs = NULL; cdns->otg_regs = regs; + cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *) + &cdns->otg_v0_regs->ien; writel(1, &cdns->otg_v0_regs->simulate); dev_dbg(cdns->dev, "DRD version v0 (%08x)\n", readl(&cdns->otg_v0_regs->version)); } else { cdns->otg_v0_regs = NULL; cdns->otg_v1_regs = regs; + cdns->otg_cdnsp_regs = regs; + cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd; - cdns->version = CDNS3_CONTROLLER_V1; - writel(1, &cdns->otg_v1_regs->simulate); + + if (cdns->otg_cdnsp_regs->did == OTG_CDNSP_DID) { + cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *) + &cdns->otg_cdnsp_regs->ien; + cdns->version = CDNSP_CONTROLLER_V2; + } else { + cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *) + &cdns->otg_v1_regs->ien; + writel(1, &cdns->otg_v1_regs->simulate); + cdns->version = CDNS3_CONTROLLER_V1; + } + dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n", readl(&cdns->otg_v1_regs->did), readl(&cdns->otg_v1_regs->rid)); @@ -378,10 +412,17 @@ int cdns3_drd_init(struct cdns3 *cdns) /* Update dr_mode according to STRAP configuration. */ cdns->dr_mode = USB_DR_MODE_OTG; - if (state == OTGSTS_STRAP_HOST) { + + if ((cdns->version == CDNSP_CONTROLLER_V2 && + state == OTGSTS_CDNSP_STRAP_HOST) || + (cdns->version != CDNSP_CONTROLLER_V2 && + state == OTGSTS_STRAP_HOST)) { dev_dbg(cdns->dev, "Controller strapped to HOST\n"); cdns->dr_mode = USB_DR_MODE_HOST; - } else if (state == OTGSTS_STRAP_GADGET) { + } else if ((cdns->version == CDNSP_CONTROLLER_V2 && + state == OTGSTS_CDNSP_STRAP_GADGET) || + (cdns->version != CDNSP_CONTROLLER_V2 && + state == OTGSTS_STRAP_GADGET)) { dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n"); cdns->dr_mode = USB_DR_MODE_PERIPHERAL; } diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h index f1ccae285a16d..a767b6893938c 100644 --- a/drivers/usb/cdns3/drd.h +++ b/drivers/usb/cdns3/drd.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Cadence USB3 DRD header file. + * Cadence USB3 and USBSSP DRD header file. * - * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2018-2020 Cadence. * * Author: Pawel Laszczak */ @@ -13,7 +13,7 @@ #include #include "core.h" -/* DRD register interface for version v1. */ +/* DRD register interface for version v1 of cdns3 driver. */ struct cdns3_otg_regs { __le32 did; __le32 rid; @@ -38,7 +38,7 @@ struct cdns3_otg_regs { __le32 ctrl2; }; -/* DRD register interface for version v0. */ +/* DRD register interface for version v0 of cdns3 driver. */ struct cdns3_otg_legacy_regs { __le32 cmd; __le32 sts; @@ -57,14 +57,45 @@ struct cdns3_otg_legacy_regs { __le32 ctrl1; }; +/* DRD register interface for cdnsp driver */ +struct cdnsp_otg_regs { + __le32 did; + __le32 rid; + __le32 cfgs1; + __le32 cfgs2; + __le32 cmd; + __le32 sts; + __le32 state; + __le32 ien; + __le32 ivect; + __le32 tmr; + __le32 simulate; + __le32 adpbc_sts; + __le32 adp_ramp_time; + __le32 adpbc_ctrl1; + __le32 adpbc_ctrl2; + __le32 override; + __le32 vbusvalid_dbnc_cfg; + __le32 sessvalid_dbnc_cfg; + __le32 susp_timing_ctrl; +}; + +#define OTG_CDNSP_DID 0x0004034E + /* - * Common registers interface for both version of DRD. + * Common registers interface for both CDNS3 and CDNSP version of DRD. */ struct cdns3_otg_common_regs { __le32 cmd; __le32 sts; __le32 state; - __le32 different1; +}; + +/* + * Interrupt related registers. 
These registers are mapped in a different + * location for the CDNSP controller. + */ +struct cdns3_otg_irq_regs { __le32 ien; __le32 ivect; }; @@ -92,9 +123,9 @@ struct cdns3_otg_common_regs { #define OTGCMD_DEV_BUS_DROP BIT(8) /* Drop the bus for Host mode*/ #define OTGCMD_HOST_BUS_DROP BIT(9) -/* Power Down USBSS-DEV. */ +/* Power Down USBSS-DEV - only for CDNS3.*/ #define OTGCMD_DEV_POWER_OFF BIT(11) -/* Power Down CDNSXHCI. */ +/* Power Down CDNSXHCI - only for CDNS3. */ #define OTGCMD_HOST_POWER_OFF BIT(12) /* OTGIEN - bitmasks */ @@ -123,20 +154,31 @@ struct cdns3_otg_common_regs { #define OTGSTS_OTG_NRDY_MASK BIT(11) #define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK) /* - * Value of the strap pins. + * Value of the strap pins for: + * CDNS3: * 000 - no default configuration * 010 - Controller initially configured as Host * 100 - Controller initially configured as Device + * CDNSP: + * 000 - No default configuration. + * 010 - Controller initially configured as Host. + * 100 - Controller initially configured as Device. */ #define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12) #define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00 #define OTGSTS_STRAP_HOST_OTG 0x01 #define OTGSTS_STRAP_HOST 0x02 #define OTGSTS_STRAP_GADGET 0x04 +#define OTGSTS_CDNSP_STRAP_HOST 0x01 +#define OTGSTS_CDNSP_STRAP_GADGET 0x02 + /* Host mode is turned on. */ -#define OTGSTS_XHCI_READY BIT(26) +#define OTGSTS_CDNS3_XHCI_READY BIT(26) +#define OTGSTS_CDNSP_XHCI_READY BIT(27) + /* Device mode is turned on. */ -#define OTGSTS_DEV_READY BIT(27) +#define OTGSTS_CDNS3_DEV_READY BIT(27) +#define OTGSTS_CDNSP_DEV_READY BIT(26) /* OTGSTATE- bitmasks */ #define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0) @@ -152,6 +194,8 @@ struct cdns3_otg_common_regs { #define OVERRIDE_IDPULLUP BIT(0) /* Only for CDNS3_CONTROLLER_V0 version */ #define OVERRIDE_IDPULLUP_V0 BIT(24) +/* Vbusvalid/Sesvalid override select. */ +#define OVERRIDE_SESS_VLD_SEL BIT(10) /* PHYRST_CFG - bitmasks */ #define PHYRST_CFG_PHYRST_A_ENABLE BIT(0) @@ -170,6 +214,5 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns); void cdns3_drd_gadget_off(struct cdns3 *cdns); int cdns3_drd_host_on(struct cdns3 *cdns); void cdns3_drd_host_off(struct cdns3 *cdns); -int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode); #endif /* __LINUX_CDNS3_DRD */
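The strap encoding genuinely differs between the two register maps above: the same three-bit field at bits 14:12 means "host" when it reads 0x2 on CDNS3 but "gadget" when it reads 0x2 on CDNSP. Below is a standalone sketch of that decode in plain user-space C, for illustration only; the version enum and the strap_to_role() helper are invented stand-ins, not driver API.

#include <stdio.h>

#define GENMASK_U32(h, l)  (((0xffffffffu) << (l)) & (0xffffffffu >> (31 - (h))))
#define OTGSTS_STRAP(p)    (((p) & GENMASK_U32(14, 12)) >> 12)

enum ctrl_version { CDNS3_V0, CDNS3_V1, CDNSP_V2 };

static const char *strap_to_role(enum ctrl_version ver, unsigned int sts)
{
        unsigned int strap = OTGSTS_STRAP(sts);

        if (ver == CDNSP_V2) {
                /* CDNSP encodes host as 0x1 and gadget as 0x2 */
                if (strap == 0x1)
                        return "host";
                if (strap == 0x2)
                        return "peripheral";
        } else {
                /* CDNS3 encodes host as 0x2 and gadget as 0x4 */
                if (strap == 0x2)
                        return "host";
                if (strap == 0x4)
                        return "peripheral";
        }
        return "otg";   /* no default configuration strapped */
}

int main(void)
{
        /* bits 14:12 = 0x2: host on CDNS3, but peripheral on CDNSP */
        unsigned int sts = 0x2 << 12;

        printf("cdns3: %s, cdnsp: %s\n",
               strap_to_role(CDNS3_V1, sts), strap_to_role(CDNSP_V2, sts));
        return 0;
}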
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index a37ea946459cc..e3a8b6c71aa1d 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -352,19 +352,6 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); } -static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) -{ - struct cdns3_endpoint *priv_ep = priv_req->priv_ep; - int current_trb = priv_req->start_trb; - - while (current_trb != priv_req->end_trb) { - cdns3_ep_inc_deq(priv_ep); - current_trb = priv_ep->dequeue; - } - - cdns3_ep_inc_deq(priv_ep); -} - /** * cdns3_allow_enable_l1 - enable/disable permits to transition to L1. * @priv_dev: Extended gadget object @@ -1518,10 +1505,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, trb = priv_ep->trb_pool + priv_ep->dequeue; - /* Request was dequeued and TRB was changed to TRB_LINK. */ - if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { + /* The TRB was changed to a link TRB, and the request was handled in ep_dequeue */ + while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { trace_cdns3_complete_trb(priv_ep, trb); - cdns3_move_deq_to_next_trb(priv_req); + cdns3_ep_inc_deq(priv_ep); + trb = priv_ep->trb_pool + priv_ep->dequeue; } if (!request->stream_id) { @@ -1543,7 +1531,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, TRB_LEN(le32_to_cpu(trb->length)); if (priv_req->num_of_trb > 1 && - le32_to_cpu(trb->control) & TRB_SMM) + le32_to_cpu(trb->control) & TRB_SMM && + le32_to_cpu(trb->control) & TRB_CHAIN) transfer_end = true; cdns3_ep_inc_deq(priv_ep); @@ -1703,6 +1692,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) ep_cfg &= ~EP_CFG_ENABLE; writel(ep_cfg, &priv_dev->regs->ep_cfg); priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN; + priv_ep->flags |= EP_UPDATE_EP_TRBADDR; } cdns3_transfer_completed(priv_dev, priv_ep); } else if (!(priv_ep->flags & EP_STALLED) && diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index 6ed4b00dba961..7a2a9559693fb 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -256,8 +256,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t) ci->enabled_otg_timer_bits &= ~(1 << t); if (ci->next_otg_timer == t) { if (ci->enabled_otg_timer_bits == 0) { + spin_unlock_irqrestore(&ci->lock, flags); /* No enabled timers after delete it */ hrtimer_cancel(&ci->otg_fsm_hrtimer); + spin_lock_irqsave(&ci->lock, flags); ci->next_otg_timer = NUM_OTG_FSM_TIMERS; } else { /* Find the next timer */ diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 1433260d99b48..347fb3d3894a5 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -25,6 +25,12 @@ static const char *const ep_type_names[] = { [USB_ENDPOINT_XFER_INT] = "intr", }; +/** + * usb_ep_type_string() - Returns a human-readable name of the endpoint type. + * @ep_type: The endpoint type to return a human-readable name for. If it's not + * any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT}, + * usually obtained from usb_endpoint_type(), the string 'unknown' will be returned. + */ const char *usb_ep_type_string(int ep_type) { if (ep_type < 0 || ep_type >= ARRAY_SIZE(ep_type_names)) @@ -69,6 +75,19 @@ static const char *const speed_names[] = { [USB_SPEED_SUPER_PLUS] = "super-speed-plus", }; +static const char *const ssp_rate[] = { + [USB_SSP_GEN_UNKNOWN] = "UNKNOWN", + [USB_SSP_GEN_2x1] = "super-speed-plus-gen2x1", + [USB_SSP_GEN_1x2] = "super-speed-plus-gen1x2", + [USB_SSP_GEN_2x2] = "super-speed-plus-gen2x2", +}; + +/** + * usb_speed_string() - Returns a human-readable name of the speed. + * @speed: The speed to return a human-readable name for. If it's not + * any of the speeds defined in the usb_device_speed enum, the string for + * USB_SPEED_UNKNOWN will be returned. + */ const char *usb_speed_string(enum usb_device_speed speed) { if (speed < 0 || speed >= ARRAY_SIZE(speed_names)) @@ -77,6 +96,14 @@ const char *usb_speed_string(enum usb_device_speed speed) } EXPORT_SYMBOL_GPL(usb_speed_string); +/** + * usb_get_maximum_speed - Get maximum requested speed for a given USB + * controller. + * @dev: Pointer to the given USB controller device + * + * The function gets the maximum speed string from property "maximum-speed", + * and returns the corresponding enum usb_device_speed.
+ */ enum usb_device_speed usb_get_maximum_speed(struct device *dev) { const char *maximum_speed; @@ -86,12 +113,44 @@ enum usb_device_speed usb_get_maximum_speed(struct device *dev) if (ret < 0) return USB_SPEED_UNKNOWN; - ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed); + ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed); + if (ret > 0) + return USB_SPEED_SUPER_PLUS; + ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed); return (ret < 0) ? USB_SPEED_UNKNOWN : ret; } EXPORT_SYMBOL_GPL(usb_get_maximum_speed); +/** + * usb_get_maximum_ssp_rate - Get the signaling rate generation and lane count + * of a SuperSpeed Plus capable device. + * @dev: Pointer to the given USB controller device + * + * If the string from "maximum-speed" property is super-speed-plus-genXxY where + * 'X' is the generation number and 'Y' is the number of lanes, then this + * function returns the corresponding enum usb_ssp_rate. + */ +enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev) +{ + const char *maximum_speed; + int ret; + + ret = device_property_read_string(dev, "maximum-speed", &maximum_speed); + if (ret < 0) + return USB_SSP_GEN_UNKNOWN; + + ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed); + return (ret < 0) ? USB_SSP_GEN_UNKNOWN : ret; +} +EXPORT_SYMBOL_GPL(usb_get_maximum_ssp_rate); + +/** + * usb_state_string - Returns a human-readable name for the state. + * @state: The state to return a human-readable name for. If it's not + * any of the states defined in the usb_device_state enum, + * the string UNKNOWN will be returned. + */ const char *usb_state_string(enum usb_device_state state) { static const char *const names[] = { @@ -141,6 +200,47 @@ enum usb_dr_mode usb_get_dr_mode(struct device *dev) } EXPORT_SYMBOL_GPL(usb_get_dr_mode); +/** + * usb_decode_interval - Decode bInterval into the time expressed in 1us unit + * @epd: The descriptor of the endpoint + * @speed: The speed at which the endpoint operates + * + * The function returns the interval, expressed in 1us units, for servicing + * the endpoint for data transfers. + */ +unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd, + enum usb_device_speed speed) +{ + unsigned int interval = 0; + + switch (usb_endpoint_type(epd)) { + case USB_ENDPOINT_XFER_CONTROL: + /* uframes per NAK */ + if (speed == USB_SPEED_HIGH) + interval = epd->bInterval; + break; + case USB_ENDPOINT_XFER_ISOC: + interval = 1 << (epd->bInterval - 1); + break; + case USB_ENDPOINT_XFER_BULK: + /* uframes per NAK */ + if (speed == USB_SPEED_HIGH && usb_endpoint_dir_out(epd)) + interval = epd->bInterval; + break; + case USB_ENDPOINT_XFER_INT: + if (speed >= USB_SPEED_HIGH) + interval = 1 << (epd->bInterval - 1); + else + interval = epd->bInterval; + break; + } + + interval *= (speed >= USB_SPEED_HIGH) ? 125 : 1000; + + return interval; +} +EXPORT_SYMBOL_GPL(usb_decode_interval); +
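As a quick sanity check of the decoding rule that usb_decode_interval() centralizes above: one bInterval unit is a 125 us microframe at high speed and beyond, a 1 ms frame at full/low speed, and isochronous plus high-speed interrupt endpoints use a 2^(bInterval-1) exponent. Here is a standalone sketch of the same rule in plain C; the enums and the decode_interval() helper are local stand-ins for the <linux/usb/ch9.h> types, not kernel API.

#include <stdio.h>

/* Local stand-ins for the ch9 enums; illustration only. */
enum speed { SPEED_LOW, SPEED_FULL, SPEED_HIGH, SPEED_SUPER };
enum xfer { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };

/* Mirror of the decoding rule above; returns microseconds, 0 if N/A. */
static unsigned int decode_interval(enum xfer type, enum speed speed,
                                    unsigned int bInterval, int is_out)
{
        unsigned int interval = 0;

        switch (type) {
        case XFER_CONTROL:
                if (speed == SPEED_HIGH)        /* uframes per NAK */
                        interval = bInterval;
                break;
        case XFER_ISOC:
                interval = 1u << (bInterval - 1);
                break;
        case XFER_BULK:
                if (speed == SPEED_HIGH && is_out)  /* uframes per NAK */
                        interval = bInterval;
                break;
        case XFER_INT:
                interval = (speed >= SPEED_HIGH) ?
                           (1u << (bInterval - 1)) : bInterval;
                break;
        }

        /* 125 us microframes at high speed and above, 1 ms frames below. */
        return interval * ((speed >= SPEED_HIGH) ? 125 : 1000);
}

int main(void)
{
        /* HS interrupt, bInterval = 4: 2^3 * 125 us = 1000 us */
        printf("%u us\n", decode_interval(XFER_INT, SPEED_HIGH, 4, 0));
        /* FS interrupt, bInterval = 10: 10 * 1000 us = 10000 us */
        printf("%u us\n", decode_interval(XFER_INT, SPEED_FULL, 10, 0));
        return 0;
}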
#ifdef CONFIG_OF /** * of_usb_get_dr_mode_by_phy - Get dual role mode for the controller device diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c index ba849c7bc5c7f..f0c0e8db70388 100644 --- a/drivers/usb/common/debug.c +++ b/drivers/usb/common/debug.c @@ -207,12 +207,28 @@ static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size) snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue); } -/* - * usb_decode_ctrl - returns a string representation of ctrl request - */ -const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, - __u8 bRequest, __u16 wValue, __u16 wIndex, - __u16 wLength) +static void usb_decode_ctrl_generic(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength) +{ + u8 recip = bRequestType & USB_RECIP_MASK; + u8 type = bRequestType & USB_TYPE_MASK; + + snprintf(str, size, + "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u", + (type == USB_TYPE_STANDARD) ? "Standard" : + (type == USB_TYPE_VENDOR) ? "Vendor" : + (type == USB_TYPE_CLASS) ? "Class" : "Unknown", + (recip == USB_RECIP_DEVICE) ? "Device" : + (recip == USB_RECIP_INTERFACE) ? "Interface" : + (recip == USB_RECIP_ENDPOINT) ? "Endpoint" : "Unknown", + (bRequestType & USB_DIR_IN) ? "IN" : "OUT", + bRequest, wValue, wIndex, wLength); +} + +static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength) { switch (bRequest) { case USB_REQ_GET_STATUS: @@ -253,14 +269,48 @@ const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, usb_decode_set_isoch_delay(wValue, str, size); break; default: - snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x", - bRequestType, bRequest, - (u8)(cpu_to_le16(wValue) & 0xff), - (u8)(cpu_to_le16(wValue) >> 8), - (u8)(cpu_to_le16(wIndex) & 0xff), - (u8)(cpu_to_le16(wIndex) >> 8), - (u8)(cpu_to_le16(wLength) & 0xff), - (u8)(cpu_to_le16(wLength) >> 8)); + usb_decode_ctrl_generic(str, size, bRequestType, bRequest, + wValue, wIndex, wLength); + break; + } +} + +/** + * usb_decode_ctrl - Returns a human-readable representation of the control request. + * @str: buffer to return a human-readable representation of control request. + * This buffer should have about 200 bytes. + * @size: size of str buffer. + * @bRequestType: matches the USB bmRequestType field + * @bRequest: matches the USB bRequest field + * @wValue: matches the USB wValue field (CPU byte order) + * @wIndex: matches the USB wIndex field (CPU byte order) + * @wLength: matches the USB wLength field (CPU byte order) + * + * The function returns a decoded, formatted and human-readable description of + * the control request packet. + * + * The intended usage is in tracepoints, so the function returns the same + * buffer it was given in @str; this allows it to be used directly in + * TP_printk(). + * + * Important: the wValue, wIndex and wLength parameters should be processed + * with the le16_to_cpu macro before invoking this function. + */ +const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength) +{ + switch (bRequestType & USB_TYPE_MASK) { + case USB_TYPE_STANDARD: + usb_decode_ctrl_standard(str, size, bRequestType, bRequest, + wValue, wIndex, wLength); + break; + case USB_TYPE_VENDOR: + case USB_TYPE_CLASS: + default: + usb_decode_ctrl_generic(str, size, bRequestType, bRequest, + wValue, wIndex, wLength); + break; } return str;
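The generic fallback above keys everything off bmRequestType, which packs direction (bit 7), type (bits 6:5) and recipient (bits 4:0); the ch9 masks are USB_DIR_IN = 0x80, USB_TYPE_MASK = 0x60 and USB_RECIP_MASK = 0x1f. A minimal standalone sketch of that unpacking in plain C; the macro and array names are local to the example, not kernel API.

#include <stdio.h>

#define DIR_IN      0x80        /* USB_DIR_IN */
#define TYPE_MASK   0x60        /* USB_TYPE_MASK, type in bits 6:5 */
#define RECIP_MASK  0x1f        /* USB_RECIP_MASK */

int main(void)
{
        unsigned char bRequestType = 0xa1;      /* IN | class | interface */

        /* Index 0..3 follows the ch9 encoding (type field >> 5). */
        static const char *const types[] = {
                "Standard", "Class", "Vendor", "Reserved"
        };

        printf("Dir=%s Type=%s Recipient=%u\n",
               (bRequestType & DIR_IN) ? "IN" : "OUT",
               types[(bRequestType & TYPE_MASK) >> 5],
               bRequestType & RECIP_MASK);      /* 0=device, 1=interface, ... */
        return 0;
}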
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 1ef2de6e375ac..d8b0041de612f 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -157,38 +157,25 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, switch (usb_endpoint_type(desc)) { case USB_ENDPOINT_XFER_CONTROL: type = "Ctrl"; - if (speed == USB_SPEED_HIGH) /* uframes per NAK */ - interval = desc->bInterval; - else - interval = 0; dir = 'B'; /* ctrl is bidirectional */ break; case USB_ENDPOINT_XFER_ISOC: type = "Isoc"; - interval = 1 << (desc->bInterval - 1); break; case USB_ENDPOINT_XFER_BULK: type = "Bulk"; - if (speed == USB_SPEED_HIGH && dir == 'O') /* uframes per NAK */ - interval = desc->bInterval; - else - interval = 0; break; case USB_ENDPOINT_XFER_INT: type = "Int."; - if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER) - interval = 1 << (desc->bInterval - 1); - else - interval = desc->bInterval; break; default: /* "can't happen" */ return start; } - interval *= (speed == USB_SPEED_HIGH || - speed >= USB_SPEED_SUPER) ? 125 : 1000; - if (interval % 1000) + + interval = usb_decode_interval(desc, speed); + if (interval % 1000) { unit = 'u'; - else { + } else { unit = 'm'; interval /= 1000; } diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c index 1c2c040796760..fc3341f2bb61a 100644 --- a/drivers/usb/core/endpoint.c +++ b/drivers/usb/core/endpoint.c @@ -84,40 +84,13 @@ static ssize_t interval_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ep_device *ep = to_ep_device(dev); + unsigned int interval; char unit; - unsigned interval = 0; - unsigned in; - in = (ep->desc->bEndpointAddress & USB_DIR_IN); - - switch (usb_endpoint_type(ep->desc)) { - case USB_ENDPOINT_XFER_CONTROL: - if (ep->udev->speed == USB_SPEED_HIGH) - /* uframes per NAK */ - interval = ep->desc->bInterval; - break; - - case USB_ENDPOINT_XFER_ISOC: - interval = 1 << (ep->desc->bInterval - 1); - break; - - case USB_ENDPOINT_XFER_BULK: - if (ep->udev->speed == USB_SPEED_HIGH && !in) - /* uframes per NAK */ - interval = ep->desc->bInterval; - break; - - case USB_ENDPOINT_XFER_INT: - if (ep->udev->speed == USB_SPEED_HIGH) - interval = 1 << (ep->desc->bInterval - 1); - else - interval = ep->desc->bInterval; - break; - } - interval *= (ep->udev->speed == USB_SPEED_HIGH) ?
125 : 1000; - if (interval % 1000) + interval = usb_decode_interval(ep->desc, ep->udev->speed); + if (interval % 1000) { unit = 'u'; - else { + } else { unit = 'm'; interval /= 1000; } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 53b3d77fba6a2..5925b8eb9ee38 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -42,6 +42,9 @@ #define USB_PRODUCT_USB5534B 0x5534 #define USB_VENDOR_CYPRESS 0x04b4 #define USB_PRODUCT_CY7C65632 0x6570 +#define USB_VENDOR_TEXAS_INSTRUMENTS 0x0451 +#define USB_PRODUCT_TUSB8041_USB3 0x8140 +#define USB_PRODUCT_TUSB8041_USB2 0x8142 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 @@ -5715,6 +5718,16 @@ static const struct usb_device_id hub_id_table[] = { .idVendor = USB_VENDOR_GENESYS_LOGIC, .bInterfaceClass = USB_CLASS_HUB, .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND}, + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT, + .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS, + .idProduct = USB_PRODUCT_TUSB8041_USB2, + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT, + .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS, + .idProduct = USB_PRODUCT_TUSB8041_USB3, + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, .bDeviceClass = USB_CLASS_HUB}, { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, @@ -5968,7 +5981,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) * * Return: The same as for usb_reset_and_verify_device(). * However, if a reset is already in progress (for instance, if a - * driver doesn't have pre_ or post_reset() callbacks, and while + * driver doesn't have pre_reset() or post_reset() callbacks, and while * being unbound or re-bound during the ongoing reset its disconnect() * or probe() routine tries to perform a second, nested reset), the * routine returns -EINPROGRESS. 
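For context on the hub quirk entries above: a usb_device_id table is searched entry by entry, and match_flags selects which fields of an entry actually constrain the match, so the two TUSB8041 entries match on vendor and product alone. Below is a standalone sketch of that matching scheme; the struct layout, flag values and lookup() helper are simplified stand-ins for the real struct usb_device_id machinery, not kernel API.

#include <stdio.h>

#define MATCH_VENDOR   0x0001
#define MATCH_PRODUCT  0x0002

struct id { unsigned int flags; unsigned short vid, pid; const char *info; };

static const struct id table[] = {
        { MATCH_VENDOR | MATCH_PRODUCT, 0x0451, 0x8140, "disable-autosuspend" },
        { MATCH_VENDOR | MATCH_PRODUCT, 0x0451, 0x8142, "disable-autosuspend" },
        { 0 }   /* terminator */
};

static const char *lookup(unsigned short vid, unsigned short pid)
{
        const struct id *e;

        for (e = table; e->flags; e++) {
                /* Only the fields selected by flags constrain the match. */
                if ((e->flags & MATCH_VENDOR) && e->vid != vid)
                        continue;
                if ((e->flags & MATCH_PRODUCT) && e->pid != pid)
                        continue;
                return e->info; /* first match wins */
        }
        return "no quirk";
}

int main(void)
{
        printf("%s\n", lookup(0x0451, 0x8140));  /* disable-autosuspend */
        printf("%s\n", lookup(0x0451, 0x8143));  /* no quirk */
        return 0;
}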
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f03ee889ecc70..4ac1c22f13be0 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -362,6 +362,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM }, { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + /* Realforce 87U Keyboard */ + { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, + /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -388,6 +391,15 @@ static const struct usb_device_id usb_quirk_list[] = { /* Kingston DataTraveler 3.0 */ { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM }, + /* NVIDIA Jetson devices in Force Recovery mode */ + { USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, @@ -438,6 +450,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1532, 0x0116), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */ + { USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */ { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM }, @@ -511,6 +527,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */ + { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM }, + /* DELL USB GEN2 */ { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 50b2fc7fcc0e3..8751276ef5789 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -37,6 +37,71 @@ bool usb_acpi_power_manageable(struct usb_device *hdev, int index) } EXPORT_SYMBOL_GPL(usb_acpi_power_manageable); +#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899" +#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5 + +/** + * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port. + * @hdev: USB device belonging to the usb hub + * @index: zero based port index + * + * Some USB3 ports may not support USB3 link power management U1/U2 states + * due to different retimer setup. ACPI provides _DSM method which returns 0x01 + * if U1 and U2 states should be disabled. 
Evaluate _DSM with: + * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899 + * Arg1: Revision ID = 0 + * Arg2: Function Index = 5 + * Arg3: (empty) + * + * Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0 + */ + +int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index) +{ + union acpi_object *obj; + acpi_handle port_handle; + int port1 = index + 1; + guid_t guid; + int ret; + + ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid); + if (ret) + return ret; + + port_handle = usb_get_hub_port_acpi_handle(hdev, port1); + if (!port_handle) { + dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1); + return -ENODEV; + } + + if (!acpi_check_dsm(port_handle, &guid, 0, + BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) { + dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n", + port1, USB_DSM_DISABLE_U1_U2_FOR_PORT); + return -ENODEV; + } + + obj = acpi_evaluate_dsm(port_handle, &guid, 0, + USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL); + + if (!obj) + return -ENODEV; + + if (obj->type != ACPI_TYPE_INTEGER) { + dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1); + ACPI_FREE(obj); + return -EINVAL; + } + + if (obj->integer.value == 0x01) + ret = 1; + + ACPI_FREE(obj); + + return ret; +} +EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable); + /** * usb_acpi_set_power_state - control usb port's power via acpi power * resource diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 5aae7504f78a1..d73f624ed42a3 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -114,29 +114,31 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) dwc->current_dr_role = mode; } -static int dwc3_core_soft_reset(struct dwc3 *dwc); - static void __dwc3_set_mode(struct work_struct *work) { struct dwc3 *dwc = work_to_dwc(work); unsigned long flags; int ret; u32 reg; + u32 desired_dr_role; mutex_lock(&dwc->mutex); + spin_lock_irqsave(&dwc->lock, flags); + desired_dr_role = dwc->desired_dr_role; + spin_unlock_irqrestore(&dwc->lock, flags); pm_runtime_get_sync(dwc->dev); if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) dwc3_otg_update(dwc, 0); - if (!dwc->desired_dr_role) + if (!desired_dr_role) goto out; - if (dwc->desired_dr_role == dwc->current_dr_role) + if (desired_dr_role == dwc->current_dr_role) goto out; - if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) + if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) goto out; switch (dwc->current_dr_role) { @@ -164,7 +166,7 @@ static void __dwc3_set_mode(struct work_struct *work) */ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC31, 190A)) && - dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { + desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { reg = dwc3_readl(dwc->regs, DWC3_GCTL); reg |= DWC3_GCTL_CORESOFTRESET; dwc3_writel(dwc->regs, DWC3_GCTL, reg); @@ -184,11 +186,11 @@ static void __dwc3_set_mode(struct work_struct *work) spin_lock_irqsave(&dwc->lock, flags); - dwc3_set_prtcap(dwc, dwc->desired_dr_role); + dwc3_set_prtcap(dwc, desired_dr_role); spin_unlock_irqrestore(&dwc->lock, flags); - switch (dwc->desired_dr_role) { + switch (desired_dr_role) { case DWC3_GCTL_PRTCAP_HOST: ret = dwc3_host_init(dwc); if (ret) { @@ -265,7 +267,7 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) * dwc3_core_soft_reset - Issues core soft reset and PHY reset * @dwc: pointer to our context structure */ -static int dwc3_core_soft_reset(struct dwc3 *dwc) +int dwc3_core_soft_reset(struct dwc3 *dwc) { u32 reg; int retries = 1000; @@ -958,8 +960,13 @@ static int dwc3_core_init(struct dwc3 *dwc) if 
(!dwc->ulpi_ready) { ret = dwc3_core_ulpi_init(dwc); - if (ret) + if (ret) { + if (ret == -ETIMEDOUT) { + dwc3_core_soft_reset(dwc); + ret = -EPROBE_DEFER; + } goto err0; + } dwc->ulpi_ready = true; } diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 79e1b82e5e057..cbebe541f7e8f 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1010,6 +1010,7 @@ struct dwc3_scratchpad_array { * @tx_max_burst_prd: max periodic ESS transmit burst size * @hsphy_interface: "utmi" or "ulpi" * @connected: true when we're connected to a host, false otherwise + * @softconnect: true when gadget connect is called, false when disconnect runs * @delayed_status: true when gadget driver asks for delayed status * @ep0_bounced: true when we used bounce buffer * @ep0_expect_in: true when we expect a DATA IN transfer @@ -1218,6 +1219,7 @@ struct dwc3 { const char *hsphy_interface; unsigned connected:1; + unsigned softconnect:1; unsigned delayed_status:1; unsigned ep0_bounced:1; unsigned ep0_expect_in:1; @@ -1456,6 +1458,8 @@ bool dwc3_has_imod(struct dwc3 *dwc); int dwc3_event_buffers_setup(struct dwc3 *dwc); void dwc3_event_buffers_cleanup(struct dwc3 *dwc); +int dwc3_core_soft_reset(struct dwc3 *dwc); + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_host_init(struct dwc3 *dwc); void dwc3_host_exit(struct dwc3 *dwc); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 90bb022737da8..ee7b71827216c 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -37,15 +37,6 @@ struct dwc3_exynos { struct regulator *vdd10; }; -static int dwc3_exynos_remove_child(struct device *dev, void *unused) -{ - struct platform_device *pdev = to_platform_device(dev); - - platform_device_unregister(pdev); - - return 0; -} - static int dwc3_exynos_probe(struct platform_device *pdev) { struct dwc3_exynos *exynos; @@ -142,7 +133,7 @@ static int dwc3_exynos_remove(struct platform_device *pdev) struct dwc3_exynos *exynos = platform_get_drvdata(pdev); int i; - device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); + of_platform_depopulate(&pdev->dev); for (i = exynos->num_clks - 1; i >= 0; i--) clk_disable_unprepare(exynos->clks[i]); diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index ca3a35fd8f746..dac13fe978110 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -115,7 +115,7 @@ static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val) readl(base + offset); } -static void dwc3_qcom_vbus_overrride_enable(struct dwc3_qcom *qcom, bool enable) +static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable) { if (enable) { dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL, @@ -136,7 +136,7 @@ static int dwc3_qcom_vbus_notifier(struct notifier_block *nb, struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb); /* enable vbus override for device mode */ - dwc3_qcom_vbus_overrride_enable(qcom, event); + dwc3_qcom_vbus_override_enable(qcom, event); qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST; return NOTIFY_DONE; @@ -148,7 +148,7 @@ static int dwc3_qcom_host_notifier(struct notifier_block *nb, struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb); /* disable vbus override in host mode */ - dwc3_qcom_vbus_overrride_enable(qcom, !event); + dwc3_qcom_vbus_override_enable(qcom, !event); qcom->mode = event ? 
USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL; return NOTIFY_DONE; @@ -258,7 +258,8 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) if (IS_ERR(qcom->icc_path_apps)) { dev_err(dev, "failed to get apps-usb path: %ld\n", PTR_ERR(qcom->icc_path_apps)); - return PTR_ERR(qcom->icc_path_apps); + ret = PTR_ERR(qcom->icc_path_apps); + goto put_path_ddr; } if (usb_get_maximum_speed(&qcom->dwc3->dev) >= USB_SPEED_SUPER || @@ -271,17 +272,23 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) if (ret) { dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret); - return ret; + goto put_path_apps; } ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW); if (ret) { dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret); - return ret; + goto put_path_apps; } return 0; + +put_path_apps: + icc_put(qcom->icc_path_apps); +put_path_ddr: + icc_put(qcom->icc_path_ddr); + return ret; } /** @@ -825,8 +832,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev) qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev); /* enable vbus override for device mode */ - if (qcom->mode == USB_DR_MODE_PERIPHERAL) - dwc3_qcom_vbus_overrride_enable(qcom, true); + if (qcom->mode != USB_DR_MODE_HOST) + dwc3_qcom_vbus_override_enable(qcom, true); /* register extcon to override sw_vbus on Vbus change later */ ret = dwc3_qcom_register_extcon(qcom); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a2a10c05ef3fb..28a1194f849fc 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -291,7 +291,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, * * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2 */ - if (dwc->gadget->speed <= USB_SPEED_HIGH) { + if (dwc->gadget->speed <= USB_SPEED_HIGH || + DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; @@ -752,7 +753,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) return 0; } -static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) +static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status) { struct dwc3_request *req; @@ -762,19 +763,19 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) while (!list_empty(&dep->started_list)) { req = next_request(&dep->started_list); - dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + dwc3_gadget_giveback(dep, req, status); } while (!list_empty(&dep->pending_list)) { req = next_request(&dep->pending_list); - dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + dwc3_gadget_giveback(dep, req, status); } while (!list_empty(&dep->cancelled_list)) { req = next_request(&dep->cancelled_list); - dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + dwc3_gadget_giveback(dep, req, status); } } @@ -803,18 +804,18 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); + dwc3_remove_requests(dwc, dep, -ESHUTDOWN); + + dep->stream_capable = false; + dep->type = 0; + dep->flags = 0; + /* Clear out the ep descriptors for non-ep0 */ if (dep->number > 1) { dep->endpoint.comp_desc = NULL; dep->endpoint.desc = NULL; } - dwc3_remove_requests(dwc, dep); - - dep->stream_capable = false; - dep->type = 0; - dep->flags = 0; - return 0; } @@ -1064,8 +1065,8 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, trb->ctrl = 
DWC3_TRBCTL_ISOCHRONOUS; } - /* always enable Interrupt on Missed ISOC */ - trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; + if (!no_interrupt && !chain) + trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; break; case USB_ENDPOINT_XFER_BULK: @@ -2067,7 +2068,7 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc) if (!dep) continue; - dwc3_remove_requests(dwc, dep); + dwc3_remove_requests(dwc, dep, -ESHUTDOWN); } } @@ -2120,14 +2121,42 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc); static void __dwc3_gadget_stop(struct dwc3 *dwc); static int __dwc3_gadget_start(struct dwc3 *dwc); +static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) +{ + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + dwc->connected = false; + + /* + * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a + * Section 4.1.8 Table 4-7, it states that for a device-initiated + * disconnect, the SW needs to ensure that it sends "a DEPENDXFER + * command for any active transfers" before clearing the RunStop + * bit. + */ + dwc3_stop_active_transfers(dwc); + __dwc3_gadget_stop(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + + /* + * Note: if the GEVNTCOUNT indicates events in the event buffer, the + * driver needs to acknowledge them before the controller can halt. + * Simply let the interrupt handler acknowledge and handle the + * remaining events generated by the controller while polling for + * DSTS.DEVCTLHLT. + */ + return dwc3_gadget_run_stop(dwc, false, false); +} + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); - unsigned long flags; int ret; is_on = !!is_on; + dwc->softconnect = is_on; /* * Per databook, when we want to stop the gadget, if a control transfer * is still in process, complete it and get the core into setup phase. @@ -2163,50 +2192,27 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) return 0; } - /* - * Synchronize and disable any further event handling while controller - * is being enabled/disabled. - */ - disable_irq(dwc->irq_gadget); - - spin_lock_irqsave(&dwc->lock, flags); + if (dwc->pullups_connected == is_on) { + pm_runtime_put(dwc->dev); + return 0; + } if (!is_on) { - u32 count; - - dwc->connected = false; + ret = dwc3_gadget_soft_disconnect(dwc); + } else { /* - * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a - * Section 4.1.8 Table 4-7, it states that for a device-initiated - * disconnect, the SW needs to ensure that it sends "a DEPENDXFER - * command for any active transfers" before clearing the RunStop - * bit. + * In the Synopsys DWC_usb31 1.90a programming guide section + * 4.1.9, it specifies that a reconnect after a + * device-initiated disconnect requires a core soft reset + * (DCTL.CSftRst) before enabling the run/stop bit. */ - dwc3_stop_active_transfers(dwc); - __dwc3_gadget_stop(dwc); + dwc3_core_soft_reset(dwc); - /* - * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a - * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the - * "software needs to acknowledge the events that are generated - * (by writing to GEVNTCOUNTn) while it is waiting for this bit - * to be set to '1'."
- */ - count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); - count &= DWC3_GEVNTCOUNT_MASK; - if (count > 0) { - dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); - dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % - dwc->ev_buf->length; - } - } else { + dwc3_event_buffers_setup(dwc); __dwc3_gadget_start(dwc); + ret = dwc3_gadget_run_stop(dwc, true, false); } - ret = dwc3_gadget_run_stop(dwc, is_on, false); - spin_unlock_irqrestore(&dwc->lock, flags); - enable_irq(dwc->irq_gadget); - pm_runtime_put(dwc->dev); return ret; @@ -2795,6 +2801,10 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, if (event->status & DEPEVT_STATUS_SHORT && !chain) return 1; + if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) && + DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC) + return 1; + if ((trb->ctrl & DWC3_TRB_CTRL_IOC) || (trb->ctrl & DWC3_TRB_CTRL_LST)) return 1; @@ -4048,7 +4058,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc) { int ret; - if (!dwc->gadget_driver) + if (!dwc->gadget_driver || !dwc->softconnect) return 0; ret = __dwc3_gadget_start(dwc); diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 86bc2bec9038d..b06ab85f8187e 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -10,13 +10,8 @@ #include #include -#include "../host/xhci-plat.h" #include "core.h" -static const struct xhci_plat_priv dwc3_xhci_plat_priv = { - .quirks = XHCI_SKIP_PHY_INIT, -}; - static int dwc3_host_get_irq(struct dwc3 *dwc) { struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); @@ -92,11 +87,6 @@ int dwc3_host_init(struct dwc3 *dwc) goto err; } - ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv, - sizeof(dwc3_xhci_plat_priv)); - if (ret) - goto err; - memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); if (dwc->usb3_lpm_capable) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index bb0d92837f677..8c48c9f801be2 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -278,6 +278,11 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) struct usb_request *req = ffs->ep0req; int ret; + if (!req) { + spin_unlock_irq(&ffs->ev.waitq.lock); + return -EINVAL; + } + req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); spin_unlock_irq(&ffs->ev.waitq.lock); @@ -1881,10 +1886,14 @@ static void functionfs_unbind(struct ffs_data *ffs) ENTER(); if (!WARN_ON(!ffs->gadget)) { + /* dequeue before freeing ep0req */ + usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req); + mutex_lock(&ffs->mutex); usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); ffs->ep0req = NULL; ffs->gadget = NULL; clear_bit(FFS_FL_BOUND, &ffs->flags); + mutex_unlock(&ffs->mutex); ffs_data_put(ffs); } } diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 6742271cd6e6a..e7cf56b13c643 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -45,12 +45,25 @@ struct f_hidg { unsigned short report_desc_length; char *report_desc; unsigned short report_length; + /* + * use_out_ep - if true, the OUT Endpoint (interrupt out method) + * will be used to receive reports from the host + * using functions with the "intout" suffix. + * Otherwise, the OUT Endpoint will not be configured + * and the SETUP/SET_REPORT method ("ssreport" suffix) + * will be used to receive reports. 
+ */ + bool use_out_ep; /* recv report */ - struct list_head completed_out_req; spinlock_t read_spinlock; wait_queue_head_t read_queue; + /* recv report - interrupt out only (use_out_ep == 1) */ + struct list_head completed_out_req; unsigned int qlen; + /* recv report - setup set_report only (use_out_ep == 0) */ + char *set_report_buf; + unsigned int set_report_length; /* send report */ spinlock_t write_spinlock; @@ -58,7 +71,7 @@ struct f_hidg { wait_queue_head_t write_queue; struct usb_request *req; - int minor; + struct device dev; struct cdev cdev; struct usb_function func; @@ -71,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f) return container_of(f, struct f_hidg, func); } +static void hidg_release(struct device *dev) +{ + struct f_hidg *hidg = container_of(dev, struct f_hidg, dev); + + kfree(hidg->set_report_buf); + kfree(hidg); +} + /*-------------------------------------------------------------------------*/ /* Static descriptors */ @@ -79,7 +100,7 @@ static struct usb_interface_descriptor hidg_interface_desc = { .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bAlternateSetting = 0, - .bNumEndpoints = 2, + /* .bNumEndpoints = DYNAMIC (depends on use_out_ep) */ .bInterfaceClass = USB_CLASS_HID, /* .bInterfaceSubClass = DYNAMIC */ /* .bInterfaceProtocol = DYNAMIC */ @@ -140,7 +161,7 @@ static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = { /* .wBytesPerInterval = DYNAMIC */ }; -static struct usb_descriptor_header *hidg_ss_descriptors[] = { +static struct usb_descriptor_header *hidg_ss_descriptors_intout[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_ss_in_ep_desc, @@ -150,6 +171,14 @@ static struct usb_descriptor_header *hidg_ss_descriptors[] = { NULL, }; +static struct usb_descriptor_header *hidg_ss_descriptors_ssreport[] = { + (struct usb_descriptor_header *)&hidg_interface_desc, + (struct usb_descriptor_header *)&hidg_desc, + (struct usb_descriptor_header *)&hidg_ss_in_ep_desc, + (struct usb_descriptor_header *)&hidg_ss_in_comp_desc, + NULL, +}; + /* High-Speed Support */ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = { @@ -176,7 +205,7 @@ static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = { */ }; -static struct usb_descriptor_header *hidg_hs_descriptors[] = { +static struct usb_descriptor_header *hidg_hs_descriptors_intout[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_hs_in_ep_desc, @@ -184,6 +213,13 @@ static struct usb_descriptor_header *hidg_hs_descriptors[] = { NULL, }; +static struct usb_descriptor_header *hidg_hs_descriptors_ssreport[] = { + (struct usb_descriptor_header *)&hidg_interface_desc, + (struct usb_descriptor_header *)&hidg_desc, + (struct usb_descriptor_header *)&hidg_hs_in_ep_desc, + NULL, +}; + /* Full-Speed Support */ static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = { @@ -210,7 +246,7 @@ static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = { */ }; -static struct usb_descriptor_header *hidg_fs_descriptors[] = { +static struct usb_descriptor_header *hidg_fs_descriptors_intout[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_fs_in_ep_desc, @@ -218,6 +254,13 @@ static struct usb_descriptor_header *hidg_fs_descriptors[] = { NULL, }; +static struct usb_descriptor_header 
*hidg_fs_descriptors_ssreport[] = { + (struct usb_descriptor_header *)&hidg_interface_desc, + (struct usb_descriptor_header *)&hidg_desc, + (struct usb_descriptor_header *)&hidg_fs_in_ep_desc, + NULL, +}; + /*-------------------------------------------------------------------------*/ /* Strings */ @@ -241,8 +284,8 @@ static struct usb_gadget_strings *ct_func_strings[] = { /*-------------------------------------------------------------------------*/ /* Char Device */ -static ssize_t f_hidg_read(struct file *file, char __user *buffer, - size_t count, loff_t *ptr) +static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer, + size_t count, loff_t *ptr) { struct f_hidg *hidg = file->private_data; struct f_hidg_req_list *list; @@ -255,15 +298,15 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, spin_lock_irqsave(&hidg->read_spinlock, flags); -#define READ_COND (!list_empty(&hidg->completed_out_req)) +#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req)) /* wait for at least one buffer to complete */ - while (!READ_COND) { + while (!READ_COND_INTOUT) { spin_unlock_irqrestore(&hidg->read_spinlock, flags); if (file->f_flags & O_NONBLOCK) return -EAGAIN; - if (wait_event_interruptible(hidg->read_queue, READ_COND)) + if (wait_event_interruptible(hidg->read_queue, READ_COND_INTOUT)) return -ERESTARTSYS; spin_lock_irqsave(&hidg->read_spinlock, flags); @@ -313,6 +356,60 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, return count; } +#define READ_COND_SSREPORT (hidg->set_report_buf != NULL) + +static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer, + size_t count, loff_t *ptr) +{ + struct f_hidg *hidg = file->private_data; + char *tmp_buf = NULL; + unsigned long flags; + + if (!count) + return 0; + + spin_lock_irqsave(&hidg->read_spinlock, flags); + + while (!READ_COND_SSREPORT) { + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible(hidg->read_queue, READ_COND_SSREPORT)) + return -ERESTARTSYS; + + spin_lock_irqsave(&hidg->read_spinlock, flags); + } + + count = min_t(unsigned int, count, hidg->set_report_length); + tmp_buf = hidg->set_report_buf; + hidg->set_report_buf = NULL; + + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + + if (tmp_buf != NULL) { + count -= copy_to_user(buffer, tmp_buf, count); + kfree(tmp_buf); + } else { + count = -ENOMEM; + } + + wake_up(&hidg->read_queue); + + return count; +} + +static ssize_t f_hidg_read(struct file *file, char __user *buffer, + size_t count, loff_t *ptr) +{ + struct f_hidg *hidg = file->private_data; + + if (hidg->use_out_ep) + return f_hidg_intout_read(file, buffer, count, ptr); + else + return f_hidg_ssreport_read(file, buffer, count, ptr); +} + static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) { struct f_hidg *hidg = (struct f_hidg *)ep->driver_data; @@ -433,14 +530,20 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait) if (WRITE_COND) ret |= EPOLLOUT | EPOLLWRNORM; - if (READ_COND) - ret |= EPOLLIN | EPOLLRDNORM; + if (hidg->use_out_ep) { + if (READ_COND_INTOUT) + ret |= EPOLLIN | EPOLLRDNORM; + } else { + if (READ_COND_SSREPORT) + ret |= EPOLLIN | EPOLLRDNORM; + } return ret; } #undef WRITE_COND -#undef READ_COND +#undef READ_COND_SSREPORT +#undef READ_COND_INTOUT static int f_hidg_release(struct inode *inode, struct file *fd) { @@ -467,7 +570,7 @@ static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep, return 
alloc_ep_req(ep, length); } -static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) +static void hidg_intout_complete(struct usb_ep *ep, struct usb_request *req) { struct f_hidg *hidg = (struct f_hidg *) req->context; struct usb_composite_dev *cdev = hidg->func.config->cdev; @@ -502,6 +605,37 @@ static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) } } +static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_hidg *hidg = (struct f_hidg *)req->context; + struct usb_composite_dev *cdev = hidg->func.config->cdev; + char *new_buf = NULL; + unsigned long flags; + + if (req->status != 0 || req->buf == NULL || req->actual == 0) { + ERROR(cdev, + "%s FAILED: status=%d, buf=%p, actual=%d\n", + __func__, req->status, req->buf, req->actual); + return; + } + + spin_lock_irqsave(&hidg->read_spinlock, flags); + + new_buf = krealloc(hidg->set_report_buf, req->actual, GFP_ATOMIC); + if (new_buf == NULL) { + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + return; + } + hidg->set_report_buf = new_buf; + + hidg->set_report_length = req->actual; + memcpy(hidg->set_report_buf, req->buf, req->actual); + + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + + wake_up(&hidg->read_queue); +} + static int hidg_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { @@ -549,7 +683,11 @@ static int hidg_setup(struct usb_function *f, case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_SET_REPORT): VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength); - goto stall; + if (hidg->use_out_ep) + goto stall; + req->complete = hidg_ssreport_complete; + req->context = hidg; + goto respond; break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 @@ -637,15 +775,18 @@ static void hidg_disable(struct usb_function *f) unsigned long flags; usb_ep_disable(hidg->in_ep); - usb_ep_disable(hidg->out_ep); - spin_lock_irqsave(&hidg->read_spinlock, flags); - list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { - free_ep_req(hidg->out_ep, list->req); - list_del(&list->list); - kfree(list); + if (hidg->out_ep) { + usb_ep_disable(hidg->out_ep); + + spin_lock_irqsave(&hidg->read_spinlock, flags); + list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { + free_ep_req(hidg->out_ep, list->req); + list_del(&list->list); + kfree(list); + } + spin_unlock_irqrestore(&hidg->read_spinlock, flags); } - spin_unlock_irqrestore(&hidg->read_spinlock, flags); spin_lock_irqsave(&hidg->write_spinlock, flags); if (!hidg->write_pending) { @@ -691,8 +832,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } } - - if (hidg->out_ep != NULL) { + if (hidg->use_out_ep && hidg->out_ep != NULL) { /* restart endpoint */ usb_ep_disable(hidg->out_ep); @@ -717,7 +857,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) hidg_alloc_ep_req(hidg->out_ep, hidg->report_length); if (req) { - req->complete = hidg_set_report_complete; + req->complete = hidg_intout_complete; req->context = hidg; status = usb_ep_queue(hidg->out_ep, req, GFP_ATOMIC); @@ -743,7 +883,8 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } return 0; disable_out_ep: - usb_ep_disable(hidg->out_ep); + if (hidg->out_ep) + usb_ep_disable(hidg->out_ep); free_req_in: if (req_in) free_ep_req(hidg->in_ep, req_in); @@ -771,9 +912,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; struct f_hidg *hidg = 
func_to_hidg(f); struct usb_string *us; - struct device *device; int status; - dev_t dev; /* maybe allocate device-global string IDs, and patch descriptors */ us = usb_gstrings_attach(c->cdev, ct_func_strings, @@ -795,14 +934,21 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) goto fail; hidg->in_ep = ep; - ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc); - if (!ep) - goto fail; - hidg->out_ep = ep; + hidg->out_ep = NULL; + if (hidg->use_out_ep) { + ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc); + if (!ep) + goto fail; + hidg->out_ep = ep; + } + + /* used only if use_out_ep == 0 */ + hidg->set_report_buf = NULL; /* set descriptor dynamic values */ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; + hidg_interface_desc.bNumEndpoints = hidg->use_out_ep ? 2 : 1; hidg->protocol = HID_REPORT_PROTOCOL; hidg->idle = 1; hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); @@ -833,9 +979,19 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_ss_out_ep_desc.bEndpointAddress = hidg_fs_out_ep_desc.bEndpointAddress; - status = usb_assign_descriptors(f, hidg_fs_descriptors, - hidg_hs_descriptors, hidg_ss_descriptors, - hidg_ss_descriptors); + if (hidg->use_out_ep) + status = usb_assign_descriptors(f, + hidg_fs_descriptors_intout, + hidg_hs_descriptors_intout, + hidg_ss_descriptors_intout, + hidg_ss_descriptors_intout); + else + status = usb_assign_descriptors(f, + hidg_fs_descriptors_ssreport, + hidg_hs_descriptors_ssreport, + hidg_ss_descriptors_ssreport, + hidg_ss_descriptors_ssreport); + if (status) goto fail; @@ -849,21 +1005,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) /* create char device */ cdev_init(&hidg->cdev, &f_hidg_fops); - dev = MKDEV(major, hidg->minor); - status = cdev_add(&hidg->cdev, dev, 1); + status = cdev_device_add(&hidg->cdev, &hidg->dev); if (status) goto fail_free_descs; - device = device_create(hidg_class, NULL, dev, NULL, - "%s%d", "hidg", hidg->minor); - if (IS_ERR(device)) { - status = PTR_ERR(device); - goto del; - } - return 0; -del: - cdev_del(&hidg->cdev); fail_free_descs: usb_free_all_descriptors(f); fail: @@ -950,6 +1096,7 @@ CONFIGFS_ATTR(f_hid_opts_, name) F_HID_OPT(subclass, 8, 255); F_HID_OPT(protocol, 8, 255); +F_HID_OPT(no_out_endpoint, 8, 1); F_HID_OPT(report_length, 16, 65535); static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page) @@ -1009,6 +1156,7 @@ CONFIGFS_ATTR_RO(f_hid_opts_, dev); static struct configfs_attribute *hid_attrs[] = { &f_hid_opts_attr_subclass, &f_hid_opts_attr_protocol, + &f_hid_opts_attr_no_out_endpoint, &f_hid_opts_attr_report_length, &f_hid_opts_attr_report_desc, &f_hid_opts_attr_dev, @@ -1092,8 +1240,7 @@ static void hidg_free(struct usb_function *f) hidg = func_to_hidg(f); opts = container_of(f->fi, struct f_hid_opts, func_inst); - kfree(hidg->report_desc); - kfree(hidg); + put_device(&hidg->dev); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1103,8 +1250,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); - device_destroy(hidg_class, MKDEV(major, hidg->minor)); - cdev_del(&hidg->cdev); + cdev_device_del(&hidg->cdev, &hidg->dev); usb_free_all_descriptors(f); } @@ -1113,6 +1259,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) { struct f_hidg *hidg; struct f_hid_opts 
*opts; + int ret; /* allocate and initialize one new instance */ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL); @@ -1124,21 +1271,33 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) mutex_lock(&opts->lock); ++opts->refcnt; - hidg->minor = opts->minor; + device_initialize(&hidg->dev); + hidg->dev.release = hidg_release; + hidg->dev.class = hidg_class; + hidg->dev.devt = MKDEV(major, opts->minor); + ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor); + if (ret) { + --opts->refcnt; + mutex_unlock(&opts->lock); + return ERR_PTR(ret); + } + hidg->bInterfaceSubClass = opts->subclass; hidg->bInterfaceProtocol = opts->protocol; hidg->report_length = opts->report_length; hidg->report_desc_length = opts->report_desc_length; if (opts->report_desc) { - hidg->report_desc = kmemdup(opts->report_desc, - opts->report_desc_length, - GFP_KERNEL); + hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc, + opts->report_desc_length, + GFP_KERNEL); if (!hidg->report_desc) { - kfree(hidg); + put_device(&hidg->dev); + --opts->refcnt; mutex_unlock(&opts->lock); return ERR_PTR(-ENOMEM); } } + hidg->use_out_ep = !opts->no_out_endpoint; mutex_unlock(&opts->lock); diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 855127249f242..f56147489835d 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -85,7 +85,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ncm_bitrate(struct usb_gadget *g) { - if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) + if (!g) + return 0; + else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) return 4250000000U; else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) return 3750000000U; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 236ecc9689985..c13bb29a160e8 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -87,7 +87,7 @@ struct printer_dev { u8 printer_cdev_open; wait_queue_head_t wait; unsigned q_len; - char *pnp_string; /* We don't own memory! */ + char **pnp_string; /* We don't own memory! 
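The switch from char * to char ** in printer_dev is what lets printer_func_setup() observe later configfs writes that replace opts->pnp_string. A minimal userspace sketch of why a cached pointer dangles while a pointer-to-pointer stays current (names here are hypothetical, not from the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct opts { char *pnp_string; };

    int main(void)
    {
        struct opts o = { .pnp_string = strdup("MFG:old;") };
        char *cached = o.pnp_string;    /* what the old code kept */
        char **live = &o.pnp_string;    /* what the new code keeps */

        free(o.pnp_string);             /* configfs-style replacement */
        o.pnp_string = strdup("MFG:new;");

        printf("%s\n", *live);          /* prints MFG:new; */
        (void)cached;                   /* now dangling -- the old bug */
        free(o.pnp_string);
        return 0;
    }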
*/ struct usb_function function; }; @@ -999,16 +999,16 @@ static int printer_func_setup(struct usb_function *f, if ((wIndex>>8) != dev->interface) break; - if (!dev->pnp_string) { + if (!*dev->pnp_string) { value = 0; break; } - value = strlen(dev->pnp_string); + value = strlen(*dev->pnp_string); buf[0] = (value >> 8) & 0xFF; buf[1] = value & 0xFF; - memcpy(buf + 2, dev->pnp_string, value); + memcpy(buf + 2, *dev->pnp_string, value); DBG(dev, "1284 PNP String: %x %s\n", value, - dev->pnp_string); + *dev->pnp_string); break; case GET_PORT_STATUS: /* Get Port Status */ @@ -1471,7 +1471,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) kref_init(&dev->kref); ++opts->refcnt; dev->minor = opts->minor; - dev->pnp_string = opts->pnp_string; + dev->pnp_string = &opts->pnp_string; dev->q_len = opts->q_len; mutex_unlock(&opts->lock); diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index fecdba85ab27f..5d39aff263f00 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -213,8 +213,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_DATA; - uvc_event->data.length = req->actual; - memcpy(&uvc_event->data.data, req->buf, req->actual); + uvc_event->data.length = min_t(unsigned int, req->actual, + sizeof(uvc_event->data.data)); + memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length); v4l2_event_queue(&uvc->vdev, &v4l2_event); } } diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h index 84e6da302499a..fa631f34bb3d7 100644 --- a/drivers/usb/gadget/function/u_hid.h +++ b/drivers/usb/gadget/function/u_hid.h @@ -20,6 +20,7 @@ struct f_hid_opts { int minor; unsigned char subclass; unsigned char protocol; + unsigned char no_out_endpoint; unsigned short report_length; unsigned short report_desc_length; unsigned char *report_desc; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index cd097474b6c39..cbe8016409162 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -229,6 +229,7 @@ static void put_ep (struct ep_data *data) */ static const char *CHIP; +static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */ /*----------------------------------------------------------------------*/ @@ -2012,13 +2013,20 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc) { struct inode *inode; struct dev_data *dev; + int rc; - if (the_device) - return -ESRCH; + mutex_lock(&sb_mutex); + + if (the_device) { + rc = -ESRCH; + goto Done; + } CHIP = usb_get_gadget_udc_name(); - if (!CHIP) - return -ENODEV; + if (!CHIP) { + rc = -ENODEV; + goto Done; + } /* superblock */ sb->s_blocksize = PAGE_SIZE; @@ -2055,13 +2063,17 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc) * from binding to a controller. 
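The sb_mutex introduced at the top of this file serializes gadgetfs_fill_super() against gadgetfs_kill_sb(), so the test of the_device and the claim of it happen atomically. A hedged userspace sketch of that check-and-claim pattern (pthreads, illustrative names only):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *the_dev;               /* at most one mount at a time */

    int fill_super(void *dev)
    {
        int rc = 0;

        pthread_mutex_lock(&sb_lock);
        if (the_dev)
            rc = -1;                    /* already mounted */
        else
            the_dev = dev;              /* claim under the same lock */
        pthread_mutex_unlock(&sb_lock);
        return rc;
    }

    void kill_sb(void)
    {
        pthread_mutex_lock(&sb_lock);
        the_dev = NULL;
        pthread_mutex_unlock(&sb_lock);
    }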
*/ the_device = dev; - return 0; + rc = 0; + goto Done; -Enomem: + Enomem: kfree(CHIP); CHIP = NULL; + rc = -ENOMEM; - return -ENOMEM; + Done: + mutex_unlock(&sb_mutex); + return rc; } /* "mount -t gadgetfs path /dev/gadget" ends up here */ @@ -2083,6 +2095,7 @@ static int gadgetfs_init_fs_context(struct fs_context *fc) static void gadgetfs_kill_sb (struct super_block *sb) { + mutex_lock(&sb_mutex); kill_litter_super (sb); if (the_device) { put_dev (the_device); @@ -2090,6 +2103,7 @@ gadgetfs_kill_sb (struct super_block *sb) } kfree(CHIP); CHIP = NULL; + mutex_unlock(&sb_mutex); } /*----------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c index 2c9eab2b863d2..ff970a9433479 100644 --- a/drivers/usb/gadget/legacy/webcam.c +++ b/drivers/usb/gadget/legacy/webcam.c @@ -293,6 +293,7 @@ static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = { (const struct uvc_descriptor_header *) &uvc_format_yuv, (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p, (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p, + (const struct uvc_descriptor_header *) &uvc_color_matching, (const struct uvc_descriptor_header *) &uvc_format_mjpg, (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p, (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p, @@ -305,6 +306,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = { (const struct uvc_descriptor_header *) &uvc_format_yuv, (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p, (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p, + (const struct uvc_descriptor_header *) &uvc_color_matching, (const struct uvc_descriptor_header *) &uvc_format_mjpg, (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p, (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p, @@ -317,6 +319,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = { (const struct uvc_descriptor_header *) &uvc_format_yuv, (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p, (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p, + (const struct uvc_descriptor_header *) &uvc_color_matching, (const struct uvc_descriptor_header *) &uvc_format_mjpg, (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p, (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p, diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c index 248426a3e88a7..5f0b3fd936319 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_udc.c +++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c @@ -151,6 +151,7 @@ static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit) bdc->delayed_status = false; bdc->reinit = reinit; bdc->test_mode = false; + usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED); } /* TNotify wkaeup timer */ diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 75bf446f4a666..11712bc896352 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -629,10 +629,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210) static void fotg210_set_address(struct fotg210_udc *fotg210, struct usb_ctrlrequest *ctrl) { - if (ctrl->wValue >= 0x0100) { + if (le16_to_cpu(ctrl->wValue) >= 0x0100) { fotg210_request_error(fotg210); } else { - fotg210_set_dev_addr(fotg210, ctrl->wValue); + fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue)); fotg210_set_cxdone(fotg210); } } @@ -713,17 +713,17 @@ 
static void fotg210_get_status(struct fotg210_udc *fotg210, switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: - fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED; + fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED); break; case USB_RECIP_INTERFACE: - fotg210->ep0_data = 0; + fotg210->ep0_data = cpu_to_le16(0); break; case USB_RECIP_ENDPOINT: epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK; if (epnum) fotg210->ep0_data = - fotg210_is_epnstall(fotg210->ep[epnum]) - << USB_ENDPOINT_HALT; + cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum]) + << USB_ENDPOINT_HALT); else fotg210_request_error(fotg210); break; diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c index 2df52f75f6b3c..7558cc4d90cc6 100644 --- a/drivers/usb/host/bcma-hcd.c +++ b/drivers/usb/host/bcma-hcd.c @@ -285,7 +285,7 @@ static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val) { struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev); - if (IS_ERR_OR_NULL(usb_dev->gpio_desc)) + if (!usb_dev->gpio_desc) return; gpiod_set_value(usb_dev->gpio_desc, val); @@ -406,9 +406,11 @@ static int bcma_hcd_probe(struct bcma_device *core) return -ENOMEM; usb_dev->core = core; - if (core->dev.of_node) - usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc", - GPIOD_OUT_HIGH); + usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc", + GPIOD_OUT_HIGH); + if (IS_ERR(usb_dev->gpio_desc)) + return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc), + "error obtaining VCC GPIO"); switch (core->id.id) { case BCMA_CORE_USB20_HOST: diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 1e8b59ab22729..c78f71a5faac4 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -29,7 +29,7 @@ #include "ehci-fsl.h" #define DRIVER_DESC "Freescale EHCI Host controller driver" -#define DRV_NAME "ehci-fsl" +#define DRV_NAME "fsl-ehci" static struct hc_driver __read_mostly fsl_ehci_hc_driver; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 024e8911df344..a8a9addb4d253 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -659,7 +659,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, num_stream_ctxs, &stream_info->ctx_array_dma, mem_flags); if (!stream_info->stream_ctx_array) - goto cleanup_ctx; + goto cleanup_ring_array; memset(stream_info->stream_ctx_array, 0, sizeof(struct xhci_stream_ctx)*num_stream_ctxs); @@ -720,6 +720,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, } xhci_free_command(xhci, stream_info->free_streams_command); cleanup_ctx: + xhci_free_stream_ctx(xhci, + stream_info->num_stream_ctxs, + stream_info->stream_ctx_array, + stream_info->ctx_array_dma); +cleanup_ring_array: kfree(stream_info->stream_rings); cleanup_info: kfree(stream_info); @@ -910,15 +915,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (dev->eps[i].stream_info) xhci_free_stream_info(xhci, dev->eps[i].stream_info); - /* Endpoints on the TT/root port lists should have been removed - * when usb_disable_device() was called for the device. - * We can't drop them anyway, because the udev might have gone - * away by this point, and we can't tell what speed it was. + /* + * Endpoints are normally deleted from the bandwidth list when + * endpoints are dropped, before device is freed. + * If host is dying or being removed then endpoints aren't + * dropped cleanly, so delete the endpoint from list here. 
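The deletion below uses list_del_init() rather than plain list_del(), so the node is left pointing at itself and any later list_empty() test on it stays well defined instead of touching poisoned pointers. A minimal re-implementation of that idiom, simplified from include/linux/list.h:

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
        h->next = h;
        h->prev = h;
    }

    static void list_del_init(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        INIT_LIST_HEAD(e);              /* self-linked, not poisoned */
    }

    static int list_empty(const struct list_head *h)
    {
        return h->next == h;
    }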
+ * Only applicable for hosts with software bandwidth checking. */ - if (!list_empty(&dev->eps[i].bw_endpoint_list)) - xhci_warn(xhci, "Slot %u endpoint %u " - "not removed from BW list!\n", - slot_id, i); + + if (!list_empty(&dev->eps[i].bw_endpoint_list)) { + list_del_init(&dev->eps[i].bw_endpoint_list); + xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n", + slot_id, i); + } } /* If this is a hub, free the TT(s) from the TT list */ xhci_free_tt_info(xhci, dev, slot_id); @@ -994,6 +1003,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, if (!dev) return 0; + dev->slot_id = slot_id; + /* Allocate the (output) device context that will be used in the HC. */ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); if (!dev->out_ctx) @@ -1012,6 +1023,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, /* Initialize the cancellation list and watchdog timers for each ep */ for (i = 0; i < 31; i++) { + dev->eps[i].ep_index = i; + dev->eps[i].vdev = dev; xhci_init_endpoint_timer(xhci, &dev->eps[i]); INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 8950d1f10a7fb..86c4bc9df3b80 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -25,6 +25,13 @@ */ #define TT_MICROFRAMES_MAX 9 +/* schedule error type */ +#define ESCH_SS_Y6 1001 +#define ESCH_SS_OVERLAP 1002 +#define ESCH_CS_OVERFLOW 1003 +#define ESCH_BW_OVERFLOW 1004 +#define ESCH_FIXME 1005 + /* mtk scheduler bitmasks */ #define EP_BPKTS(p) ((p) & 0x7f) #define EP_BCSCOUNT(p) (((p) & 0x7) << 8) @@ -32,6 +39,24 @@ #define EP_BOFFSET(p) ((p) & 0x3fff) #define EP_BREPEAT(p) (((p) & 0x7fff) << 16) +static char *sch_error_string(int err_num) +{ + switch (err_num) { + case ESCH_SS_Y6: + return "Can't schedule Start-Split in Y6"; + case ESCH_SS_OVERLAP: + return "Can't find a suitable Start-Split location"; + case ESCH_CS_OVERFLOW: + return "The last Complete-Split is greater than 7"; + case ESCH_BW_OVERFLOW: + return "Bandwidth exceeds the maximum limit"; + case ESCH_FIXME: + return "FIXME, to be resolved"; + default: + return "Unknown"; + } +} + static int is_fs_or_ls(enum usb_device_speed speed) { return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW; @@ -375,7 +400,6 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, sch_ep->bw_budget_table[j]; } } - sch_ep->allocated = used; } static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) @@ -384,19 +408,20 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) u32 num_esit, tmp; int base; int i, j; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++; + for (i = 0; i < num_esit; i++) { base = offset + i * sch_ep->esit; - /* - * Compared with hs bus, no matter what ep type, - * the hub will always delay one uframe to send data - */ - for (j = 0; j < sch_ep->cs_count; j++) { + for (j = 0; j < uframes; j++) { tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe; if (tmp > FS_PAYLOAD_MAX) - return -ERANGE; + return -ESCH_BW_OVERFLOW; } } @@ -406,15 +431,11 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) static int check_sch_tt(struct usb_device *udev, struct mu3h_sch_ep_info *sch_ep, u32 offset) { - struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 extra_cs_count; - u32 
fs_budget_start; u32 start_ss, last_ss; u32 start_cs, last_cs; - int i; start_ss = offset % 8; - fs_budget_start = (start_ss + 1) % 8; if (sch_ep->ep_type == ISOC_OUT_EP) { last_ss = start_ss + sch_ep->cs_count - 1; @@ -424,11 +445,7 @@ static int check_sch_tt(struct usb_device *udev, * must never schedule Start-Split in Y6 */ if (!(start_ss == 7 || last_ss < 6)) - return -ERANGE; - - for (i = 0; i < sch_ep->cs_count; i++) - if (test_bit(offset + i, tt->ss_bit_map)) - return -ERANGE; + return -ESCH_SS_Y6; } else { u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); @@ -438,29 +455,24 @@ static int check_sch_tt(struct usb_device *udev, * must never schedule Start-Split in Y6 */ if (start_ss == 6) - return -ERANGE; + return -ESCH_SS_Y6; /* one uframe for ss + one uframe for idle */ start_cs = (start_ss + 2) % 8; last_cs = start_cs + cs_count - 1; if (last_cs > 7) - return -ERANGE; + return -ESCH_CS_OVERFLOW; if (sch_ep->ep_type == ISOC_IN_EP) extra_cs_count = (last_cs == 7) ? 1 : 2; else /* ep_type : INTR IN / INTR OUT */ - extra_cs_count = (fs_budget_start == 6) ? 1 : 2; + extra_cs_count = 1; cs_count += extra_cs_count; if (cs_count > 7) cs_count = 7; /* HW limit */ - for (i = 0; i < cs_count + 2; i++) { - if (test_bit(offset + i, tt->ss_bit_map)) - return -ERANGE; - } - sch_ep->cs_count = cs_count; /* one for ss, the other for idle */ sch_ep->num_budget_microframes = cs_count + 2; @@ -482,28 +494,24 @@ static void update_sch_tt(struct usb_device *udev, struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 base, num_esit; int bw_updated; - int bits; int i, j; + int offset = sch_ep->offset; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; - bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1; if (used) bw_updated = sch_ep->bw_cost_per_microframe; else bw_updated = -sch_ep->bw_cost_per_microframe; - for (i = 0; i < num_esit; i++) { - base = sch_ep->offset + i * sch_ep->esit; + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++; - for (j = 0; j < bits; j++) { - if (used) - set_bit(base + j, tt->ss_bit_map); - else - clear_bit(base + j, tt->ss_bit_map); - } + for (i = 0; i < num_esit; i++) { + base = offset + i * sch_ep->esit; - for (j = 0; j < sch_ep->cs_count; j++) + for (j = 0; j < uframes; j++) tt->fs_bus_bw[base + j] += bw_updated; } @@ -513,21 +521,48 @@ static void update_sch_tt(struct usb_device *udev, list_del(&sch_ep->tt_endpoint); } +static int load_ep_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, + struct mu3h_sch_ep_info *sch_ep, bool loaded) +{ + if (sch_ep->sch_tt) + update_sch_tt(udev, sch_ep, loaded); + + /* update bus bandwidth info */ + update_bus_bw(sch_bw, sch_ep, loaded); + sch_ep->allocated = loaded; + + return 0; +} + +static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep) +{ + u32 boundary = sch_ep->esit; + + if (sch_ep->sch_tt) { /* LS/FS with TT */ + /* + * tune for CS, normally esit >= 8 for FS/LS; do not + * add one for other types, to avoid accessing the + * array out of bounds + */ + if (sch_ep->ep_type == ISOC_OUT_EP && boundary > 1) + boundary--; + } + + return boundary; +} + static int check_sch_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) { u32 offset; - u32 esit; u32 min_bw; u32 min_index; u32 worst_bw; u32 bw_boundary; + u32 esit_boundary; u32 min_num_budget; u32 min_cs_count; - bool tt_offset_ok = false; - int ret; - - esit = sch_ep->esit; + int ret = 0; /* * Search through all possible schedule 
microframes. @@ -537,16 +572,15 @@ static int check_sch_bw(struct usb_device *udev, min_index = 0; min_cs_count = sch_ep->cs_count; min_num_budget = sch_ep->num_budget_microframes; - for (offset = 0; offset < esit; offset++) { - if (is_fs_or_ls(udev->speed)) { + esit_boundary = get_esit_boundary(sch_ep); + for (offset = 0; offset < sch_ep->esit; offset++) { + if (sch_ep->sch_tt) { ret = check_sch_tt(udev, sch_ep, offset); if (ret) continue; - else - tt_offset_ok = true; } - if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit) + if ((offset + sch_ep->num_budget_microframes) > esit_boundary) break; worst_bw = get_max_bw(sch_bw, sch_ep, offset); @@ -569,35 +603,21 @@ static int check_sch_bw(struct usb_device *udev, /* check bandwidth */ if (min_bw > bw_boundary) - return -ERANGE; + return ret ? ret : -ESCH_BW_OVERFLOW; sch_ep->offset = min_index; sch_ep->cs_count = min_cs_count; sch_ep->num_budget_microframes = min_num_budget; - if (is_fs_or_ls(udev->speed)) { - /* all offset for tt is not ok*/ - if (!tt_offset_ok) - return -ERANGE; - - update_sch_tt(udev, sch_ep, 1); - } - - /* update bus bandwidth info */ - update_bus_bw(sch_bw, sch_ep, 1); - - return 0; + return load_ep_bw(udev, sch_bw, sch_ep, true); } static void destroy_sch_ep(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) { /* only release ep bw check passed by check_sch_bw() */ - if (sch_ep->allocated) { - update_bus_bw(sch_bw, sch_ep, 0); - if (sch_ep->sch_tt) - update_sch_tt(udev, sch_ep, 0); - } + if (sch_ep->allocated) + load_ep_bw(udev, sch_bw, sch_ep, false); if (sch_ep->sch_tt) drop_tt(udev); @@ -760,7 +780,8 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ret = check_sch_bw(udev, sch_bw, sch_ep); if (ret) { - xhci_err(xhci, "Not enough bandwidth!\n"); + xhci_err(xhci, "Not enough bandwidth! 
(%s)\n", + sch_error_string(-ret)); return -ENOSPC; } } diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 2fc0568ba054e..3e2c607b5d64c 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -20,14 +20,12 @@ #define XHCI_MTK_MAX_ESIT 64 /** - * @ss_bit_map: used to avoid start split microframes overlay * @fs_bus_bw: array to keep track of bandwidth already used for FS * @ep_list: Endpoints using this TT * @usb_tt: usb TT related * @tt_port: TT port number */ struct mu3h_sch_tt { - DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT); u32 fs_bus_bw[XHCI_MTK_MAX_ESIT]; struct list_head ep_list; struct usb_tt *usb_tt; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 8952492d43be6..aff65cefead2f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -58,24 +58,13 @@ #define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed -#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e -#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 0x161a -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 0x161b -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 0x161d -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6 -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7 -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 @@ -88,9 +77,12 @@ static const char hcd_name[] = "xhci_hcd"; static struct hc_driver __read_mostly xhci_pci_hc_driver; static int xhci_pci_setup(struct usb_hcd *hcd); +static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); static const struct xhci_driver_overrides xhci_pci_overrides __initconst = { .reset = xhci_pci_setup, + .update_hub_device = xhci_pci_update_hub_device, }; /* called after powerup, by probe or system-pm "wakeup" */ @@ -253,6 +245,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) xhci->quirks |= XHCI_MISSING_CAS; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI)) + xhci->quirks |= XHCI_RESET_TO_DEFAULT; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI || @@ -264,12 +261,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI || - pdev->device == 
PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && @@ -302,8 +294,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { + /* + * try to tame the ASMedia 1042 controller which reports 0.96 + * but appears to behave more like 1.0 + */ + xhci->quirks |= XHCI_SPURIOUS_SUCCESS; xhci->quirks |= XHCI_BROKEN_STREAMS; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; @@ -332,15 +330,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4)) xhci->quirks |= XHCI_NO_SOFT_RETRY; - if (pdev->vendor == PCI_VENDOR_ID_AMD && - (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8)) + /* xHC spec requires PCI devices to support D3hot and D3cold */ + if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (xhci->quirks & XHCI_RESET_ON_RESUME) @@ -360,8 +351,38 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) NULL); ACPI_FREE(obj); } + +static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_hub *rhub = &xhci->usb3_rhub; + int ret; + int i; + + /* This is not the usb3 roothub we are looking for */ + if (hcd != rhub->hcd) + return; + + if (hdev->maxchild > rhub->num_ports) { + dev_err(&hdev->dev, "USB3 roothub port number mismatch\n"); + return; + } + + for (i = 0; i < hdev->maxchild; i++) { + ret = usb_acpi_port_lpm_incapable(hdev, i); + + dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret); + + if (ret >= 0) { + rhub->ports[i]->lpm_incapable = ret; + continue; + } + } +} + #else static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } +static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { } #endif /* CONFIG_ACPI */ /* called during probe() after chip reset completes */ @@ -394,6 +415,16 @@ static int xhci_pci_setup(struct usb_hcd *hcd) return xhci_pci_reinit(xhci, pdev); } +static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags) +{ + /* Check if acpi claims some USB3 roothub ports are lpm incapable */ + if (!hdev->parent) + xhci_find_lpm_incapable_ports(hcd, hdev); + + return xhci_update_hub_device(hcd, hdev, tt, mem_flags); +} + /* * We need to register our own PCI probe function (instead of the USB core's * function) in order to create a second roothub under xHCI. 
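The update_hub_device plumbing above follows the xhci driver-overrides pattern: the PCI glue publishes an optional hook, and xhci_init_driver() (see its hunk later in this patch) installs it only when present, leaving the generic xhci_update_hub_device() as the default. A simplified sketch of that pattern with illustrative types:

    struct hc_ops {
        int (*update_hub_device)(int slot);     /* illustrative signature */
    };

    static int generic_update_hub_device(int slot) { return slot; }

    static void init_driver(struct hc_ops *drv, const struct hc_ops *over)
    {
        drv->update_hub_device = generic_update_hub_device;
        if (over && over->update_hub_device)    /* glue's hook wins */
            drv->update_hub_device = over->update_hub_device;
    }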
@@ -463,6 +494,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW) pm_runtime_allow(&dev->dev); + dma_set_max_seg_size(&dev->dev, UINT_MAX); + return 0; put_usb3_hcd: diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index dc570ce4e8319..972a44b2a7f12 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -134,7 +134,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { }; static const struct xhci_plat_priv xhci_plat_brcm = { - .quirks = XHCI_RESET_ON_RESUME, + .quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS, }; static const struct of_device_id usb_xhci_of_match[] = { @@ -447,7 +447,16 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) * xhci_suspend() needs `do_wakeup` to know whether host is allowed * to do wakeup during suspend. */ - return xhci_suspend(xhci, device_may_wakeup(dev)); + ret = xhci_suspend(xhci, device_may_wakeup(dev)); + if (ret) + return ret; + + if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) { + clk_disable_unprepare(xhci->clk); + clk_disable_unprepare(xhci->reg_clk); + } + + return 0; } static int __maybe_unused xhci_plat_resume(struct device *dev) @@ -456,6 +465,11 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; + if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) { + clk_prepare_enable(xhci->clk); + clk_prepare_enable(xhci->reg_clk); + } + ret = xhci_priv_resume_quirk(hcd); if (ret) return ret; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index fa3a7ac15f825..b69b8c7e7966c 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -773,6 +773,101 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, seg->bounce_offs = 0; } +static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_ring *ep_ring, int status) +{ + struct urb *urb = NULL; + + /* Clean up the endpoint's TD list */ + urb = td->urb; + + /* if a bounce buffer was used to align this td then unmap it */ + xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); + + /* Do one last check of the actual transfer length. + * If the host controller said we transferred more data than the buffer + * length, urb->actual_length will be a very big number (since it's + * unsigned). Play it safe and say we didn't transfer anything. + */ + if (urb->actual_length > urb->transfer_buffer_length) { + xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", + urb->transfer_buffer_length, urb->actual_length); + urb->actual_length = 0; + status = 0; + } + list_del_init(&td->td_list); + /* Was this TD slated to be cancelled but completed anyway? 
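The over-length check in xhci_td_cleanup() guards against unsigned wrap-around: when a misbehaving controller reports more transferred than was requested, the requested-minus-residue arithmetic goes hugely positive rather than negative. A small standalone demonstration (values arbitrary):

    #include <stdio.h>

    int main(void)
    {
        unsigned int requested = 512;
        unsigned int remaining = 520;   /* bogus: exceeds the request */
        unsigned int actual = requested - remaining;

        printf("%u\n", actual);         /* 4294967288, not -8 */
        return 0;
    }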
*/ + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + + inc_td_cnt(urb); + /* Giveback the urb when all the tds are completed */ + if (last_td_in_urb(td)) { + if ((urb->actual_length != urb->transfer_buffer_length && + (urb->transfer_flags & URB_SHORT_NOT_OK)) || + (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) + xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", + urb, urb->actual_length, + urb->transfer_buffer_length, status); + + /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + status = 0; + xhci_giveback_urb_in_irq(xhci, td, status); + } + + return 0; +} + +static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, enum xhci_ep_reset_type reset_type) +{ + struct xhci_command *command; + int ret = 0; + + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!command) { + ret = -ENOMEM; + goto done; + } + + ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); +done: + if (ret) + xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n", + slot_id, ep_index, ret); + return ret; +} + +static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep, unsigned int stream_id, + struct xhci_td *td, + enum xhci_ep_reset_type reset_type) +{ + unsigned int slot_id = ep->vdev->slot_id; + int err; + + /* + * Avoid resetting endpoint if link is inactive. Can cause host hang. + * Device will be reset soon to recover the link so don't do anything + */ + if (ep->vdev->flags & VDEV_PORT_ERROR) + return; + + ep->ep_state |= EP_HALTED; + + err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); + if (err) + return; + + if (reset_type == EP_HARD_RESET) { + ep->ep_state |= EP_HARD_CLEAR_TOGGLE; + xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id, + td); + } + xhci_ring_cmd_db(xhci); +} + /* * When we get a command completion for a Stop Endpoint Command, we need to * unlink any cancelled TDs from the ring. There are two ways to do that: @@ -949,7 +1044,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, struct xhci_virt_ep *ep; struct xhci_ring *ring; - ep = &xhci->devs[slot_id]->eps[ep_index]; + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; + if ((ep->ep_state & EP_HAS_STREAMS) || (ep->ep_state & EP_GETTING_NO_STREAMS)) { int stream_id; @@ -1924,37 +2022,6 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, } } -static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td, - enum xhci_ep_reset_type reset_type) -{ - struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; - struct xhci_command *command; - - /* - * Avoid resetting endpoint if link is inactive. Can cause host hang. - * Device will be reset soon to recover the link so don't do anything - */ - if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) - return; - - command = xhci_alloc_command(xhci, false, GFP_ATOMIC); - if (!command) - return; - - ep->ep_state |= EP_HALTED; - - xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); - - if (reset_type == EP_HARD_RESET) { - ep->ep_state |= EP_HARD_CLEAR_TOGGLE; - xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id, - td); - } - xhci_ring_cmd_db(xhci); -} - /* Check if an error has halted the endpoint ring. 
The class driver will * cleanup the halt for a non-default control endpoint if we indicate a stall. * However, a babble and other errors also halt the endpoint ring, and the class @@ -1995,68 +2062,15 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) return 0; } -static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_ring *ep_ring, int *status) -{ - struct urb *urb = NULL; - - /* Clean up the endpoint's TD list */ - urb = td->urb; - - /* if a bounce buffer was used to align this td then unmap it */ - xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); - - /* Do one last check of the actual transfer length. - * If the host controller said we transferred more data than the buffer - * length, urb->actual_length will be a very big number (since it's - * unsigned). Play it safe and say we didn't transfer anything. - */ - if (urb->actual_length > urb->transfer_buffer_length) { - xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", - urb->transfer_buffer_length, urb->actual_length); - urb->actual_length = 0; - *status = 0; - } - list_del_init(&td->td_list); - /* Was this TD slated to be cancelled but completed anyway? */ - if (!list_empty(&td->cancelled_td_list)) - list_del_init(&td->cancelled_td_list); - - inc_td_cnt(urb); - /* Giveback the urb when all the tds are completed */ - if (last_td_in_urb(td)) { - if ((urb->actual_length != urb->transfer_buffer_length && - (urb->transfer_flags & URB_SHORT_NOT_OK)) || - (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) - xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", - urb, urb->actual_length, - urb->transfer_buffer_length, *status); - - /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) - *status = 0; - xhci_giveback_urb_in_irq(xhci, td, *status); - } - - return 0; -} - static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_transfer_event *event, struct xhci_virt_ep *ep) { - struct xhci_virt_device *xdev; struct xhci_ep_ctx *ep_ctx; struct xhci_ring *ep_ring; - unsigned int slot_id; u32 trb_comp_code; - int ep_index; - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); - xdev = xhci->devs[slot_id]; - ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID || @@ -2081,10 +2095,11 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, * stall later. Hub TT buffer should only be cleared for FS/LS * devices behind HS hubs for functional stalls. 
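xhci_handle_halted_endpoint(), which replaces xhci_cleanup_halted_endpoint() here, keeps the same flag discipline: mark the endpoint halted before queueing the reset command, and set the toggle-clear flag plus stalled-ring cleanup only for hard resets. A compressed sketch of that ordering (EP_HALTED matches the 1 << 1 definition in xhci.h; the other value is illustrative):

    #define EP_HALTED            (1 << 1)    /* as defined in xhci.h */
    #define EP_HARD_CLEAR_TOGGLE (1 << 6)    /* illustrative value */

    enum reset_type { EP_SOFT_RESET, EP_HARD_RESET };

    static void handle_halted_endpoint(unsigned int *ep_state,
                                       enum reset_type type)
    {
        *ep_state |= EP_HALTED;         /* mark before queueing */
        /* queue the reset-endpoint command; return early on failure */
        if (type == EP_HARD_RESET) {
            *ep_state |= EP_HARD_CLEAR_TOGGLE;
            /* also clean up the stalled ring, as the patch does */
        }
    }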
*/ - if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) + if ((ep->ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) xhci_clear_hub_tt_buffer(xhci, td, ep); - xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, - ep_ring->stream_id, td, EP_HARD_RESET); + + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_HARD_RESET); } else { /* Update ring dequeue pointer */ while (ep_ring->dequeue != td->last_trb) @@ -2092,7 +2107,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, inc_deq(xhci, ep_ring); } - return xhci_td_cleanup(xhci, td, ep_ring, status); + return xhci_td_cleanup(xhci, td, ep_ring, td->status); } /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ @@ -2115,21 +2130,15 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, */ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *ep_trb, struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep) { - struct xhci_virt_device *xdev; - unsigned int slot_id; - int ep_index; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; u32 remaining, requested; u32 trb_type; trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); - xdev = xhci->devs[slot_id]; - ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); requested = td->urb->transfer_buffer_length; remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); @@ -2139,13 +2148,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_type != TRB_STATUS) { xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", (trb_type == TRB_DATA) ? "data" : "setup"); - *status = -ESHUTDOWN; + td->status = -ESHUTDOWN; break; } - *status = 0; + td->status = 0; break; case COMP_SHORT_PACKET: - *status = 0; + td->status = 0; break; case COMP_STOPPED_SHORT_PACKET: if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) @@ -2177,7 +2186,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, ep_ctx, trb_comp_code)) break; xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", - trb_comp_code, ep_index); + trb_comp_code, ep->ep_index); fallthrough; case COMP_STALL_ERROR: /* Did we transfer part of the data (middle) phase? 
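For a stall partway through the data stage, the completed amount works out to the request size minus the event's residue, with the same defensive clamp xhci_td_cleanup() applies. A hedged helper-style sketch of the arithmetic only, not the driver's exact branch structure:

    static unsigned int stalled_ctrl_actual(unsigned int requested,
                                            unsigned int remaining)
    {
        if (remaining > requested)      /* defensive, as in xhci_td_cleanup */
            return 0;
        return requested - remaining;
    }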
*/ @@ -2209,7 +2218,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->actual_length = requested; finish_td: - return finish_td(xhci, td, event, ep, status); + return finish_td(xhci, td, event, ep); } /* @@ -2217,9 +2226,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, */ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *ep_trb, struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep) { - struct xhci_ring *ep_ring; struct urb_priv *urb_priv; int idx; struct usb_iso_packet_descriptor *frame; @@ -2228,7 +2236,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, u32 remaining, requested, ep_trb_len; int short_framestatus; - ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); urb_priv = td->urb->hcpriv; idx = urb_priv->num_tds_done; @@ -2289,26 +2296,23 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, } if (sum_trbs_for_length) - frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) + + frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + ep_trb_len - remaining; else frame->actual_length = requested; td->urb->actual_length += frame->actual_length; - return finish_td(xhci, td, event, ep, status); + return finish_td(xhci, td, event, ep); } static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep, int status) { - struct xhci_ring *ep_ring; struct urb_priv *urb_priv; struct usb_iso_packet_descriptor *frame; int idx; - ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); urb_priv = td->urb->hcpriv; idx = urb_priv->num_tds_done; frame = &td->urb->iso_frame_desc[idx]; @@ -2320,11 +2324,11 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, frame->actual_length = 0; /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring); - inc_deq(xhci, ep_ring); + while (ep->ring->dequeue != td->last_trb) + inc_deq(xhci, ep->ring); + inc_deq(xhci, ep->ring); - return xhci_td_cleanup(xhci, td, ep_ring, status); + return xhci_td_cleanup(xhci, td, ep->ring, status); } /* @@ -2332,18 +2336,14 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, */ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *ep_trb, struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep) { struct xhci_slot_ctx *slot_ctx; struct xhci_ring *ep_ring; u32 trb_comp_code; u32 remaining, requested, ep_trb_len; - unsigned int slot_id; - int ep_index; - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); - slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx); - ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); @@ -2352,7 +2352,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, switch (trb_comp_code) { case COMP_SUCCESS: - ep_ring->err_count = 0; + ep->err_count = 0; /* handle success with untransferred data as short packet */ if (ep_trb != td->last_trb || remaining) { xhci_warn(xhci, "WARN 
Successful completion on short TX\n"); @@ -2360,13 +2360,13 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->ep->desc.bEndpointAddress, requested, remaining); } - *status = 0; + td->status = 0; break; case COMP_SHORT_PACKET: xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", td->urb->ep->desc.bEndpointAddress, requested, remaining); - *status = 0; + td->status = 0; break; case COMP_STOPPED_SHORT_PACKET: td->urb->actual_length = remaining; @@ -2378,12 +2378,14 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, break; case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || - (ep_ring->err_count++ > MAX_SOFT_RETRY) || + (ep->err_count++ > MAX_SOFT_RETRY) || le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) break; - *status = 0; - xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, - ep_ring->stream_id, td, EP_SOFT_RESET); + + td->status = 0; + + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_SOFT_RESET); return 0; default: /* do nothing */ @@ -2402,7 +2404,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, remaining); td->urb->actual_length = 0; } - return finish_td(xhci, td, event, ep, status); + return finish_td(xhci, td, event, ep); } /* @@ -2458,8 +2460,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_USB_TRANSACTION_ERROR: case COMP_INVALID_STREAM_TYPE_ERROR: case COMP_INVALID_STREAM_ID_ERROR: - xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0, - NULL, EP_SOFT_RESET); + xhci_dbg(xhci, "Stream transaction error ep %u no id\n", + ep_index); + if (ep->err_count++ > MAX_SOFT_RETRY) + xhci_handle_halted_endpoint(xhci, ep, 0, NULL, + EP_HARD_RESET); + else + xhci_handle_halted_endpoint(xhci, ep, 0, NULL, + EP_SOFT_RESET); goto cleanup; case COMP_RING_UNDERRUN: case COMP_RING_OVERRUN: @@ -2642,11 +2650,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { - xhci_cleanup_halted_endpoint(xhci, slot_id, - ep_index, - ep_ring->stream_id, - NULL, - EP_HARD_RESET); + xhci_handle_halted_endpoint(xhci, ep, + ep_ring->stream_id, + NULL, + EP_HARD_RESET); } goto cleanup; } @@ -2705,7 +2712,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, return -ESHUTDOWN; } - skip_isoc_td(xhci, td, event, ep, &status); + skip_isoc_td(xhci, td, ep, status); goto cleanup; } if (trb_comp_code == COMP_SHORT_PACKET) @@ -2733,25 +2740,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, * endpoint. Otherwise, the endpoint remains stalled * indefinitely. 
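A recurring change in these hunks is retiring the int *status argument that used to be threaded through every transfer handler: status now lives in the new td->status field (added to struct xhci_td in the xhci.h hunk below), so whichever function finally gives the URB back reads it directly. A minimal sketch of that refactor shape, with illustrative names:

    struct td { int status; };

    static void giveback(struct td *td)
    {
        int status = td->status;        /* read where it is consumed */
        (void)status;                   /* ...complete the URB here... */
    }

    static void dispatch(struct td *td, int status)
    {
        td->status = status;            /* stored once at dispatch */
        giveback(td);
    }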
*/ + if (trb_is_noop(ep_trb)) { if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) - xhci_cleanup_halted_endpoint(xhci, slot_id, - ep_index, - ep_ring->stream_id, - td, EP_HARD_RESET); + xhci_handle_halted_endpoint(xhci, ep, + ep_ring->stream_id, + td, EP_HARD_RESET); goto cleanup; } + td->status = status; + /* update the urb's actual_length and give back to the core */ if (usb_endpoint_xfer_control(&td->urb->ep->desc)) - process_ctrl_td(xhci, td, ep_trb, event, ep, &status); + process_ctrl_td(xhci, td, ep_trb, event, ep); else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) - process_isoc_td(xhci, td, ep_trb, event, ep, &status); + process_isoc_td(xhci, td, ep_trb, event, ep); else - process_bulk_intr_td(xhci, td, ep_trb, event, ep, - &status); + process_bulk_intr_td(xhci, td, ep_trb, event, ep); cleanup: handling_skipped_tds = ep->skip && trb_comp_code != COMP_MISSED_SERVICE_ERROR && diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7b16b6b45af7d..473b0b64dd572 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -696,6 +696,8 @@ int xhci_run(struct usb_hcd *hcd) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_run for USB2 roothub"); + set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags); + xhci_dbc_init(xhci); xhci_debugfs_init(xhci); @@ -794,9 +796,15 @@ void xhci_shutdown(struct usb_hcd *hcd) spin_lock_irq(&xhci->lock); xhci_halt(xhci); - /* Workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + + /* + * Workaround for spurious wakeups at shutdown with HSW, and for boot + * firmware delay in ADL-P PCH if ports are left in U3 at shutdown + */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP || + xhci->quirks & XHCI_RESET_TO_DEFAULT) xhci_reset(xhci, XHCI_RESET_SHORT_USEC); + spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); @@ -1163,7 +1171,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) /* re-initialize the HC on Restore Error, or Host Controller Error */ if (temp & (STS_SRE | STS_HCE)) { reinit_xhc = true; - xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + if (!xhci->broken_suspend) + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); } if (reinit_xhc) { @@ -3912,6 +3921,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; + unsigned long flags; int i, ret; /* @@ -3940,7 +3950,11 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) } virt_dev->udev = NULL; xhci_disable_slot(xhci, udev->slot_id); + + spin_lock_irqsave(&xhci->lock, flags); xhci_free_virt_device(xhci, udev->slot_id); + spin_unlock_irqrestore(&xhci->lock, flags); + } int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) @@ -4997,6 +5011,7 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { struct xhci_hcd *xhci; + struct xhci_port *port; u16 hub_encoded_timeout; int mel; int ret; @@ -5010,6 +5025,13 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, !xhci->devs[udev->slot_id]) return USB3_LPM_DISABLED; + /* If connected to root port then check port can handle lpm */ + if (udev->parent && !udev->parent->parent) { + port = xhci->usb3_rhub.ports[udev->portnum - 1]; + if (port->lpm_incapable) + return USB3_LPM_DISABLED; + } + hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); mel = 
calculate_max_exit_latency(udev, state, hub_encoded_timeout); if (mel < 0) { @@ -5069,7 +5091,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, /* Once a hub descriptor is fetched for a device, we need to update the xHC's * internal data structures for the device. */ -static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, +int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -5169,6 +5191,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, xhci_free_command(xhci, config_cmd); return ret; } +EXPORT_SYMBOL_GPL(xhci_update_hub_device); static int xhci_get_frame(struct usb_hcd *hcd) { @@ -5439,6 +5462,8 @@ void xhci_init_driver(struct hc_driver *drv, drv->check_bandwidth = over->check_bandwidth; if (over->reset_bandwidth) drv->reset_bandwidth = over->reset_bandwidth; + if (over->update_hub_device) + drv->update_hub_device = over->update_hub_device; } } EXPORT_SYMBOL_GPL(xhci_init_driver); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 6f16a05b19584..c7749f6e34745 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -924,6 +924,8 @@ struct xhci_bw_info { #define SS_BW_RESERVED 10 struct xhci_virt_ep { + struct xhci_virt_device *vdev; /* parent */ + unsigned int ep_index; struct xhci_ring *ring; /* Related to endpoints that are configured to use stream IDs only */ struct xhci_stream_info *stream_info; @@ -931,6 +933,7 @@ struct xhci_virt_ep { * have to restore the device state to the previous state */ struct xhci_ring *new_ring; + unsigned int err_count; unsigned int ep_state; #define SET_DEQ_PENDING (1 << 0) #define EP_HALTED (1 << 1) /* For stall handling */ @@ -1002,6 +1005,7 @@ struct xhci_interval_bw_table { #define EP_CTX_PER_DEV 31 struct xhci_virt_device { + int slot_id; struct usb_device *udev; /* * Commands to the hardware are passed an "input context" that @@ -1541,6 +1545,7 @@ struct xhci_segment { struct xhci_td { struct list_head td_list; struct list_head cancelled_td_list; + int status; struct urb *urb; struct xhci_segment *start_seg; union xhci_trb *first_trb; @@ -1612,7 +1617,6 @@ struct xhci_ring { * if we own the TRB (if we are the consumer). See section 4.9.1. 
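The cycle_state this comment describes implements the standard xHCI ownership handshake: a TRB belongs to the consumer only while the TRB's cycle bit matches the ring's cycle state, and the bit sense flips on every ring wrap. A small sketch of the ownership test (TRB_CYCLE is bit 0 of the control word per the xHCI spec):

    #include <stdbool.h>
    #include <stdint.h>

    #define TRB_CYCLE 0x1u              /* bit 0 of trb->control */

    struct trb { uint32_t control; };

    static bool consumer_owns_trb(const struct trb *t, uint32_t cycle_state)
    {
        return (t->control & TRB_CYCLE) == (cycle_state & TRB_CYCLE);
    }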
*/ u32 cycle_state; - unsigned int err_count; unsigned int stream_id; unsigned int num_segs; unsigned int num_trbs_free; @@ -1724,6 +1728,7 @@ struct xhci_port { int hcd_portnum; struct xhci_hub *rhub; struct xhci_port_cap *port_cap; + unsigned int lpm_incapable:1; }; struct xhci_hub { @@ -1888,6 +1893,8 @@ struct xhci_hcd { #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) #define XHCI_NO_SOFT_RETRY BIT_ULL(40) #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) +#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) +#define XHCI_RESET_TO_DEFAULT BIT_ULL(44) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1927,6 +1934,8 @@ struct xhci_driver_overrides { int (*start)(struct usb_hcd *hcd); int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); }; #define XHCI_CFC_DELAY 10 @@ -2083,6 +2092,8 @@ void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index e9437a176518a..ea39243efee39 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -177,10 +177,6 @@ static int idmouse_create_image(struct usb_idmouse *dev) bytes_read += bulk_read; } - /* reset the device */ -reset: - ftip_command(dev, FTIP_RELEASE, 0, 0); - /* check for valid image */ /* right border should be black (0x00) */ for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH) @@ -192,6 +188,10 @@ static int idmouse_create_image(struct usb_idmouse *dev) if (dev->bulk_in_buffer[bytes_read] != 0xFF) return -EAGAIN; + /* reset the device */ +reset: + ftip_command(dev, FTIP_RELEASE, 0, 0); + /* should be IMGSIZE == 65040 */ dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n", bytes_read); diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 72a06af250812..51a5d626134c3 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -817,7 +817,7 @@ static int iowarrior_probe(struct usb_interface *interface, break; case USB_DEVICE_ID_CODEMERCS_IOW100: - dev->report_size = 13; + dev->report_size = 12; break; } } diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index f48a23adbc35d..094e812e9e692 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1268,6 +1268,11 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) { /* don't do anything here: "fault" will set up page table entries */ vma->vm_ops = &mon_bin_vm_ops; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + vma->vm_flags &= ~VM_MAYWRITE; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = filp->private_data; mon_bin_vma_open(vma); diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index fb806b33178a0..8dc657c71541c 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -760,6 +760,9 @@ static void rxstate(struct musb *musb, struct musb_request *req) musb_writew(epio, MUSB_RXCSR, csr); buffer_aint_mapped: + 
fifo_count = min_t(unsigned int, + request->length - request->actual, + (unsigned int)fifo_count); musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; @@ -1625,8 +1628,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) { struct musb *musb = gadget_to_musb(gadget); - if (!musb->xceiv->set_power) - return -EOPNOTSUPP; return usb_phy_set_power(musb->xceiv, mA); } diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index 33b637d0d8d99..5cc20275335d1 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -106,10 +106,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode) struct fwnode_handle *parent = fwnode_get_parent(fwnode); struct device *dev; - if (!parent || !fwnode_property_present(parent, "usb-role-switch")) + if (!fwnode_property_present(parent, "usb-role-switch")) { + fwnode_handle_put(parent); return NULL; + } dev = class_find_device_by_fwnode(role_class, parent); + fwnode_handle_put(parent); return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 6b5ba6180c307..9ee0fa7756121 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -64,6 +64,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ @@ -199,6 +200,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ + { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */ + { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 0c7eacc630e0e..11fe49543f26a 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -130,9 +130,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ, static int calc_baud_divisor(speed_t baudrate, speed_t clockrate) { - if (!baudrate) - return 0; - return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -523,9 +520,14 @@ static void f81232_set_baudrate(struct tty_struct *tty, speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE }; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81232_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return; + } + + idx = f81232_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git a/drivers/usb/serial/f81534.c 
b/drivers/usb/serial/f81534.c index 5661fd03e5456..e952be683c63a 100644 --- a/drivers/usb/serial/f81534.c +++ b/drivers/usb/serial/f81534.c @@ -538,9 +538,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags) static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate) { - if (!baudrate) - return 0; - /* Round to nearest divisor */ return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -570,9 +567,14 @@ static int f81534_set_port_config(struct usb_serial_port *port, u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE}; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81534_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return 0; + } + + idx = f81534_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 5480bacba39fc..3bfa395c31120 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1320,8 +1320,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty, case 38400: div_value = ftdi_sio_b38400; break; case 57600: div_value = ftdi_sio_b57600; break; case 115200: div_value = ftdi_sio_b115200; break; - } /* baud */ - if (div_value == 0) { + default: dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n", __func__, baud); div_value = ftdi_sio_b9600; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 211e03a204072..2fc65cbbfea95 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -162,6 +162,8 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_G2 0xA010 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 +#define UBLOX_VENDOR_ID 0x1546 + /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_H01 0x0800 @@ -240,7 +242,6 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_UC15 0x9090 /* These u-blox products use Qualcomm's vendor ID */ #define UBLOX_PRODUCT_R410M 0x90b2 -#define UBLOX_PRODUCT_R6XX 0x90fa /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625 @@ -254,8 +255,16 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM05G 0x030a #define QUECTEL_PRODUCT_EM060K 0x030b +#define QUECTEL_PRODUCT_EM05G_CS 0x030c +#define QUECTEL_PRODUCT_EM05CN_SG 0x0310 +#define QUECTEL_PRODUCT_EM05G_SG 0x0311 +#define QUECTEL_PRODUCT_EM05CN 0x0312 +#define QUECTEL_PRODUCT_EM05G_GR 0x0313 +#define QUECTEL_PRODUCT_EM05G_RS 0x0314 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 +#define QUECTEL_PRODUCT_RM520N 0x0801 +#define QUECTEL_PRODUCT_EC200U 0x0901 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200T 0x6026 #define QUECTEL_PRODUCT_RM500K 0x7001 @@ -580,6 +589,9 @@ static void option_instat_callback(struct urb *urb); #define OPPO_VENDOR_ID 0x22d9 #define OPPO_PRODUCT_R11 0x276c +/* Sierra Wireless products */ +#define SIERRA_VENDOR_ID 0x1199 +#define SIERRA_PRODUCT_EM9191 0x90d3 /* Device flags */ @@ -1123,8 +1135,16 @@ static const struct usb_device_id option_ids[] = { /* u-blox products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, - { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */ + .driver_info = 
RSVD(4) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa), .driver_info = RSVD(3) }, + /* u-blox products */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */ + .driver_info = RSVD(4) }, + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */ + .driver_info = RSVD(4) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, @@ -1138,13 +1158,27 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */ + .driver_info = ZLP }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff), + .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff), + .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff), + .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff), + .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff), + .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff), + .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, @@ -1159,6 +1193,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, @@ -2161,6 +2199,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { 
USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ @@ -2170,6 +2209,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 586ef5551e76e..b1e844bf31f81 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 20b857e97e60c..7e4ce0e7e05a7 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -438,6 +438,8 @@ static int alauda_init_media(struct us_data *us) + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); + if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL) + return USB_STOR_TRANSPORT_ERROR; if (alauda_reset_media(us) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 3f720faa6f97c..d73282c0ec501 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -116,6 +116,19 @@ static int uas_use_uas_driver(struct usb_interface *intf, if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) flags |= US_FL_NO_ATA_1X; + /* + * RTL9210-based enclosure from HIKSEMI, MD202, reportedly has issues + * with UAS. This isn't distinguishable with just idVendor and + * idProduct, so use manufacturer and product too.
+ * + * Reported-by: Hongling Zeng + */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda && + le16_to_cpu(udev->descriptor.idProduct) == 0x9210 && + (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) && + (udev->product && !strcmp(udev->product, "MD202"))) + flags |= US_FL_IGNORE_UAS; + usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 4993227ab2930..20dcbccb290b3 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1275,12 +1275,6 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, USB_SC_RBC, USB_PR_BULK, NULL, 0 ), -UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100, - "Samsung", - "Flash Drive FIT", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_MAX_SECTORS_64), - /* aeb */ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, "Feiya", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 4051c8cd0cd8a..c7b763d6d1023 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), +/* Reported-by: Hongling Zeng */ +UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999, + "Hiksemi", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? @@ -62,6 +69,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_UAS), +/* Reported-by: Tom Hu */ +UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999, + "ASUS", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: David Webb */ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999, "Seagate", @@ -111,6 +125,13 @@ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Hongling Zeng */ +UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999, + "Thinkplus", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 5e293ccf0e904..e8eaca5a84db2 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -409,6 +409,18 @@ static const char * const pin_assignments[] = { [DP_PIN_ASSIGN_F] = "F", }; +/* + * Helper function to extract a peripheral's currently supported + * Pin Assignments from its DisplayPort alternate mode state. 
+ */ +static u8 get_current_pin_assignments(struct dp_altmode *dp) +{ + if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D) + return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo); + else + return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo); +} + static ssize_t pin_assignment_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) @@ -435,10 +447,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr, goto out_unlock; } - if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D) - assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo); - else - assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo); + assignments = get_current_pin_assignments(dp); if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) { ret = -EINVAL; @@ -475,10 +484,7 @@ static ssize_t pin_assignment_show(struct device *dev, cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf)); - if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D) - assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo); - else - assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo); + assignments = get_current_pin_assignments(dp); for (i = 0; assignments; assignments >>= 1, i++) { if (assignments & 1) { @@ -518,10 +524,10 @@ int dp_altmode_probe(struct typec_altmode *alt) /* FIXME: Port can only be DFP_U. */ /* Make sure we have compatiple pin configurations */ - if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) & - DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) && - !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) & - DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo))) + if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) & + DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) && + !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) & + DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) return -ENODEV; ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group); diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index e8ddb81cb6df4..f4e7f4d78b565 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -132,7 +132,7 @@ int typec_altmode_exit(struct typec_altmode *adev) if (!adev || !adev->active) return 0; - if (!pdev->ops || !pdev->ops->enter) + if (!pdev->ops || !pdev->ops->exit) return -EOPNOTSUPP; /* Moving to USB Safe State */ diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index acdef6fbb85e0..1276112edeff9 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -83,8 +83,6 @@ enum { /* * Input Output Manager (IOM) PORT STATUS */ -#define IOM_PORT_STATUS_OFFSET 0x560 - #define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6) #define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6 #define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03 @@ -144,6 +142,7 @@ struct pmc_usb { struct pmc_usb_port *port; struct acpi_device *iom_adev; void __iomem *iom_base; + u32 iom_port_status_offset; }; static void update_port_status(struct pmc_usb_port *port) @@ -153,7 +152,8 @@ static void update_port_status(struct pmc_usb_port *port) /* SoC expects the USB Type-C port numbers to start with 0 */ port_num = port->usb3_port - 1; - port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET + + port->iom_status = readl(port->pmc->iom_base + + port->pmc->iom_port_status_offset + port_num * sizeof(u32)); } @@ -339,13 +339,24 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state) return pmc_usb_command(port, (void *)&req, sizeof(req)); } -static int pmc_usb_mux_safe_state(struct pmc_usb_port *port) +static int pmc_usb_mux_safe_state(struct pmc_usb_port *port, + struct typec_mux_state *state) { u8 msg; if 
(IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE)) return 0; + if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) || + IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) && + state->alt && state->alt->svid == USB_TYPEC_DP_SID) + return 0; + + if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) || + IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) && + state->alt && state->alt->svid == USB_TYPEC_TBT_SID) + return 0; + msg = PMC_USB_SAFE_MODE; msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; @@ -413,7 +424,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state) return 0; if (state->mode == TYPEC_STATE_SAFE) - return pmc_usb_mux_safe_state(port); + return pmc_usb_mux_safe_state(port, state); if (state->mode == TYPEC_STATE_USB) return pmc_usb_connect(port, port->role); @@ -541,19 +552,42 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index, static int is_memory(struct acpi_resource *res, void *data) { - struct resource r; + struct resource_win win = {}; + struct resource *r = &win.res; - return !acpi_dev_resource_memory(res, &r); + return !(acpi_dev_resource_memory(res, r) || + acpi_dev_resource_address_space(res, &win)); } +/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */ +static const struct acpi_device_id iom_acpi_ids[] = { + /* TigerLake */ + { "INTC1072", 0x560, }, + + /* AlderLake */ + { "INTC1079", 0x160, }, + + /* Meteor Lake */ + { "INTC107A", 0x160, }, + {} +}; + static int pmc_usb_probe_iom(struct pmc_usb *pmc) { struct list_head resource_list; struct resource_entry *rentry; - struct acpi_device *adev; + static const struct acpi_device_id *dev_id; + struct acpi_device *adev = NULL; int ret; - adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1); + for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) { + if (acpi_dev_present(dev_id->id, NULL, -1)) { + pmc->iom_port_status_offset = (u32)dev_id->driver_data; + adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1); + break; + } + } + if (!adev) return -ENODEV; diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index 49420e28a1f76..069affa5cb1ee 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -649,8 +649,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data) return ERR_PTR(err); tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc); - if (IS_ERR(tcpci->port)) + if (IS_ERR(tcpci->port)) { + fwnode_handle_put(tcpci->tcpc.fwnode); return ERR_CAST(tcpci->port); + } return tcpci; } @@ -659,6 +661,7 @@ EXPORT_SYMBOL_GPL(tcpci_register_port); void tcpci_unregister_port(struct tcpci *tcpci) { tcpm_unregister_port(tcpci->port); + fwnode_handle_put(tcpci->tcpc.fwnode); } EXPORT_SYMBOL_GPL(tcpci_unregister_port); diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c index 6cb5c8e2c8535..4722b7f7a4a2b 100644 --- a/drivers/usb/typec/tps6598x.c +++ b/drivers/usb/typec/tps6598x.c @@ -564,7 +564,6 @@ static int tps6598x_probe(struct i2c_client *client) ret = PTR_ERR(tps->port); goto err_role_put; } - fwnode_handle_put(fwnode); if (status & TPS_STATUS_PLUG_PRESENT) { ret = tps6598x_connect(tps, status); @@ -583,6 +582,7 @@ static int tps6598x_probe(struct i2c_client *client) } i2c_set_clientdata(client, tps); + fwnode_handle_put(fwnode); return 0; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 0c16e99807365..4cd5c291cdf38 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -515,8 +515,6 @@ static int ucsi_get_pdos(struct 
ucsi_connector *con, int is_partner, num_pdos * sizeof(u32)); if (ret < 0) dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret); - if (ret == 0 && offset == 0) - dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n"); return ret; } diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index e83a7cd15c956..e15ef1a949e00 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev, const char **extra_dbg) { #ifdef CONFIG_ACPI - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct device *dev = vdev->device; acpi_handle handle = ACPI_HANDLE(dev); acpi_status acpi_ret; - acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer); + acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL); if (ACPI_FAILURE(acpi_ret)) { if (extra_dbg) *extra_dbg = acpi_format_exception(acpi_ret); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index fbd438e9b9b03..ce50ca9a320c7 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -98,6 +98,12 @@ struct vfio_dma { unsigned long *bitmap; }; +struct vfio_batch { + struct page **pages; /* for pin_user_pages_remote */ + struct page *fallback_page; /* if pages alloc fails */ + int capacity; /* length of pages array */ +}; + struct vfio_group { struct iommu_group *iommu_group; struct list_head next; @@ -428,6 +434,31 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) + +static void vfio_batch_init(struct vfio_batch *batch) +{ + if (unlikely(disable_hugepages)) + goto fallback; + + batch->pages = (struct page **) __get_free_page(GFP_KERNEL); + if (!batch->pages) + goto fallback; + + batch->capacity = VFIO_BATCH_MAX_CAPACITY; + return; + +fallback: + batch->pages = &batch->fallback_page; + batch->capacity = 1; +} + +static void vfio_batch_fini(struct vfio_batch *batch) +{ + if (batch->capacity == VFIO_BATCH_MAX_CAPACITY) + free_page((unsigned long)batch->pages); +} + static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, unsigned long vaddr, unsigned long *pfn, bool write_fault) @@ -464,10 +495,14 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, return ret; } -static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, - int prot, unsigned long *pfn) +/* + * Returns the positive number of pfns successfully obtained or a negative + * error code. + */ +static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, + long npages, int prot, unsigned long *pfn, + struct page **pages) { - struct page *page[1]; struct vm_area_struct *vma; unsigned int flags = 0; int ret; @@ -476,11 +511,22 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, flags |= FOLL_WRITE; mmap_read_lock(mm); - ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, - page, NULL, NULL); - if (ret == 1) { - *pfn = page_to_pfn(page[0]); - ret = 0; + ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, + pages, NULL, NULL); + if (ret > 0) { + int i; + + /* + * The zero page is always resident, we don't need to pin it + * and it falls into our invalid/reserved test so we don't + * unpin in put_pfn(). Unpin all zero pages in the batch here. 
+ */ + for (i = 0 ; i < ret; i++) { + if (unlikely(is_zero_pfn(page_to_pfn(pages[i])))) + unpin_user_page(pages[i]); + } + + *pfn = page_to_pfn(pages[0]); goto done; } @@ -494,8 +540,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, if (ret == -EAGAIN) goto retry; - if (!ret && !is_invalid_reserved_pfn(*pfn)) - ret = -EFAULT; + if (!ret) { + if (is_invalid_reserved_pfn(*pfn)) + ret = 1; + else + ret = -EFAULT; + } } done: mmap_read_unlock(mm); @@ -509,7 +559,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, */ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base, - unsigned long limit) + unsigned long limit, struct vfio_batch *batch) { unsigned long pfn = 0; long ret, pinned = 0, lock_acct = 0; @@ -520,8 +570,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, if (!current->mm) return -ENODEV; - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, pfn_base, + batch->pages); + if (ret < 0) return ret; pinned++; @@ -547,8 +598,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, /* Lock all the consecutive pages from pfn_base */ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, &pfn, + batch->pages); + if (ret < 0) break; if (pfn != *pfn_base + pinned || @@ -574,7 +626,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, ret = vfio_lock_acct(dma, lock_acct, false); unpin_out: - if (ret) { + if (ret < 0) { if (!rsvd) { for (pfn = *pfn_base ; pinned ; pfn++, pinned--) put_pfn(pfn, dma->prot); @@ -610,6 +662,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, unsigned long *pfn_base, bool do_accounting) { + struct page *pages[1]; struct mm_struct *mm; int ret; @@ -617,8 +670,13 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, if (!mm) return -ENODEV; - ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); - if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { + ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); + if (ret != 1) + goto out; + + ret = 0; + + if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { ret = vfio_lock_acct(dma, 1, true); if (ret) { put_pfn(*pfn_base, dma->prot); @@ -630,6 +688,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, } } +out: mmput(mm); return ret; } @@ -1263,15 +1322,19 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, { dma_addr_t iova = dma->iova; unsigned long vaddr = dma->vaddr; + struct vfio_batch batch; size_t size = map_size; long npage; unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret = 0; + vfio_batch_init(&batch); + while (size) { /* Pin a contiguous chunk of memory */ npage = vfio_pin_pages_remote(dma, vaddr + dma->size, - size >> PAGE_SHIFT, &pfn, limit); + size >> PAGE_SHIFT, &pfn, limit, + &batch); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1291,6 +1354,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, dma->size += npage << PAGE_SHIFT; } + vfio_batch_fini(&batch); dma->iommu_mapped = true; if (ret) @@ 
-1449,6 +1513,7 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { + struct vfio_batch batch; struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; @@ -1459,6 +1524,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + vfio_batch_init(&batch); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1506,7 +1573,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, npage = vfio_pin_pages_remote(dma, vaddr, n >> PAGE_SHIFT, - &pfn, limit); + &pfn, limit, + &batch); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1539,6 +1607,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, dma->iommu_mapped = true; } + vfio_batch_fini(&batch); return 0; unwind: @@ -1579,6 +1648,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, } } + vfio_batch_fini(&batch); return ret; } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 5beb20768b204..b9c8e40252142 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1517,6 +1517,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) nvq = &n->vqs[index]; mutex_lock(&vq->mutex); + if (fd == -1) + vhost_clear_msg(&n->dev); + /* Verify that ring has been setup correctly. */ if (!vhost_vq_access_ok(vq)) { r = -EFAULT; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index f41463ab4031d..1f9a1554ce5f4 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -669,7 +669,7 @@ void vhost_dev_stop(struct vhost_dev *dev) } EXPORT_SYMBOL_GPL(vhost_dev_stop); -static void vhost_clear_msg(struct vhost_dev *dev) +void vhost_clear_msg(struct vhost_dev *dev) { struct vhost_msg_node *node, *n; @@ -687,6 +687,7 @@ static void vhost_clear_msg(struct vhost_dev *dev) spin_unlock(&dev->iotlb_lock); } +EXPORT_SYMBOL_GPL(vhost_clear_msg); void vhost_dev_cleanup(struct vhost_dev *dev) { @@ -2041,7 +2042,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, struct vhost_dev *dev = vq->dev; struct vhost_iotlb *umem = dev->iotlb ? 
dev->iotlb : dev->umem; struct iovec *_iov; - u64 s = 0; + u64 s = 0, last = addr + len - 1; int ret = 0; while ((u64)len > s) { @@ -2051,7 +2052,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, break; } - map = vhost_iotlb_itree_first(umem, addr, addr + len - 1); + map = vhost_iotlb_itree_first(umem, addr, last); if (map == NULL || map->start > addr) { if (umem != dev->iotlb) { ret = -EFAULT; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index b063324c7669d..8f80d6b0d843e 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -183,6 +183,7 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp); long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp); bool vhost_vq_access_ok(struct vhost_virtqueue *vq); bool vhost_log_access_ok(struct vhost_dev *); +void vhost_clear_msg(struct vhost_dev *dev); int vhost_get_vq_desc(struct vhost_virtqueue *, struct iovec iov[], unsigned int iov_count, diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 5a0340c85dc6b..48f4ec2ba40a0 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -1077,7 +1077,7 @@ static int iotlb_translate(const struct vringh *vrh, struct vhost_iotlb_map *map; struct vhost_iotlb *iotlb = vrh->iotlb; int ret = 0; - u64 s = 0; + u64 s = 0, last = addr + len - 1; while (len > s) { u64 size, pa, pfn; @@ -1087,8 +1087,7 @@ static int iotlb_translate(const struct vringh *vrh, break; } - map = vhost_iotlb_itree_first(iotlb, addr, - addr + len - 1); + map = vhost_iotlb_itree_first(iotlb, addr, last); if (!map || map->start > addr) { ret = -EINVAL; break; diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 5d2d6ce7ff413..7bce5f982e58a 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -359,7 +359,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, return NULL; } - pkt->buf = kmalloc(pkt->len, GFP_KERNEL); + pkt->buf = kvmalloc(pkt->len, GFP_KERNEL); if (!pkt->buf) { kfree(pkt); return NULL; @@ -854,7 +854,14 @@ static int __init vhost_vsock_init(void) VSOCK_TRANSPORT_F_H2G); if (ret < 0) return ret; - return misc_register(&vhost_vsock_misc); + + ret = misc_register(&vhost_vsock_misc); + if (ret) { + vsock_core_unregister(&vhost_transport.transport); + return ret; + } + + return 0; }; static void __exit vhost_vsock_exit(void) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 5d32572880c73..6afb091f574df 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2232,7 +2232,6 @@ config FB_SSD1307 select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT select FB_DEFERRED_IO - select PWM select FB_BACKLIGHT help This driver implements support for the Solomon SSD1307 diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 393894af26f84..2b00a9d554fc0 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) err_release_fb: framebuffer_release(p); err_disable: + pci_disable_device(dp); err_out: return rc; } diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 2618d3beef649..6d58c8a5cb446 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -609,7 +609,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, if (scr_readw(r) != vc->vc_video_erase_char) break; if (r != q && new_rows >= rows + logo_lines) { - save = 
kmalloc(array3_size(logo_lines, new_cols, 2), + save = kzalloc(array3_size(logo_lines, new_cols, 2), GFP_KERNEL); if (save) { int i = cols < new_cols ? cols : new_cols; @@ -2513,9 +2513,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres)) return -EINVAL; + if (font->width > 32 || font->height > 32) + return -EINVAL; + /* Make sure drawing engine can handle the font */ - if (!(info->pixmap.blit_x & (1 << (font->width - 1))) || - !(info->pixmap.blit_y & (1 << (font->height - 1)))) + if (!(info->pixmap.blit_x & BIT(font->width - 1)) || + !(info->pixmap.blit_y & BIT(font->height - 1))) return -EINVAL; /* Make sure driver can handle the font length */ diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 40baa79f8046e..f0a66a344d870 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -798,12 +798,18 @@ static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par, static int hvfb_on_panic(struct notifier_block *nb, unsigned long e, void *p) { + struct hv_device *hdev; struct hvfb_par *par; struct fb_info *info; par = container_of(nb, struct hvfb_par, hvfb_panic_nb); - par->synchronous_fb = true; info = par->info; + hdev = device_to_hv_device(info->device); + + if (hv_ringbuffer_spinlock_busy(hdev->channel)) + return NOTIFY_DONE; + + par->synchronous_fb = true; if (par->need_docopy) hvfb_docopy(par, 0, dio_fb_size); synthvid_update(info, 0, 0, INT_MAX, INT_MAX); diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c index ad5ced4ef972d..8fb4e01e1943f 100644 --- a/drivers/video/fbdev/i740fb.c +++ b/drivers/video/fbdev/i740fb.c @@ -662,6 +662,9 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var, static int i740fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { + if (!var->pixclock) + return -EINVAL; + switch (var->bits_per_pixel) { case 8: var->red.offset = var->green.offset = var->blue.offset = 0; diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index daaa99818d3b7..566f74832bc8a 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1377,8 +1377,8 @@ static struct video_board vbG200 = { .lowlevel = &matrox_G100 }; static struct video_board vbG200eW = { - .maxvram = 0x100000, - .maxdisplayable = 0x800000, + .maxvram = 0x1000000, + .maxdisplayable = 0x0800000, .accelID = FB_ACCEL_MATROX_MGAG200, .lowlevel = &matrox_G100 }; diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index c12d46e283598..87b6a929a6b39 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -1529,8 +1529,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) } info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev); - if (!info) - return -ENOMEM; + if (!info) { + err = -ENOMEM; + goto err_exit_disable; + } default_par = info->par; switch (pdev->device) { @@ -1711,6 +1713,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); err_exit_neither: framebuffer_release(info); + err_exit_disable: + pci_disable_device(pdev); return retval; } @@ -1737,6 +1741,7 @@ static void pm2fb_remove(struct pci_dev *pdev) fb_dealloc_cmap(&info->cmap); kfree(info->pixmap.addr); framebuffer_release(info); + pci_disable_device(pdev); } static const struct pci_device_id 
pm2fb_id_table[] = { diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 9421d14d0eb02..9e9888e40c573 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -381,7 +381,7 @@ pxa3xx_gcu_write(struct file *file, const char *buff, struct pxa3xx_gcu_batch *buffer; struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file); - int words = count / 4; + size_t words = count / 4; /* Does not need to be atomic. There's a lock in user space, * but anyhow, this is just for statistics. */ diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index 28768c272b73d..b3295cd7fd4f9 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -97,7 +97,6 @@ struct ufx_data { struct kref kref; int fb_count; bool virtualized; /* true when physical usb device not present */ - struct delayed_work free_framebuffer_work; atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */ atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */ u8 *edid; /* null until we read edid from hw or get from sysfs */ @@ -137,6 +136,8 @@ static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len); static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size); static void ufx_free_urb_list(struct ufx_data *dev); +static DEFINE_MUTEX(disconnect_mutex); + /* reads a control register */ static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data) { @@ -1070,9 +1071,13 @@ static int ufx_ops_open(struct fb_info *info, int user) if (user == 0 && !console) return -EBUSY; + mutex_lock(&disconnect_mutex); + /* If the USB device is gone, we don't accept new opens */ - if (dev->virtualized) + if (dev->virtualized) { + mutex_unlock(&disconnect_mutex); return -ENODEV; + } dev->fb_count++; @@ -1096,6 +1101,8 @@ static int ufx_ops_open(struct fb_info *info, int user) pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d", info->node, user, info, dev->fb_count); + mutex_unlock(&disconnect_mutex); + return 0; } @@ -1108,15 +1115,24 @@ static void ufx_free(struct kref *kref) { struct ufx_data *dev = container_of(kref, struct ufx_data, kref); - /* this function will wait for all in-flight urbs to complete */ - if (dev->urbs.count > 0) - ufx_free_urb_list(dev); + kfree(dev); +} - pr_debug("freeing ufx_data %p", dev); +static void ufx_ops_destroy(struct fb_info *info) +{ + struct ufx_data *dev = info->par; + int node = info->node; - kfree(dev); + /* Assume info structure is freed after this point */ + framebuffer_release(info); + + pr_debug("fb_info for 
/dev/fb%d has been freed", node); - /* ref taken in probe() as part of registering framebuffer */ kref_put(&dev->kref, ufx_free); } @@ -1160,11 +1166,13 @@ static int ufx_ops_release(struct fb_info *info, int user) { struct ufx_data *dev = info->par; + mutex_lock(&disconnect_mutex); + dev->fb_count--; /* We can't free fb_info here - fbmem will touch it when we return */ if (dev->virtualized && (dev->fb_count == 0)) - schedule_delayed_work(&dev->free_framebuffer_work, HZ); + ufx_free_framebuffer(dev); if ((dev->fb_count == 0) && (info->fbdefio)) { fb_deferred_io_cleanup(info); @@ -1177,6 +1185,8 @@ static int ufx_ops_release(struct fb_info *info, int user) kref_put(&dev->kref, ufx_free); + mutex_unlock(&disconnect_mutex); + return 0; } @@ -1283,6 +1293,7 @@ static const struct fb_ops ufx_ops = { .fb_blank = ufx_ops_blank, .fb_check_var = ufx_ops_check_var, .fb_set_par = ufx_ops_set_par, + .fb_destroy = ufx_ops_destroy, }; /* Assumes &info->lock held by caller @@ -1610,7 +1621,7 @@ static int ufx_usb_probe(struct usb_interface *interface, struct usb_device *usbdev; struct ufx_data *dev; struct fb_info *info; - int retval; + int retval = -ENOMEM; u32 id_rev, fpga_rev; /* usb initialization */ @@ -1642,15 +1653,17 @@ static int ufx_usb_probe(struct usb_interface *interface, if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { dev_err(dev->gdev, "ufx_alloc_urb_list failed\n"); - goto e_nomem; + goto put_ref; } /* We don't register a new USB class. Our client interface is fbdev */ /* allocates framebuffer driver structure, not framebuffer memory */ info = framebuffer_alloc(0, &usbdev->dev); - if (!info) - goto e_nomem; + if (!info) { + dev_err(dev->gdev, "framebuffer_alloc failed\n"); + goto free_urb_list; + } dev->info = info; info->par = dev; @@ -1664,9 +1677,6 @@ static int ufx_usb_probe(struct usb_interface *interface, goto destroy_modedb; } - INIT_DELAYED_WORK(&dev->free_framebuffer_work, - ufx_free_framebuffer_work); - retval = ufx_reg_read(dev, 0x3000, &id_rev); check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); @@ -1696,22 +1706,34 @@ static int ufx_usb_probe(struct usb_interface *interface, check_warn_goto_error(retval, "unable to find common mode for display and adapter"); retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001); - check_warn_goto_error(retval, "error %d enabling graphics engine", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d enabling graphics engine", retval); + goto setup_modes; + } /* ready to begin using device */ atomic_set(&dev->usb_active, 1); dev_dbg(dev->gdev, "checking var"); retval = ufx_ops_check_var(&info->var, info); - check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d ufx_ops_check_var", retval); + goto reset_active; + } dev_dbg(dev->gdev, "setting par"); retval = ufx_ops_set_par(info); - check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d ufx_ops_set_par", retval); + goto reset_active; + } dev_dbg(dev->gdev, "registering framebuffer"); retval = register_framebuffer(info); - check_warn_goto_error(retval, "error %d register_framebuffer", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d register_framebuffer", retval); + goto reset_active; + } dev_info(dev->gdev, "SMSC UDX USB device /dev/fb%d attached. %dx%d resolution."
" Using %dK framebuffer memory\n", info->node, @@ -1719,28 +1741,34 @@ static int ufx_usb_probe(struct usb_interface *interface, return 0; -error: - fb_dealloc_cmap(&info->cmap); -destroy_modedb: +reset_active: + atomic_set(&dev->usb_active, 0); +setup_modes: fb_destroy_modedb(info->monspecs.modedb); vfree(info->screen_base); fb_destroy_modelist(&info->modelist); +error: + fb_dealloc_cmap(&info->cmap); +destroy_modedb: framebuffer_release(info); +free_urb_list: + if (dev->urbs.count > 0) + ufx_free_urb_list(dev); put_ref: kref_put(&dev->kref, ufx_free); /* ref for framebuffer */ kref_put(&dev->kref, ufx_free); /* last ref from kref_init */ return retval; - -e_nomem: - retval = -ENOMEM; - goto put_ref; } static void ufx_usb_disconnect(struct usb_interface *interface) { struct ufx_data *dev; + struct fb_info *info; + + mutex_lock(&disconnect_mutex); dev = usb_get_intfdata(interface); + info = dev->info; pr_debug("USB disconnect starting\n"); @@ -1754,12 +1782,17 @@ static void ufx_usb_disconnect(struct usb_interface *interface) /* if clients still have us open, will be freed on last close */ if (dev->fb_count == 0) - schedule_delayed_work(&dev->free_framebuffer_work, 0); + ufx_free_framebuffer(dev); - /* release reference taken by kref_init in probe() */ - kref_put(&dev->kref, ufx_free); + /* this function will wait for all in-flight urbs to complete */ + if (dev->urbs.count > 0) + ufx_free_urb_list(dev); + + pr_debug("freeing ufx_data %p", dev); + + unregister_framebuffer(info); - /* consider ufx_data freed */ + mutex_unlock(&disconnect_mutex); } static struct usb_driver ufx_driver = { diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 002f265d8db58..3feb6e40d56d8 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1041,6 +1041,48 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area) SETUP_FB(fb); } +#define ARTIST_VRAM_SIZE 0x000804 +#define ARTIST_VRAM_SRC 0x000808 +#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL 0x000a04 +#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE 0x000b00 +#define ARTIST_SRC_BM_ACCESS 0x018008 +#define ARTIST_FGCOLOR 0x018010 +#define ARTIST_BGCOLOR 0x018014 +#define ARTIST_BITMAP_OP 0x01801c + +static void +stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct stifb_info *fb = container_of(info, struct stifb_info, info); + + if (rect->rop != ROP_COPY || + (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32)) + return cfb_fillrect(info, rect); + + SETUP_HW(fb); + + if (fb->info.var.bits_per_pixel == 32) { + WRITE_WORD(0xBBA0A000, fb, REG_10); + + NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff); + } else { + WRITE_WORD(fb->id == S9000_ID_HCRX ? 
0x13a02000 : 0x13a01000, fb, REG_10); + + NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff); + } + + WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP); + WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS); + NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000); + NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color); + WRITE_WORD(0, fb, ARTIST_BGCOLOR); + + NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy)); + SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height)); + + SETUP_FB(fb); +} + static void __init stifb_init_display(struct stifb_info *fb) { @@ -1105,7 +1147,7 @@ static const struct fb_ops stifb_ops = { .owner = THIS_MODULE, .fb_setcolreg = stifb_setcolreg, .fb_blank = stifb_blank, - .fb_fillrect = cfb_fillrect, + .fb_fillrect = stifb_fillrect, .fb_copyarea = stifb_copyarea, .fb_imageblit = cfb_imageblit, }; @@ -1257,7 +1299,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) /* limit fbsize to max visible screen size */ if (fix->smem_len > yres*fix->line_length) - fix->smem_len = yres*fix->line_length; + fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024); fix->accel = FB_ACCEL_NONE; @@ -1297,7 +1339,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) goto out_err0; } info->screen_size = fix->smem_len; - info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA; + info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; info->pseudo_palette = &fb->pseudo_palette; /* This has to be done !!! */ diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index def14ac0ebe14..661f12742e4f0 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -1756,6 +1756,7 @@ static int uvesafb_probe(struct platform_device *dev) out_unmap: iounmap(info->screen_base); out_mem: + arch_phys_wc_del(par->mtrr_handle); release_mem_region(info->fix.smem_start, info->fix.smem_len); out_reg: release_region(0x3c0, 32); diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index ff61605b8764f..a543643ce014d 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c @@ -277,8 +277,10 @@ static int vmlfb_get_gpu(struct vml_par *par) mutex_unlock(&vml_mutex); - if (pci_enable_device(par->gpu) < 0) + if (pci_enable_device(par->gpu) < 0) { + pci_dev_put(par->gpu); return -ENODEV; + } return 0; } diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c index 89d75079b7307..0363b478fa3ef 100644 --- a/drivers/video/fbdev/via/via-core.c +++ b/drivers/video/fbdev/via/via-core.c @@ -725,7 +725,14 @@ static int __init via_core_init(void) return ret; viafb_i2c_init(); viafb_gpio_init(); - return pci_register_driver(&via_driver); + ret = pci_register_driver(&via_driver); + if (ret) { + viafb_gpio_exit(); + viafb_i2c_exit(); + return ret; + } + + return 0; } static void __exit via_core_exit(void) diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c index 6a1bc284f297c..eae78366eb028 100644 --- a/drivers/vme/bridges/vme_fake.c +++ b/drivers/vme/bridges/vme_fake.c @@ -1073,6 +1073,8 @@ static int __init fake_init(void) /* We need a fake parent device */ vme_root = __root_device_register("vme", THIS_MODULE); + if (IS_ERR(vme_root)) + return PTR_ERR(vme_root); /* If we want to support more than one bridge at some point, we need to * dynamically allocate this so we get one per device. 
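The vme_fake hunk above is worth a closer look: __root_device_register() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the result has to be tested with IS_ERR() before first use. A minimal sketch of the pattern (the module boilerplate is illustrative, not part of the patch):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/module.h>

    static struct device *vme_root;

    static int __init fake_init(void)
    {
            /* __root_device_register() returns ERR_PTR(-errno) on failure,
             * never NULL, so IS_ERR()/PTR_ERR() is the correct check. */
            vme_root = __root_device_register("vme", THIS_MODULE);
            if (IS_ERR(vme_root))
                    return PTR_ERR(vme_root);

            return 0;
    }

    static void __exit fake_exit(void)
    {
            root_device_unregister(vme_root);
    }

    module_init(fake_init);
    module_exit(fake_exit);

Without the IS_ERR() check, a failed registration would leave an error pointer in vme_root and the first dereference would oops.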
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 50ae26977a027..5ccda1a363ecc 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -1771,6 +1771,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, return 0; err_dma: + list_del(&entry->list); err_dest: err_source: err_align: diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 15a2ee32f116e..15842377c8d2c 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -1131,6 +1131,8 @@ int w1_process(void *data) /* remainder if it woke up early */ unsigned long jremain = 0; + atomic_inc(&dev->refcnt); + for (;;) { if (!jremain && dev->search_count) { @@ -1158,8 +1160,10 @@ int w1_process(void *data) */ mutex_unlock(&dev->list_mutex); - if (kthread_should_stop()) + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); break; + } /* Only sleep when the search is active. */ if (dev->search_count) { diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index b3e1792d9c49f..3a71c5eb2f837 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c @@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl, dev->search_count = w1_search_count; dev->enable_pullup = w1_enable_pullup; - /* 1 for w1_process to decrement - * 1 for __w1_remove_master_device to decrement + /* For __w1_remove_master_device to decrement */ - atomic_set(&dev->refcnt, 2); + atomic_set(&dev->refcnt, 1); INIT_LIST_HEAD(&dev->slist); INIT_LIST_HEAD(&dev->async_list); diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index aafc8d98bf9fd..370f648cb4b1a 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -86,7 +86,7 @@ static int __diag288(unsigned int func, unsigned int timeout, "1:\n" EX_TABLE(0b, 1b) : "+d" (err) : "d"(__func), "d"(__timeout), - "d"(__action), "d"(__len) : "1", "cc"); + "d"(__action), "d"(__len) : "1", "cc", "memory"); return err; } @@ -272,12 +272,21 @@ static int __init diag288_init(void) char ebc_begin[] = { 194, 197, 199, 201, 213 }; + char *ebc_cmd; watchdog_set_nowayout(&wdt_dev, nowayout_info); if (MACHINE_IS_VM) { - if (__diag288_vm(WDT_FUNC_INIT, 15, - ebc_begin, sizeof(ebc_begin)) != 0) { + ebc_cmd = kmalloc(sizeof(ebc_begin), GFP_KERNEL); + if (!ebc_cmd) { + pr_err("The watchdog cannot be initialized\n"); + return -ENOMEM; + } + memcpy(ebc_cmd, ebc_begin, sizeof(ebc_begin)); + ret = __diag288_vm(WDT_FUNC_INIT, 15, + ebc_cmd, sizeof(ebc_begin)); + kfree(ebc_cmd); + if (ret != 0) { pr_err("The watchdog cannot be initialized\n"); return -EINVAL; } diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h index 40ef379c28ab0..9c286b2a19001 100644 --- a/drivers/xen/gntdev-common.h +++ b/drivers/xen/gntdev-common.h @@ -44,9 +44,10 @@ struct gntdev_unmap_notify { }; struct gntdev_grant_map { + atomic_t in_use; struct mmu_interval_notifier notifier; + bool notifier_init; struct list_head next; - struct vm_area_struct *vma; int index; int count; int flags; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 54fee4087bf10..16acddaff9aea 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -276,6 +276,9 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) */ } + if (use_ptemod && map->notifier_init) + mmu_interval_notifier_remove(&map->notifier); + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); @@ -288,21 +291,14 @@ void gntdev_put_map(struct 
gntdev_priv *priv, struct gntdev_grant_map *map) static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) { struct gntdev_grant_map *map = data; - unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; - int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; + unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT; + int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte | + (1 << _GNTMAP_guest_avail0); u64 pte_maddr; BUG_ON(pgnr >= map->count); pte_maddr = arbitrary_virt_to_machine(pte).maddr; - /* - * Set the PTE as special to force get_user_pages_fast() fall - * back to the slow path. If this is not supported as part of - * the grant map, it will be done afterwards. - */ - if (xen_feature(XENFEAT_gnttab_map_avail_bits)) - flags |= (1 << _GNTMAP_guest_avail0); - gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, map->grants[pgnr].ref, map->grants[pgnr].domid); @@ -311,14 +307,6 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) return 0; } -#ifdef CONFIG_X86 -static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data) -{ - set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte)); - return 0; -} -#endif - int gntdev_map_grant_pages(struct gntdev_grant_map *map) { size_t alloced = 0; @@ -372,8 +360,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) for (i = 0; i < map->count; i++) { if (map->map_ops[i].status == GNTST_okay) { map->unmap_ops[i].handle = map->map_ops[i].handle; - if (!use_ptemod) - alloced++; + alloced++; } else if (!err) err = -EINVAL; @@ -382,8 +369,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) if (use_ptemod) { if (map->kmap_ops[i].status == GNTST_okay) { - if (map->map_ops[i].status == GNTST_okay) - alloced++; + alloced++; map->kunmap_ops[i].handle = map->kmap_ops[i].handle; } else if (!err) err = -EINVAL; @@ -399,20 +385,42 @@ static void __unmap_grant_pages_done(int result, unsigned int i; struct gntdev_grant_map *map = data->data; unsigned int offset = data->unmap_ops - map->unmap_ops; + int successful_unmaps = 0; + int live_grants; for (i = 0; i < data->count; i++) { + if (map->unmap_ops[offset + i].status == GNTST_okay && + map->unmap_ops[offset + i].handle != -1) + successful_unmaps++; + WARN_ON(map->unmap_ops[offset+i].status && map->unmap_ops[offset+i].handle != -1); pr_debug("unmap handle=%d st=%d\n", map->unmap_ops[offset+i].handle, map->unmap_ops[offset+i].status); map->unmap_ops[offset+i].handle = -1; + if (use_ptemod) { + if (map->kunmap_ops[offset + i].status == GNTST_okay && + map->kunmap_ops[offset + i].handle != -1) + successful_unmaps++; + + WARN_ON(map->kunmap_ops[offset+i].status && + map->kunmap_ops[offset+i].handle != -1); + pr_debug("kunmap handle=%u st=%d\n", + map->kunmap_ops[offset+i].handle, + map->kunmap_ops[offset+i].status); + map->kunmap_ops[offset+i].handle = -1; + } } + /* * Decrease the live-grant counter. This must happen after the loop to * prevent premature reuse of the grants by gnttab_mmap(). 
*/ - atomic_sub(data->count, &map->live_grants); + live_grants = atomic_sub_return(successful_unmaps, &map->live_grants); + if (WARN_ON(live_grants < 0)) + pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n", + __func__, live_grants, successful_unmaps); /* Release reference taken by __unmap_grant_pages */ gntdev_put_map(NULL, map); @@ -493,11 +501,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma) struct gntdev_priv *priv = file->private_data; pr_debug("gntdev_vma_close %p\n", vma); - if (use_ptemod) { - WARN_ON(map->vma != vma); - mmu_interval_notifier_remove(&map->notifier); - map->vma = NULL; - } + vma->vm_private_data = NULL; gntdev_put_map(priv, map); } @@ -525,29 +529,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, struct gntdev_grant_map *map = container_of(mn, struct gntdev_grant_map, notifier); unsigned long mstart, mend; + unsigned long map_start, map_end; if (!mmu_notifier_range_blockable(range)) return false; + map_start = map->pages_vm_start; + map_end = map->pages_vm_start + (map->count << PAGE_SHIFT); + /* * If the VMA is split or otherwise changed the notifier is not * updated, but we don't want to process VA's outside the modified * VMA. FIXME: It would be much more understandable to just prevent * modifying the VMA in the first place. */ - if (map->vma->vm_start >= range->end || - map->vma->vm_end <= range->start) + if (map_start >= range->end || map_end <= range->start) return true; - mstart = max(range->start, map->vma->vm_start); - mend = min(range->end, map->vma->vm_end); + mstart = max(range->start, map_start); + mend = min(range->end, map_end); pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", - map->index, map->count, - map->vma->vm_start, map->vma->vm_end, - range->start, range->end, mstart, mend); - unmap_grant_pages(map, - (mstart - map->vma->vm_start) >> PAGE_SHIFT, - (mend - mstart) >> PAGE_SHIFT); + map->index, map->count, map_start, map_end, + range->start, range->end, mstart, mend); + unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT, + (mend - mstart) >> PAGE_SHIFT); return true; } @@ -1027,18 +1032,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) return -EINVAL; pr_debug("map %d+%d at %lx (pgoff %lx)\n", - index, count, vma->vm_start, vma->vm_pgoff); + index, count, vma->vm_start, vma->vm_pgoff); mutex_lock(&priv->lock); map = gntdev_find_map_index(priv, index, count); if (!map) goto unlock_out; - if (use_ptemod && map->vma) - goto unlock_out; - if (atomic_read(&map->live_grants)) { - err = -EAGAIN; + if (!atomic_add_unless(&map->in_use, 1, 1)) goto unlock_out; - } + refcount_inc(&map->users); vma->vm_ops = &gntdev_vmops; @@ -1059,15 +1061,16 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) map->flags |= GNTMAP_readonly; } + map->pages_vm_start = vma->vm_start; + if (use_ptemod) { - map->vma = vma; err = mmu_interval_notifier_insert_locked( &map->notifier, vma->vm_mm, vma->vm_start, vma->vm_end - vma->vm_start, &gntdev_mmu_ops); - if (err) { - map->vma = NULL; + if (err) goto out_unlock_put; - } + + map->notifier_init = true; } mutex_unlock(&priv->lock); @@ -1084,7 +1087,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) */ mmu_interval_read_begin(&map->notifier); - map->pages_vm_start = vma->vm_start; err = apply_to_page_range(vma->vm_mm, vma->vm_start, vma->vm_end - vma->vm_start, find_grant_ptes, map); @@ -1102,23 +1104,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) err = 
vm_map_pages_zero(vma, map->pages, map->count); if (err) goto out_put_map; - } else { -#ifdef CONFIG_X86 - /* - * If the PTEs were not made special by the grant map - * hypercall, do so here. - * - * This is racy since the mapping is already visible - * to userspace but userspace should be well-behaved - * enough to not touch it until the mmap() call - * returns. - */ - if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) { - apply_to_page_range(vma->vm_mm, vma->vm_start, - vma->vm_end - vma->vm_start, - set_grant_ptes_as_special, NULL); - } -#endif } return 0; @@ -1130,13 +1115,8 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) out_unlock_put: mutex_unlock(&priv->lock); out_put_map: - if (use_ptemod) { + if (use_ptemod) unmap_grant_pages(map, 0, map->count); - if (map->vma) { - mmu_interval_notifier_remove(&map->notifier); - map->vma = NULL; - } - } gntdev_put_map(priv, map); return err; } diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c index cdc6daa7a9f66..9cf7085a260b4 100644 --- a/drivers/xen/pcpu.c +++ b/drivers/xen/pcpu.c @@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu) err = device_register(dev); if (err) { - pcpu_release(dev); + put_device(dev); return err; } diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 9db557b76511b..804d8f4d0e73d 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -137,7 +137,7 @@ static int platform_pci_probe(struct pci_dev *pdev, if (ret) { dev_warn(&pdev->dev, "Unable to set the evtchn callback " "err=%d\n", ret); - goto out; + goto irq_out; } } @@ -145,13 +145,16 @@ static int platform_pci_probe(struct pci_dev *pdev, grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); ret = gnttab_setup_auto_xlat_frames(grant_frames); if (ret) - goto out; + goto irq_out; ret = gnttab_init(); if (ret) goto grant_out; return 0; grant_out: gnttab_free_auto_xlat_frames(); +irq_out: + if (!xen_have_vector_callback) + free_irq(pdev->irq, pdev); out: pci_release_region(pdev, 0); mem_out: diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index cd5f2f09468e2..28537a1a0e0b9 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, goto out; } - pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL); + pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN); if (!pfns) { rc = -ENOMEM; goto out; diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index a7d293fa8d140..3b5a8e2c4d475 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -129,13 +129,13 @@ static bool pvcalls_conn_back_read(void *opaque) if (masked_prod < masked_cons) { vec[0].iov_base = data->in + masked_prod; vec[0].iov_len = wanted; - iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted); + iov_iter_kvec(&msg.msg_iter, READ, vec, 1, wanted); } else { vec[0].iov_base = data->in + masked_prod; vec[0].iov_len = array_size - masked_prod; vec[1].iov_base = data->in; vec[1].iov_len = wanted - vec[0].iov_len; - iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted); + iov_iter_kvec(&msg.msg_iter, READ, vec, 2, wanted); } atomic_set(&map->read, 0); @@ -188,13 +188,13 @@ static bool pvcalls_conn_back_write(struct sock_mapping *map) if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) { vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); vec[0].iov_len = size; - iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size); + iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, size); } else { 
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); vec[0].iov_len = array_size - pvcalls_mask(cons, array_size); vec[1].iov_base = data->out; vec[1].iov_len = size - vec[0].iov_len; - iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size); + iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, size); } atomic_set(&map->write, 0); diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 5e53b4817f167..097316a741268 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -190,13 +190,16 @@ static const struct config_field caplist_pm[] = { }; static struct msi_msix_field_config { - u16 enable_bit; /* bit for enabling MSI/MSI-X */ - unsigned int int_type; /* interrupt type for exclusiveness check */ + u16 enable_bit; /* bit for enabling MSI/MSI-X */ + u16 allowed_bits; /* bits allowed to be changed */ + unsigned int int_type; /* interrupt type for exclusiveness check */ } msi_field_config = { .enable_bit = PCI_MSI_FLAGS_ENABLE, + .allowed_bits = PCI_MSI_FLAGS_ENABLE, .int_type = INTERRUPT_TYPE_MSI, }, msix_field_config = { .enable_bit = PCI_MSIX_FLAGS_ENABLE, + .allowed_bits = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL, .int_type = INTERRUPT_TYPE_MSIX, }; @@ -229,7 +232,7 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value, return 0; if (!dev_data->allow_interrupt_control || - (new_value ^ old_value) & ~field_config->enable_bit) + (new_value ^ old_value) & ~field_config->allowed_bits) return PCIBIOS_SET_FAILED; if (new_value & field_config->enable_bit) { diff --git a/fs/Makefile b/fs/Makefile index 999d1a23f036c..c660ce28f1498 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -32,8 +32,6 @@ obj-$(CONFIG_TIMERFD) += timerfd.o obj-$(CONFIG_EVENTFD) += eventfd.o obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_AIO) += aio.o -obj-$(CONFIG_IO_URING) += io_uring.o -obj-$(CONFIG_IO_WQ) += io-wq.o obj-$(CONFIG_FS_DAX) += dax.o obj-$(CONFIG_FS_ENCRYPTION) += crypto/ obj-$(CONFIG_FS_VERITY) += verity/ diff --git a/fs/affs/file.c b/fs/affs/file.c index d91b0133d95da..c3d89fa1bab77 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -879,7 +879,7 @@ affs_truncate(struct inode *inode) if (inode->i_size > AFFS_I(inode)->mmu_private) { struct address_space *mapping = inode->i_mapping; struct page *page; - void *fsdata; + void *fsdata = NULL; loff_t isize = inode->i_size; int res; diff --git a/fs/afs/flock.c b/fs/afs/flock.c index cb3054c7843ea..466ad609f2057 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call) if (call->error == 0) { spin_lock(&vnode->lock); trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0); - vnode->locked_at = call->reply_time; + vnode->locked_at = call->issue_time; afs_schedule_lock_extension(vnode); spin_unlock(&vnode->lock); } diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index e7e98ad63a91a..def80365fe79d 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -161,8 +161,8 @@ void afs_fileserver_probe_result(struct afs_call *call) } } - if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && - rtt_us < server->probe.rtt) { + rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us); + if (rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; server->rtt = rtt_us; alist->preferred = index; @@ -360,12 +360,15 @@ void afs_fs_probe_dispatcher(struct work_struct *work) unsigned long nowj, timer_at, poll_at; bool first_pass = true, set_timer = 
false; - if (!net->live) + if (!net->live) { + afs_dec_servers_outstanding(net); return; + } _enter(""); if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) { + afs_dec_servers_outstanding(net); _leave(" [none]"); return; } diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 1d95ed9dd86e6..0048a32cb040e 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -130,7 +130,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp, static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) { - return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry; + return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry; } static void xdr_decode_AFSCallBack(const __be32 **_bp, diff --git a/fs/afs/internal.h b/fs/afs/internal.h index dc08a3d9b3a8b..637cbe549397c 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -135,7 +135,6 @@ struct afs_call { bool need_attention; /* T if RxRPC poked us */ bool async; /* T if asynchronous */ bool upgrade; /* T to request service upgrade */ - bool have_reply_time; /* T if have got reply_time */ bool intr; /* T if interruptible */ bool unmarshalling_error; /* T if an unmarshalling error occurred */ u16 service_id; /* Actual service ID (after upgrade) */ @@ -149,7 +148,7 @@ struct afs_call { } __attribute__((packed)); __be64 tmp64; }; - ktime_t reply_time; /* Time of first reply packet */ + ktime_t issue_time; /* Time of issue of operation */ }; struct afs_call_type { diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 1d1a8debe4723..f1dc2162900a4 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -69,6 +69,7 @@ int afs_abort_to_error(u32 abort_code) /* Unified AFS error table */ case UAEPERM: return -EPERM; case UAENOENT: return -ENOENT; + case UAEAGAIN: return -EAGAIN; case UAEACCES: return -EACCES; case UAEBUSY: return -EBUSY; case UAEEXIST: return -EEXIST; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index efe0fb3ad8bdc..535d28b44bca3 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -429,6 +429,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) if (call->max_lifespan) rxrpc_kernel_set_max_life(call->net->socket, rxcall, call->max_lifespan); + call->issue_time = ktime_get_real(); /* send the request */ iov[0].iov_base = call->request; @@ -533,12 +534,6 @@ static void afs_deliver_to_call(struct afs_call *call) return; } - if (!call->have_reply_time && - rxrpc_kernel_get_reply_time(call->net->socket, - call->rxcall, - &call->reply_time)) - call->have_reply_time = true; - ret = call->type->deliver(call); state = READ_ONCE(call->state); if (ret == 0 && call->unmarshalling_error) diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index bd787e71a657f..5b2ef5ffd716f 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -239,8 +239,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp, struct afs_callback *cb = &scb->callback; ktime_t cb_expiry; - cb_expiry = call->reply_time; - cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100); + cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100); cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC); scb->have_cb = true; *_bp += xdr_size(x); diff --git a/fs/aio.c b/fs/aio.c index 2a9dfa58ec3ab..5934ea84b4993 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -335,6 +335,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma) spin_lock(&mm->ioctx_lock); rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); + if (!table) + goto out_unlock; + for (i = 0; i < table->nr; i++) { struct kioctx *ctx; @@ 
-348,6 +351,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma) } } +out_unlock: rcu_read_unlock(); spin_unlock(&mm->ioctx_lock); return res; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 213864bc7e8c0..ccc4c6d8a578f 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -907,7 +907,7 @@ static int load_elf_binary(struct linux_binprm *bprm) interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL); if (!interp_elf_ex) { retval = -ENOMEM; - goto out_free_ph; + goto out_free_file; } /* Get the exec headers */ @@ -1328,6 +1328,7 @@ static int load_elf_binary(struct linux_binprm *bprm) out_free_dentry: kfree(interp_elf_ex); kfree(interp_elf_phdata); +out_free_file: allow_write_access(interpreter); if (interpreter) fput(interpreter); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 5764295a3f0ff..8c3b7cc8e2a16 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -434,8 +434,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) current->mm->start_stack = current->mm->start_brk + stack_size; #endif - if (create_elf_fdpic_tables(bprm, current->mm, - &exec_params, &interp_params) < 0) + retval = create_elf_fdpic_tables(bprm, current->mm, &exec_params, + &interp_params); + if (retval < 0) goto error; kdebug("- start_code %lx", current->mm->start_code); diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 11b5bf2419555..ce0047feea729 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -44,10 +44,10 @@ static LIST_HEAD(entries); static int enabled = 1; enum {Enabled, Magic}; -#define MISC_FMT_PRESERVE_ARGV0 (1 << 31) -#define MISC_FMT_OPEN_BINARY (1 << 30) -#define MISC_FMT_CREDENTIALS (1 << 29) -#define MISC_FMT_OPEN_FILE (1 << 28) +#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31) +#define MISC_FMT_OPEN_BINARY (1UL << 30) +#define MISC_FMT_CREDENTIALS (1UL << 29) +#define MISC_FMT_OPEN_FILE (1UL << 28) typedef struct { struct list_head list; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index baff31a147e7d..d2f77b1242a13 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -137,6 +137,7 @@ struct share_check { u64 root_objectid; u64 inum; int share_count; + bool have_delayed_delete_refs; }; static inline int extent_is_shared(struct share_check *sc) @@ -287,8 +288,10 @@ static void prelim_release(struct preftree *preftree) struct prelim_ref *ref, *next_ref; rbtree_postorder_for_each_entry_safe(ref, next_ref, - &preftree->root.rb_root, rbnode) + &preftree->root.rb_root, rbnode) { + free_inode_elem_list(ref->inode_list); free_pref(ref); + } preftree->root = RB_ROOT_CACHED; preftree->count = 0; @@ -429,6 +432,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, u64 wanted_disk_byte = ref->wanted_disk_byte; u64 count = 0; u64 data_offset; + u8 type; if (level != 0) { eb = path->nodes[level]; @@ -483,6 +487,9 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, continue; } fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); + type = btrfs_file_extent_type(eb, fi); + if (type == BTRFS_FILE_EXTENT_INLINE) + goto next; disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); data_offset = btrfs_file_extent_offset(eb, fi); @@ -646,6 +653,18 @@ unode_aux_to_inode_list(struct ulist_node *node) return (struct extent_inode_elem *)(uintptr_t)node->aux; } +static void free_leaf_list(struct ulist *ulist) +{ + struct ulist_node *node; + struct ulist_iterator uiter; + + ULIST_ITER_INIT(&uiter); + while ((node = ulist_next(ulist, &uiter))) + 
free_inode_elem_list(unode_aux_to_inode_list(node)); + + ulist_free(ulist); +} + /* * We maintain three separate rbtrees: one for direct refs, one for * indirect refs which have a key, and one for indirect refs which do not @@ -760,7 +779,11 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, cond_resched(); } out: - ulist_free(parents); + /* + * We may have inode lists attached to refs in the parents ulist, so we + * must free them before freeing the ulist and its refs. + */ + free_leaf_list(parents); return ret; } @@ -817,16 +840,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, struct share_check *sc) { struct btrfs_delayed_ref_node *node; - struct btrfs_delayed_extent_op *extent_op = head->extent_op; struct btrfs_key key; - struct btrfs_key tmp_op_key; struct rb_node *n; int count; int ret = 0; - if (extent_op && extent_op->update_key) - btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); - spin_lock(&head->lock); for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { node = rb_entry(n, struct btrfs_delayed_ref_node, @@ -852,10 +870,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, case BTRFS_TREE_BLOCK_REF_KEY: { /* NORMAL INDIRECT METADATA backref */ struct btrfs_delayed_tree_ref *ref; + struct btrfs_key *key_ptr = NULL; + + if (head->extent_op && head->extent_op->update_key) { + btrfs_disk_key_to_cpu(&key, &head->extent_op->key); + key_ptr = &key; + } ref = btrfs_delayed_node_to_tree_ref(node); ret = add_indirect_ref(fs_info, preftrees, ref->root, - &tmp_op_key, ref->level + 1, + key_ptr, ref->level + 1, node->bytenr, count, sc, GFP_ATOMIC); break; @@ -881,13 +905,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, key.offset = ref->offset; /* - * Found a inum that doesn't match our known inum, we - * know it's shared. + * If we have a share check context and a reference for + * another inode, we can't exit immediately. This is + * because even if this is a BTRFS_ADD_DELAYED_REF + * reference we may find next a BTRFS_DROP_DELAYED_REF + * which cancels out this ADD reference. + * + * If this is a DROP reference and there was no previous + * ADD reference, then we need to signal that when we + * process references from the extent tree (through + * add_inline_refs() and add_keyed_refs()), we should + * not exit early if we find a reference for another + * inode, because one of the delayed DROP references + * may cancel that reference in the extent tree. 
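The rule that comment describes can be condensed into a two-phase sketch: delayed refs are netted first (a DROP can cancel an ADD), and only the later extent-tree scan may declare the extent shared, unless delayed deletes were seen, in which case even that conclusion is unsafe. A minimal, self-contained C sketch with hypothetical types, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct ref { unsigned long inum; int count; /* net: ADDs minus DROPs */ };

    /* Phase 2: the extent-tree scan. A reference owned by another inode
     * proves sharing only when no delayed DROP could still cancel it. */
    static bool extent_is_shared(const struct ref *tree_refs, int n,
                                 unsigned long my_inum,
                                 bool have_delayed_delete_refs)
    {
        for (int i = 0; i < n; i++)
            if (tree_refs[i].inum != my_inum && !have_delayed_delete_refs)
                return true;
        return false;
    }

    int main(void)
    {
        bool have_delayed_delete_refs = false;
        struct ref delayed[1] = { { 42, -1 } }; /* pending DROP for inode 42 */
        struct ref tree[1]    = { { 42, +1 } }; /* extent tree still lists it */

        /* Phase 1: net the delayed refs; a negative count flags an
         * in-flight delete, so phase 2 must not conclude "shared" early. */
        for (int i = 0; i < 1; i++)
            if (delayed[i].count < 0)
                have_delayed_delete_refs = true;

        printf("shared: %d\n",
               extent_is_shared(tree, 1, 7, have_delayed_delete_refs)); /* 0 */
        return 0;
    }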
*/ - if (sc && sc->inum && ref->objectid != sc->inum) { - ret = BACKREF_FOUND_SHARED; - goto out; - } + if (sc && count < 0) + sc->have_delayed_delete_refs = true; ret = add_indirect_ref(fs_info, preftrees, ref->root, &key, 0, node->bytenr, count, sc, @@ -917,7 +950,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, } if (!ret) ret = extent_is_shared(sc); -out: + spin_unlock(&head->lock); return ret; } @@ -1020,7 +1053,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1030,6 +1064,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, ret = add_indirect_ref(fs_info, preftrees, root, &key, 0, bytenr, count, sc, GFP_NOFS); + break; } default: @@ -1119,7 +1154,8 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1358,6 +1394,12 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, if (ret < 0) goto out; ref->inode_list = eie; + /* + * We transferred the list ownership to the ref, + * so set to NULL to avoid a double free in case + * an error happens after this. + */ + eie = NULL; } ret = ulist_add_merge_ptr(refs, ref->parent, ref->inode_list, @@ -1383,6 +1425,14 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, eie->next = ref->inode_list; } eie = NULL; + /* + * We have transferred the inode list ownership from + * this ref to the ref we added to the 'refs' ulist. + * So set this ref's inode list to NULL to avoid + * use-after-free when our caller uses it or double + * frees in case an error happens before we return. + */ + ref->inode_list = NULL; } cond_resched(); } @@ -1399,24 +1449,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, return ret; } -static void free_leaf_list(struct ulist *blocks) -{ - struct ulist_node *node = NULL; - struct extent_inode_elem *eie; - struct ulist_iterator uiter; - - ULIST_ITER_INIT(&uiter); - while ((node = ulist_next(blocks, &uiter))) { - if (!node->aux) - continue; - eie = unode_aux_to_inode_list(node); - free_inode_elem_list(eie); - node->aux = 0; - } - - ulist_free(blocks); -} - /* * Finds all leafs with a reference to the specified combination of bytenr and * offset. 
key_list_head will point to a list of corresponding keys (caller must @@ -1542,6 +1574,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, .root_objectid = root->root_key.objectid, .inum = inum, .share_count = 0, + .have_delayed_delete_refs = false, }; ulist_init(roots); @@ -1576,6 +1609,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, break; bytenr = node->val; shared.share_count = 0; + shared.have_delayed_delete_refs = false; cond_resched(); } @@ -2030,10 +2064,29 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info, return ret; } +static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) +{ + struct btrfs_data_container *inodes = ctx; + const size_t c = 3 * sizeof(u64); + + if (inodes->bytes_left >= c) { + inodes->bytes_left -= c; + inodes->val[inodes->elem_cnt] = inum; + inodes->val[inodes->elem_cnt + 1] = offset; + inodes->val[inodes->elem_cnt + 2] = root; + inodes->elem_cnt += 3; + } else { + inodes->bytes_missing += c - inodes->bytes_left; + inodes->bytes_left = 0; + inodes->elem_missed += 3; + } + + return 0; +} + int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, struct btrfs_path *path, - iterate_extent_inodes_t *iterate, void *ctx, - bool ignore_offset) + void *ctx, bool ignore_offset) { int ret; u64 extent_item_pos; @@ -2051,7 +2104,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, extent_item_pos = logical - found_key.objectid; ret = iterate_extent_inodes(fs_info, found_key.objectid, extent_item_pos, search_commit_root, - iterate, ctx, ignore_offset); + build_ino_list, ctx, ignore_offset); return ret; } diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 17abde7f794ce..6ed18b807b640 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -35,8 +35,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info, bool ignore_offset); int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, - struct btrfs_path *path, - iterate_extent_inodes_t *iterate, void *ctx, + struct btrfs_path *path, void *ctx, bool ignore_offset); int paths_from_inode(u64 inum, struct inode_fs_paths *ipath); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2c7e50980a706..f2abd8bfd4a0f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4105,6 +4105,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) /* clear out the rbtree of defraggable inodes */ btrfs_cleanup_defrag_inodes(fs_info); + /* + * After we parked the cleaner kthread, ordered extents may have + * completed and created new delayed iputs. If one of the async reclaim + * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we + * can hang forever trying to stop it, because if a delayed iput is + * added after it ran btrfs_run_delayed_iputs() and before it called + * btrfs_wait_on_delayed_iputs(), it will hang forever since there is + * no one else to run iputs. + * + * So wait for all ongoing ordered extents to complete and then run + * delayed iputs. This works because once we reach this point no one + * can either create new ordered extents nor create delayed iputs + * through some other means. + * + * Also note that btrfs_wait_ordered_roots() is not safe here, because + * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent, + * but the delayed iput for the respective inode is made only when doing + * the final btrfs_put_ordered_extent() (which must happen at + * btrfs_finish_ordered_io() when we are unmounting). 
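The ordering requirement spelled out in the close_ctree() comment above (flush the workqueues that can create delayed iputs, then run the delayed iputs) is an instance of a generic shutdown pattern: drain the producers before the queue they feed. A toy C sketch with hypothetical queues, not the btrfs machinery:

    #include <stdio.h>

    #define MAX 8
    static int ordered_extents = 2;          /* queue A: pending completions */
    static int delayed_iputs[MAX], n_iputs;  /* queue B: created by queue A */

    static void flush_ordered_extents(void)
    {
        while (ordered_extents > 0) {
            ordered_extents--;
            delayed_iputs[n_iputs++] = 1; /* completion queues a delayed iput */
        }
    }

    static void run_delayed_iputs(void)
    {
        while (n_iputs > 0) {
            n_iputs--;
            puts("ran delayed iput");
        }
    }

    int main(void)
    {
        /* Flush the producers first, then drain what they produced; in the
         * reverse order, queue B could still grow with no one left to run it. */
        flush_ordered_extents();
        run_delayed_iputs();
        return 0;
    }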
+ */ + btrfs_flush_workqueue(fs_info->endio_write_workers); + /* Ordered extents for free space inodes. */ + btrfs_flush_workqueue(fs_info->endio_freespace_worker); + btrfs_run_delayed_iputs(fs_info); + cancel_work_sync(&fs_info->async_reclaim_work); cancel_work_sync(&fs_info->async_data_reclaim_work); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 1a8d419d9e1f4..bfa2bf44529c2 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -58,7 +58,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, } struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, + u64 root_objectid, u64 generation, int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h index f32f4113c976a..5afb7ca428289 100644 --- a/fs/btrfs/export.h +++ b/fs/btrfs/export.h @@ -19,7 +19,7 @@ struct btrfs_fid { } __attribute__ ((packed)); struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, + u64 root_objectid, u64 generation, int check_generation); struct dentry *btrfs_get_parent(struct dentry *child); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 284294620e9fa..7d9b8050b09cd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1684,6 +1684,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, BUG(); if (ret && insert_reserved) btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); + if (ret < 0) + btrfs_err(trans->fs_info, +"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d", + node->bytenr, node->num_bytes, node->type, + node->action, node->ref_mod, ret); return ret; } @@ -1935,8 +1940,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, if (ret) { unselect_delayed_ref_head(delayed_refs, locked_ref); btrfs_put_delayed_ref(ref); - btrfs_debug(fs_info, "run_one_delayed_ref returned %d", - ret); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b5e9bfe884c4b..fc335b5e44df8 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2811,6 +2811,8 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) } } + btrfs_free_path(path); + path = NULL; if (copy_to_user(argp, subvol_info, sizeof(*subvol_info))) ret = -EFAULT; @@ -2903,6 +2905,8 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp) } out: + btrfs_free_path(path); + if (!ret || ret == -EOVERFLOW) { rootrefs->num_items = found; /* update min_treeid for next search */ @@ -2914,7 +2918,6 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp) } kfree(rootrefs); - btrfs_free_path(path); return ret; } @@ -3398,13 +3401,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info, di_args->bytes_used = btrfs_device_get_bytes_used(dev); di_args->total_bytes = btrfs_device_get_total_bytes(dev); memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); - if (dev->name) { - strncpy(di_args->path, rcu_str_deref(dev->name), - sizeof(di_args->path) - 1); - di_args->path[sizeof(di_args->path) - 1] = 0; - } else { + if (dev->name) + strscpy(di_args->path, rcu_str_deref(dev->name), sizeof(di_args->path)); + else di_args->path[0] = '\0'; - } out: rcu_read_unlock(); @@ -3878,6 +3878,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) ipath->fspath->val[i] = rel_ptr; } + btrfs_free_path(path); + path = NULL; ret = copy_to_user((void 
__user *)(unsigned long)ipa->fspath, ipath->fspath, size); if (ret) { @@ -3893,26 +3895,6 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) return ret; } -static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) -{ - struct btrfs_data_container *inodes = ctx; - const size_t c = 3 * sizeof(u64); - - if (inodes->bytes_left >= c) { - inodes->bytes_left -= c; - inodes->val[inodes->elem_cnt] = inum; - inodes->val[inodes->elem_cnt + 1] = offset; - inodes->val[inodes->elem_cnt + 2] = root; - inodes->elem_cnt += 3; - } else { - inodes->bytes_missing += c - inodes->bytes_left; - inodes->bytes_left = 0; - inodes->elem_missed += 3; - } - - return 0; -} - static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, void __user *arg, int version) { @@ -3948,21 +3930,20 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, size = min_t(u32, loi->size, SZ_16M); } - path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } - inodes = init_data_container(size); if (IS_ERR(inodes)) { ret = PTR_ERR(inodes); - inodes = NULL; - goto out; + goto out_loi; } + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto out; + } ret = iterate_inodes_from_logical(loi->logical, fs_info, path, - build_ino_list, inodes, ignore_offset); + inodes, ignore_offset); + btrfs_free_path(path); if (ret == -EINVAL) ret = -ENOENT; if (ret < 0) @@ -3974,7 +3955,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, ret = -EFAULT; out: - btrfs_free_path(path); kvfree(inodes); out_loi: kfree(loi); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index a02e38fb696c1..9fe6a01ea8b85 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1158,6 +1158,21 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); + } else { + /* + * We have set both BTRFS_FS_QUOTA_ENABLED and + * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with + * -EINPROGRESS. That can happen because someone started the + * rescan worker by calling quota rescan ioctl before we + * attempted to initialize the rescan worker. Failure due to + * quotas disabled in the meanwhile is not possible, because + * we are holding a write lock on fs_info->subvol_sem, which + * is also acquired when disabling quotas. + * Ignore such error, and any other error would need to undo + * everything we did in the transaction we just committed. 
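The reasoning in the qgroup comment above boils down to: the only error still possible at this point is the benign "already started" race, so it can be downgraded to success. A compact userspace analogue of that idiom, with hypothetical names:

    #include <assert.h>
    #include <errno.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool rescan_running;

    /* Returns 0 on success, -EINPROGRESS if someone already started it. */
    static int start_rescan_worker(void)
    {
        bool expected = false;
        if (!atomic_compare_exchange_strong(&rescan_running, &expected, true))
            return -EINPROGRESS;
        return 0;
    }

    int main(void)
    {
        int ret = start_rescan_worker();      /* first caller wins */
        assert(ret == 0);

        ret = start_rescan_worker();          /* racing caller loses ... */
        assert(ret == -EINPROGRESS);
        ret = 0;  /* ... but the work is underway either way, so succeed */
        return ret;
    }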
+ */ + ASSERT(ret == -EINPROGRESS); + ret = 0; } out_free_path: @@ -2898,14 +2913,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dstgroup->rsv_rfer = inherit->lim.rsv_rfer; dstgroup->rsv_excl = inherit->lim.rsv_excl; - ret = update_qgroup_limit_item(trans, dstgroup); - if (ret) { - fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_info(fs_info, - "unable to update quota limit for %llu", - dstgroup->qgroupid); - goto unlock; - } + qgroup_dirty(fs_info, dstgroup); } if (srcid) { @@ -3275,7 +3283,8 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, static bool rescan_should_stop(struct btrfs_fs_info *fs_info) { return btrfs_fs_closing(fs_info) || - test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); + test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) || + !test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); } static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) @@ -3287,6 +3296,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) int err = -ENOMEM; int ret = 0; bool stopped = false; + bool did_leaf_rescans = false; path = btrfs_alloc_path(); if (!path) @@ -3305,11 +3315,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) err = PTR_ERR(trans); break; } - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { - err = -EINTR; - } else { - err = qgroup_rescan_leaf(trans, path); - } + + err = qgroup_rescan_leaf(trans, path); + did_leaf_rescans = true; + if (err > 0) btrfs_commit_transaction(trans); else @@ -3323,22 +3332,29 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) if (err > 0 && fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - } else if (err < 0) { + } else if (err < 0 || stopped) { fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; } mutex_unlock(&fs_info->qgroup_rescan_lock); /* - * only update status, since the previous part has already updated the - * qgroup info. + * Only update status, since the previous part has already updated the + * qgroup info, and only if we did any actual work. This also prevents + * race with a concurrent quota disable, which has already set + * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at + * btrfs_quota_disable(). */ - trans = btrfs_start_transaction(fs_info->quota_root, 1); - if (IS_ERR(trans)) { - err = PTR_ERR(trans); + if (did_leaf_rescans) { + trans = btrfs_start_transaction(fs_info->quota_root, 1); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + trans = NULL; + btrfs_err(fs_info, + "fail to start transaction for status update: %d", + err); + } + } else { trans = NULL; - btrfs_err(fs_info, - "fail to start transaction for status update: %d", - err); } mutex_lock(&fs_info->qgroup_rescan_lock); diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h index 5c1a617eb25de..5c2b66d155ef7 100644 --- a/fs/btrfs/rcu-string.h +++ b/fs/btrfs/rcu-string.h @@ -18,7 +18,11 @@ static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) (len * sizeof(char)), mask); if (!ret) return ret; - strncpy(ret->str, src, len); + /* Warn if the source got unexpectedly truncated. 
*/ + if (WARN_ON(strscpy(ret->str, src, len) < 0)) { + kfree(ret); + return NULL; + } return ret; } diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 0392c556af601..88b9a5394561e 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3811,6 +3811,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, int ret; struct btrfs_device *dev; unsigned int nofs_flag; + bool need_commit = false; if (btrfs_fs_closing(fs_info)) return -EAGAIN; @@ -3924,6 +3925,12 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { + u64 old_super_errors; + + spin_lock(&sctx->stat_lock); + old_super_errors = sctx->stat.super_errors; + spin_unlock(&sctx->stat_lock); + btrfs_info(fs_info, "scrub: started on devid %llu", devid); /* * by holding device list mutex, we can @@ -3932,6 +3939,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); + + spin_lock(&sctx->stat_lock); + /* + * Super block errors found, but we can not commit a transaction + * in the current context, since btrfs_commit_transaction() needs + * to pause the currently running scrub (held by ourselves). + */ + if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) + need_commit = true; + spin_unlock(&sctx->stat_lock); } if (!ret) @@ -3958,6 +3975,25 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, scrub_workers_put(fs_info); scrub_put_ctx(sctx); + /* + * We found some super block errors before, now try to force a + * transaction commit, as scrub has finished. + */ + if (need_commit) { + struct btrfs_trans_handle *trans; + + trans = btrfs_start_transaction(fs_info->tree_root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + btrfs_err(fs_info, + "scrub: failed to start transaction to fix super block errors: %d", ret); + return ret; + } + ret = btrfs_commit_transaction(trans); + if (ret < 0) + btrfs_err(fs_info, + "scrub: failed to commit transaction to fix super block errors: %d", ret); + } return ret; out: scrub_workers_put(fs_info); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 6b80dee17f49d..4a6ba0997e399 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5398,6 +5398,7 @@ static int clone_range(struct send_ctx *sctx, u64 ext_len; u64 clone_len; u64 clone_data_offset; + bool crossed_src_i_size = false; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(clone_root->root, path); @@ -5454,8 +5455,10 @@ static int clone_range(struct send_ctx *sctx, if (key.offset >= clone_src_i_size) break; - if (key.offset + ext_len > clone_src_i_size) + if (key.offset + ext_len > clone_src_i_size) { ext_len = clone_src_i_size - key.offset; + crossed_src_i_size = true; + } clone_data_offset = btrfs_file_extent_offset(leaf, ei); if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) { @@ -5515,6 +5518,25 @@ static int clone_range(struct send_ctx *sctx, ret = send_clone(sctx, offset, clone_len, clone_root); } + } else if (crossed_src_i_size && clone_len < len) { + /* + * If we are at i_size of the clone source inode and we + * can not clone from it, terminate the loop. This is + * to avoid sending two write operations, one with a + * length matching clone_len and the final one after + * this loop with a length of len - clone_len. 
+ * + * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED + * was passed to the send ioctl), this helps avoid + * sending an encoded write for an offset that is not + * sector size aligned, in case the i_size of the source + * inode is not sector size aligned. That will make the + * receiver fall back to decompression of the data and + * writing it using regular buffered IO, therefore while + * not incorrect, it's not optimal due to decompression and + * possible re-compression at the receiver. + */ + break; } else { ret = send_extent_data(sctx, offset, clone_len); } diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 3bb6b688ece52..ecf190286377c 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1767,8 +1767,11 @@ int __init btrfs_init_sysfs(void) #ifdef CONFIG_BTRFS_DEBUG ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group); - if (ret) - goto out2; + if (ret) { + sysfs_unmerge_group(&btrfs_kset->kobj, + &btrfs_static_feature_attr_group); + goto out_remove_group; + } #endif return 0; diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 999c14e5d0bdd..0599566c66b06 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -192,7 +192,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) void btrfs_free_dummy_root(struct btrfs_root *root) { - if (!root) + if (IS_ERR_OR_NULL(root)) return; /* Will be freed by btrfs_free_fs_roots */ if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state))) diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index ce1ca8e73c2d1..289366c98f5b8 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -230,21 +230,21 @@ static int test_no_shared_qgroup(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, BTRFS_FS_TREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } @@ -256,31 +256,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root, return ret; } + /* btrfs_qgroup_account_extent() always frees the ulists passed to it. 
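Since btrfs_qgroup_account_extent() consumes its ulists, callers must treat the pointers as dead afterwards, which is why the test code above NULLs them immediately. A minimal sketch of this ownership-transfer rule, with hypothetical stand-ins for the btrfs types:

    #include <stdlib.h>

    struct ulist { int dummy; };

    /* Consumes and frees both inputs, on success and on failure alike. */
    static int account_extent(struct ulist *old_roots, struct ulist *new_roots)
    {
        free(old_roots);
        free(new_roots);
        return 0;
    }

    int main(void)
    {
        struct ulist *old_roots = calloc(1, sizeof(*old_roots));
        struct ulist *new_roots = calloc(1, sizeof(*new_roots));
        int ret = account_extent(old_roots, new_roots);

        /* Ownership moved into the callee: clear our copies so that any
         * later error path freeing them is a harmless free(NULL). */
        old_roots = NULL;
        new_roots = NULL;

        free(old_roots);
        free(new_roots);
        return ret;
    }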
*/ + old_roots = NULL; + new_roots = NULL; + if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, nodesize, nodesize)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } - old_roots = NULL; - new_roots = NULL; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = remove_extent_item(root, nodesize, nodesize); - if (ret) + if (ret) { + ulist_free(old_roots); return -EINVAL; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } @@ -331,21 +333,21 @@ static int test_multiple_refs(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, BTRFS_FS_TREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } @@ -366,21 +368,21 @@ static int test_multiple_refs(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = add_tree_ref(root, nodesize, nodesize, 0, BTRFS_FIRST_FREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } @@ -407,21 +409,21 @@ static int test_multiple_refs(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = remove_extent_ref(root, nodesize, nodesize, 0, BTRFS_FIRST_FREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d4d89e0738ff4..15435f983180f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -381,6 +381,7 @@ void btrfs_free_device(struct btrfs_device *device) static void free_fs_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; + WARN_ON(fs_devices->opened); while (!list_empty(&fs_devices->devices)) { device = list_entry(fs_devices->devices.next, @@ -1227,9 +1228,22 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&uuid_mutex); close_fs_devices(fs_devices); - if (!fs_devices->opened) + if (!fs_devices->opened) { list_splice_init(&fs_devices->seed_list, &list); + /* + * If the struct btrfs_fs_devices is not assembled with any + * other device, it can be re-initialized during the next mount + * without needing the device-scan step. Therefore, it can be + * fully freed. 
+ */ + if (fs_devices->num_devices == 1) { + list_del(&fs_devices->fs_list); + free_fs_devices(fs_devices); + } + } + + list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { close_fs_devices(fs_devices); list_del(&fs_devices->seed_list); @@ -1580,7 +1594,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device, goto out; } - while (1) { + while (search_start < search_end) { l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { @@ -1603,6 +1617,9 @@ static int find_free_dev_extent_start(struct btrfs_device *device, if (key.type != BTRFS_DEV_EXTENT_KEY) goto next; + if (key.offset > search_end) + break; + if (key.offset > search_start) { hole_size = key.offset - search_start; dev_extent_hole_check(device, &search_start, &hole_size, @@ -1663,6 +1680,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device, else ret = 0; + ASSERT(max_hole_start + max_hole_size <= search_end); out: btrfs_free_path(path); *start = max_hole_start; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 05615a1099dbc..673d74d7f7182 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -63,7 +63,7 @@ struct list_head *zlib_alloc_workspace(unsigned int level) workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), zlib_inflate_workspacesize()); - workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL); + workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL); workspace->level = level; workspace->buf = NULL; /* diff --git a/fs/buffer.c b/fs/buffer.c index 23f645657488b..ee66abadcbc2b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2350,7 +2350,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size) { struct address_space *mapping = inode->i_mapping; struct page *page; - void *fsdata; + void *fsdata = NULL; int err; err = inode_newsize_ok(inode, size); @@ -2376,7 +2376,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; unsigned int blocksize = i_blocksize(inode); struct page *page; - void *fsdata; + void *fsdata = NULL; pgoff_t index, curidx; loff_t curpos; unsigned zerofrom, offset, len; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 76e43a487bc63..210496dc2fd49 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2294,6 +2294,7 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid) */ static int unsafe_request_wait(struct inode *inode) { + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_request *req1 = NULL, *req2 = NULL; int ret, err = 0; @@ -2313,6 +2314,76 @@ static int unsafe_request_wait(struct inode *inode) } spin_unlock(&ci->i_unsafe_lock); + /* + * Trigger a flush of the journal logs in all the relevant MDSes + * manually, or in the worst case we must wait at most 5 seconds + * for the journal logs to be flushed by the MDSes periodically. 
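The code that follows this comment collects each relevant MDS session exactly once, indexed by MDS id, before sending the flush requests, since several unsafe requests may share a session. The dedup step can be sketched in isolation like so (hypothetical names, userspace C):

    #include <stdio.h>
    #include <stdlib.h>

    struct session { int mds; };

    int main(void)
    {
        struct session s0 = { 0 }, s2 = { 2 };
        /* Two unsafe requests happen to share the session for mds 0. */
        struct session *req_sessions[] = { &s0, &s0, &s2 };
        int max_sessions = 4;

        struct session **uniq = calloc(max_sessions, sizeof(*uniq));
        if (!uniq)
            return 1;

        /* Index by MDS id so duplicates collapse into one slot. */
        for (int i = 0; i < 3; i++)
            if (!uniq[req_sessions[i]->mds])
                uniq[req_sessions[i]->mds] = req_sessions[i];

        for (int i = 0; i < max_sessions; i++)
            if (uniq[i])
                printf("flush mdlog on mds%d\n", i); /* mds0, mds2: once each */

        free(uniq);
        return 0;
    }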
+ */ + if (req1 || req2) { + struct ceph_mds_request *req; + struct ceph_mds_session **sessions; + struct ceph_mds_session *s; + unsigned int max_sessions; + int i; + + mutex_lock(&mdsc->mutex); + max_sessions = mdsc->max_sessions; + + sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL); + if (!sessions) { + mutex_unlock(&mdsc->mutex); + err = -ENOMEM; + goto out; + } + + spin_lock(&ci->i_unsafe_lock); + if (req1) { + list_for_each_entry(req, &ci->i_unsafe_dirops, + r_unsafe_dir_item) { + s = req->r_session; + if (!s) + continue; + if (!sessions[s->s_mds]) { + s = ceph_get_mds_session(s); + sessions[s->s_mds] = s; + } + } + } + if (req2) { + list_for_each_entry(req, &ci->i_unsafe_iops, + r_unsafe_target_item) { + s = req->r_session; + if (!s) + continue; + if (!sessions[s->s_mds]) { + s = ceph_get_mds_session(s); + sessions[s->s_mds] = s; + } + } + } + spin_unlock(&ci->i_unsafe_lock); + + /* the auth MDS */ + spin_lock(&ci->i_ceph_lock); + if (ci->i_auth_cap) { + s = ci->i_auth_cap->session; + if (!sessions[s->s_mds]) + sessions[s->s_mds] = ceph_get_mds_session(s); + } + spin_unlock(&ci->i_ceph_lock); + mutex_unlock(&mdsc->mutex); + + /* send flush mdlog request to MDSes */ + for (i = 0; i < max_sessions; i++) { + s = sessions[i]; + if (s) { + send_flush_mdlog(s); + ceph_put_mds_session(s); + } + } + kfree(sessions); + } + dout("unsafe_request_wait %p wait on tid %llu %llu\n", inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL); if (req1) { @@ -2320,15 +2391,19 @@ static int unsafe_request_wait(struct inode *inode) ceph_timeout_jiffies(req1->r_timeout)); if (ret) err = -EIO; - ceph_mdsc_put_request(req1); } if (req2) { ret = !wait_for_completion_timeout(&req2->r_safe_completion, ceph_timeout_jiffies(req2->r_timeout)); if (ret) err = -EIO; - ceph_mdsc_put_request(req2); } + +out: + if (req1) + ceph_mdsc_put_request(req1); + if (req2) + ceph_mdsc_put_request(req2); return err; } @@ -2882,7 +2957,7 @@ int ceph_get_caps(struct file *filp, int need, int want, while (true) { flags &= CEPH_FILE_MODE_MASK; - if (atomic_read(&fi->num_locks)) + if (vfs_inode_has_locks(inode)) flags |= CHECK_FILELOCK; _got = 0; ret = try_get_cap_refs(inode, need, want, endoff, @@ -4310,33 +4385,9 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s) dout("flush_dirty_caps done\n"); } -static void iterate_sessions(struct ceph_mds_client *mdsc, - void (*cb)(struct ceph_mds_session *)) -{ - int mds; - - mutex_lock(&mdsc->mutex); - for (mds = 0; mds < mdsc->max_sessions; ++mds) { - struct ceph_mds_session *s; - - if (!mdsc->sessions[mds]) - continue; - - s = ceph_get_mds_session(mdsc->sessions[mds]); - if (!s) - continue; - - mutex_unlock(&mdsc->mutex); - cb(s); - ceph_put_mds_session(s); - mutex_lock(&mdsc->mutex); - } - mutex_unlock(&mdsc->mutex); -} - void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) { - iterate_sessions(mdsc, flush_dirty_session_caps); + ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true); } void __ceph_touch_fmode(struct ceph_inode_info *ci, diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 93d986856f1c9..943655e36a799 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -703,6 +703,12 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, if (dentry->d_name.len > NAME_MAX) return -ENAMETOOLONG; + /* + * Do not truncate the file, since atomic_open is called before the + * permission check. The caller will do the truncation afterward. 
+ */ + flags &= ~O_TRUNC; + if (flags & O_CREAT) { if (ceph_quota_is_max_files_exceeded(dir)) return -EDQUOT; @@ -769,9 +775,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, } set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); - err = ceph_mdsc_do_request(mdsc, - (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, - req); + err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req); err = ceph_handle_snapdir(req, dentry, err); if (err) goto out_req; diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 048a435a29bee..674d6ea89f717 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -32,18 +32,14 @@ void __init ceph_flock_init(void) static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src) { - struct ceph_file_info *fi = dst->fl_file->private_data; struct inode *inode = file_inode(dst->fl_file); atomic_inc(&ceph_inode(inode)->i_filelock_ref); - atomic_inc(&fi->num_locks); } static void ceph_fl_release_lock(struct file_lock *fl) { - struct ceph_file_info *fi = fl->fl_file->private_data; struct inode *inode = file_inode(fl->fl_file); struct ceph_inode_info *ci = ceph_inode(inode); - atomic_dec(&fi->num_locks); if (atomic_dec_and_test(&ci->i_filelock_ref)) { /* clear error when all locks are released */ spin_lock(&ci->i_ceph_lock); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 6859967df2b19..87a9e9096421a 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -809,6 +809,33 @@ static void put_request_session(struct ceph_mds_request *req) } } +void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc, + void (*cb)(struct ceph_mds_session *), + bool check_state) +{ + int mds; + + mutex_lock(&mdsc->mutex); + for (mds = 0; mds < mdsc->max_sessions; ++mds) { + struct ceph_mds_session *s; + + s = __ceph_lookup_mds_session(mdsc, mds); + if (!s) + continue; + + if (check_state && !check_session_state(s)) { + ceph_put_mds_session(s); + continue; + } + + mutex_unlock(&mdsc->mutex); + cb(s); + ceph_put_mds_session(s); + mutex_lock(&mdsc->mutex); + } + mutex_unlock(&mdsc->mutex); +} + void ceph_mdsc_release_request(struct kref *kref) { struct ceph_mds_request *req = container_of(kref, @@ -1157,7 +1184,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, /* * session messages */ -static struct ceph_msg *create_session_msg(u32 op, u64 seq) +struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq) { struct ceph_msg *msg; struct ceph_mds_session_head *h; @@ -1165,7 +1192,8 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq) msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, false); if (!msg) { - pr_err("create_session_msg ENOMEM creating msg\n"); + pr_err("ENOMEM creating session %s msg\n", + ceph_session_op_name(op)); return NULL; } h = msg->front.iov_base; @@ -1299,7 +1327,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes, GFP_NOFS, false); if (!msg) { - pr_err("create_session_msg ENOMEM creating msg\n"); + pr_err("ENOMEM creating session open msg\n"); return ERR_PTR(-ENOMEM); } p = msg->front.iov_base; @@ -1833,8 +1861,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc, dout("send_renew_caps to mds%d (%s)\n", session->s_mds, ceph_mds_state_name(state)); - msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, - ++session->s_renew_seq); + msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, + ++session->s_renew_seq); if (!msg) return -ENOMEM; ceph_con_send(&session->s_con, msg); @@ 
-1848,7 +1876,7 @@ static int send_flushmsg_ack(struct ceph_mds_client *mdsc, dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n", session->s_mds, ceph_session_state_name(session->s_state), seq); - msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); + msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); if (!msg) return -ENOMEM; ceph_con_send(&session->s_con, msg); @@ -1900,7 +1928,8 @@ static int request_close_session(struct ceph_mds_session *session) dout("request_close_session mds%d state %s seq %lld\n", session->s_mds, ceph_session_state_name(session->s_state), session->s_seq); - msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); + msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE, + session->s_seq); if (!msg) return -ENOMEM; ceph_con_send(&session->s_con, msg); @@ -3467,6 +3496,12 @@ static void handle_session(struct ceph_mds_session *session, break; case CEPH_SESSION_FLUSHMSG: + /* flush cap releases */ + spin_lock(&session->s_cap_lock); + if (session->s_num_cap_releases) + ceph_flush_cap_releases(mdsc, session); + spin_unlock(&session->s_cap_lock); + send_flushmsg_ack(mdsc, session, seq); break; @@ -4375,24 +4410,12 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, } /* - * lock unlock sessions, to wait ongoing session activities + * lock unlock the session, to wait ongoing session activities */ -static void lock_unlock_sessions(struct ceph_mds_client *mdsc) +static void lock_unlock_session(struct ceph_mds_session *s) { - int i; - - mutex_lock(&mdsc->mutex); - for (i = 0; i < mdsc->max_sessions; i++) { - struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); - if (!s) - continue; - mutex_unlock(&mdsc->mutex); - mutex_lock(&s->s_mutex); - mutex_unlock(&s->s_mutex); - ceph_put_mds_session(s); - mutex_lock(&mdsc->mutex); - } - mutex_unlock(&mdsc->mutex); + mutex_lock(&s->s_mutex); + mutex_unlock(&s->s_mutex); } static void maybe_recover_session(struct ceph_mds_client *mdsc) @@ -4647,6 +4670,30 @@ static void wait_requests(struct ceph_mds_client *mdsc) dout("wait_requests done\n"); } +void send_flush_mdlog(struct ceph_mds_session *s) +{ + struct ceph_msg *msg; + + /* + * Pre-luminous MDS crashes when it sees an unknown session request + */ + if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS)) + return; + + mutex_lock(&s->s_mutex); + dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds, + ceph_session_state_name(s->s_state), s->s_seq); + msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG, + s->s_seq); + if (!msg) { + pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n", + s->s_mds, ceph_session_state_name(s->s_state), s->s_seq); + } else { + ceph_con_send(&s->s_con, msg); + } + mutex_unlock(&s->s_mutex); +} + /* * called before mount is ro, and before dentries are torn down. * (hmm, does this still race with new lookups?) 
@@ -4656,7 +4703,8 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
 	dout("pre_umount\n");
 	mdsc->stopping = 1;

-	lock_unlock_sessions(mdsc);
+	ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
+	ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
 	ceph_flush_dirty_caps(mdsc);
 	wait_requests(mdsc);

diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index acf33d7192bb6..a92e42e8a9f82 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -518,6 +518,11 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
 	kref_put(&req->r_kref, ceph_mdsc_release_request);
 }

+extern void send_flush_mdlog(struct ceph_mds_session *s);
+extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
+				       void (*cb)(struct ceph_mds_session *),
+				       bool check_state);
+extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq);
 extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
 				     struct ceph_cap *cap);
 extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 0369f672a76fb..734873be56a74 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -697,9 +697,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
 	struct ceph_mds_snap_realm *ri;    /* encoded */
 	__le64 *snaps;                     /* encoded */
 	__le64 *prior_parent_snaps;        /* encoded */
-	struct ceph_snap_realm *realm = NULL;
+	struct ceph_snap_realm *realm;
 	struct ceph_snap_realm *first_realm = NULL;
-	int invalidate = 0;
+	struct ceph_snap_realm *realm_to_rebuild = NULL;
+	int rebuild_snapcs;
 	int err = -ENOMEM;
 	LIST_HEAD(dirty_realms);

@@ -707,6 +708,8 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
 	dout("update_snap_trace deletion=%d\n", deletion);
 more:
+	realm = NULL;
+	rebuild_snapcs = 0;
 	ceph_decode_need(&p, e, sizeof(*ri), bad);
 	ri = p;
 	p += sizeof(*ri);
@@ -730,7 +733,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
 		err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
 		if (err < 0)
 			goto fail;
-		invalidate += err;
+		rebuild_snapcs += err;

 		if (le64_to_cpu(ri->seq) > realm->seq) {
 			dout("update_snap_trace updating %llx %p %lld -> %lld\n",
@@ -755,22 +758,30 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
 		if (realm->seq > mdsc->last_snap_seq)
 			mdsc->last_snap_seq = realm->seq;

-		invalidate = 1;
+		rebuild_snapcs = 1;
 	} else if (!realm->cached_context) {
 		dout("update_snap_trace %llx %p seq %lld new\n",
 		     realm->ino, realm, realm->seq);
-		invalidate = 1;
+		rebuild_snapcs = 1;
 	} else {
 		dout("update_snap_trace %llx %p seq %lld unchanged\n",
 		     realm->ino, realm, realm->seq);
 	}

-	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
-	     realm, invalidate, p, e);
+	dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
+	     realm, rebuild_snapcs, p, e);
+
+	/*
+	 * this will always track the uppermost parent realm from which
+	 * we need to rebuild the snapshot contexts _downward_ in
+	 * hierarchy.
+ */ + if (rebuild_snapcs) + realm_to_rebuild = realm; - /* invalidate when we reach the _end_ (root) of the trace */ - if (invalidate && p >= e) - rebuild_snap_realms(realm, &dirty_realms); + /* rebuild_snapcs when we reach the _end_ (root) of the trace */ + if (realm_to_rebuild && p >= e) + rebuild_snap_realms(realm_to_rebuild, &dirty_realms); if (!first_realm) first_realm = realm; diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c index 4a79f3632260e..573bb9556fb56 100644 --- a/fs/ceph/strings.c +++ b/fs/ceph/strings.c @@ -46,6 +46,7 @@ const char *ceph_session_op_name(int op) case CEPH_SESSION_FLUSHMSG_ACK: return "flushmsg_ack"; case CEPH_SESSION_FORCE_RO: return "force_ro"; case CEPH_SESSION_REJECT: return "reject"; + case CEPH_SESSION_REQUEST_FLUSH_MDLOG: return "flush_mdlog"; } return "???"; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 4db305fd2a02a..8716cb618cbb7 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -772,7 +772,6 @@ struct ceph_file_info { struct list_head rw_contexts; u32 filp_gen; - atomic_t num_locks; }; struct ceph_dir_file_info { diff --git a/fs/char_dev.c b/fs/char_dev.c index ba0ded7842a77..3f667292608c0 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -547,7 +547,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev) } rc = device_add(dev); - if (rc) + if (rc && dev->devt) cdev_del(cdev); return rc; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index bc957e6ca48b9..0d8d7b57636f7 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -619,9 +619,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root) seq_printf(s, ",echo_interval=%lu", tcon->ses->server->echo_interval / HZ); - /* Only display max_credits if it was overridden on mount */ + /* Only display the following if overridden on mount */ if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE) seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits); + if (tcon->ses->server->tcp_nodelay) + seq_puts(s, ",tcpnodelay"); + if (tcon->ses->server->noautotune) + seq_puts(s, ",noautotune"); + if (tcon->ses->server->noblocksnd) + seq_puts(s, ",noblocksend"); if (tcon->snapshot_time) seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); @@ -1221,8 +1227,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, ssize_t rc; struct cifsFileInfo *cfile = dst_file->private_data; - if (cfile->swapfile) - return -EOPNOTSUPP; + if (cfile->swapfile) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, len, flags); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 196285b0fe46c..92a7628560ccb 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include #include "cifs_fs_sb.h" @@ -30,6 +32,7 @@ #include #include #include "smb2pdu.h" +#include "smb2glob.h" #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ @@ -2046,4 +2049,70 @@ static inline bool is_tcon_dfs(struct cifs_tcon *tcon) tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT); } +static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst, + int num_rqst, + const u8 *sig) +{ + unsigned int len, skip; + unsigned int nents = 0; + unsigned long addr; + int i, j; + + /* Assumes the first rqst has a transform header as the first iov. + * I.e. 
+ * rqst[0].rq_iov[0] is transform header + * rqst[0].rq_iov[1+] data to be encrypted/decrypted + * rqst[1+].rq_iov[0+] data to be encrypted/decrypted + */ + for (i = 0; i < num_rqst; i++) { + /* + * The first rqst has a transform header where the + * first 20 bytes are not part of the encrypted blob. + */ + for (j = 0; j < rqst[i].rq_nvec; j++) { + struct kvec *iov = &rqst[i].rq_iov[j]; + + skip = (i == 0) && (j == 0) ? 20 : 0; + addr = (unsigned long)iov->iov_base + skip; + if (unlikely(is_vmalloc_addr((void *)addr))) { + len = iov->iov_len - skip; + nents += DIV_ROUND_UP(offset_in_page(addr) + len, + PAGE_SIZE); + } else { + nents++; + } + } + nents += rqst[i].rq_npages; + } + nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE); + return nents; +} + +/* We can not use the normal sg_set_buf() as we will sometimes pass a + * stack object as buf. + */ +static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg, + const void *buf, + unsigned int buflen) +{ + unsigned long addr = (unsigned long)buf; + unsigned int off = offset_in_page(addr); + + addr &= PAGE_MASK; + if (unlikely(is_vmalloc_addr((void *)addr))) { + do { + unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off); + + sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off); + + off = 0; + addr += PAGE_SIZE; + buflen -= len; + } while (buflen); + } else { + sg_set_page(sg++, virt_to_page(addr), buflen, off); + } + return sg; +} + #endif /* _CIFS_GLOB_H */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 24c6f36177bac..ca34cc1e1931a 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -230,6 +230,8 @@ extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace); extern void dequeue_mid(struct mid_q_entry *mid, bool malformed); extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read); +extern ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server, + size_t to_read); extern int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, @@ -600,8 +602,8 @@ int cifs_alloc_hash(const char *name, struct crypto_shash **shash, struct sdesc **sdesc); void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc); -extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page, - unsigned int *len, unsigned int *offset); +void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page, + unsigned int *len, unsigned int *offset); struct cifs_chan * cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server); int cifs_try_adding_channels(struct cifs_ses *ses); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 0496934feecb7..c279527aae92d 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1451,9 +1451,9 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server) while (remaining > 0) { int length; - length = cifs_read_from_socket(server, server->bigbuf, - min_t(unsigned int, remaining, - CIFSMaxBufSize + MAX_HEADER_SIZE(server))); + length = cifs_discard_from_socket(server, + min_t(size_t, remaining, + CIFSMaxBufSize + MAX_HEADER_SIZE(server))); if (length < 0) return length; server->total_read += length; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 7f5d173760cfc..164b985407160 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -695,9 +695,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) int length = 0; int total_read; - smb_msg->msg_control = NULL; - 
smb_msg->msg_controllen = 0; - for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze(); @@ -748,18 +745,33 @@ int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct kvec iov = {.iov_base = buf, .iov_len = to_read}; iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } +ssize_t +cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read) +{ + struct msghdr smb_msg = {}; + + /* + * iov_iter_discard already sets smb_msg.type and count and iov_offset + * and cifs_readv_from_socket sets msg_control and msg_controllen + * so little to initialize in struct msghdr + */ + iov_iter_discard(&smb_msg.msg_iter, READ, to_read); + + return cifs_readv_from_socket(server, &smb_msg); +} + int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct bio_vec bv = { .bv_page = page, .bv_len = to_read, .bv_offset = page_offset}; iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read); @@ -3026,7 +3038,7 @@ cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)), struct cifs_ses * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) { - int rc = -ENOMEM; + int rc = 0; unsigned int xid; struct cifs_ses *ses; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; @@ -3068,6 +3080,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) return ses; } + rc = -ENOMEM; + cifs_dbg(FYI, "Existing smb sess not found\n"); ses = sesInfoAlloc(); if (ses == NULL) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 6c06870f90184..5fe85dc0e2651 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1735,11 +1735,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) struct cifsFileInfo *cfile; __u32 type; - rc = -EACCES; xid = get_xid(); - if (!(fl->fl_flags & FL_FLOCK)) - return -ENOLCK; + if (!(fl->fl_flags & FL_FLOCK)) { + rc = -ENOLCK; + free_xid(xid); + return rc; + } cfile = (struct cifsFileInfo *)file->private_data; tcon = tlink_tcon(cfile->tlink); @@ -1758,8 +1760,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) * if no lock or unlock then nothing to do since we do not * know what it is */ + rc = -EOPNOTSUPP; free_xid(xid); - return -EOPNOTSUPP; + return rc; } rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, @@ -3244,6 +3247,9 @@ static ssize_t __cifs_writev( ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from) { + struct file *file = iocb->ki_filp; + + cifs_revalidate_mapping(file->f_inode); return __cifs_writev(iocb, from, true); } @@ -3533,7 +3539,7 @@ uncached_fill_pages(struct TCP_Server_Info *server, rdata->got_bytes += result; } - return rdata->got_bytes > 0 && result != -ECONNABORTED ? + return result != -ECONNABORTED && rdata->got_bytes > 0 ? 
rdata->got_bytes : result; } @@ -3933,6 +3939,15 @@ static ssize_t __cifs_readv( len = ctx->len; } + if (direct) { + rc = filemap_write_and_wait_range(file->f_inode->i_mapping, + offset, offset + len - 1); + if (rc) { + kref_put(&ctx->refcount, cifs_aio_ctx_release); + return -EAGAIN; + } + } + /* grab a lock here due to read response handlers can access ctx */ mutex_lock(&ctx->aio_mutex); @@ -4287,7 +4302,7 @@ readpages_fill_pages(struct TCP_Server_Info *server, rdata->got_bytes += result; } - return rdata->got_bytes > 0 && result != -ECONNABORTED ? + return result != -ECONNABORTED && rdata->got_bytes > 0 ? rdata->got_bytes : result; } diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index dcde44ff6cf9f..e45598b622427 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -193,7 +193,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE, (int __user *)arg); - if (rc != EOPNOTSUPP) + if (rc != -EOPNOTSUPP) break; } #endif /* CONFIG_CIFS_POSIX */ @@ -222,7 +222,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) * pSMBFile->fid.netfid, * extAttrBits, * &ExtAttrMask); - * if (rc != EOPNOTSUPP) + * if (rc != -EOPNOTSUPP) * break; */ diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 85d30fef98a29..4fc6eb8e786d4 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -471,6 +471,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, oparms.disposition = FILE_CREATE; oparms.fid = &fid; oparms.reconnect = false; + oparms.mode = 0644; rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, NULL); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 9d740916a8ee5..9044b0fca9a3d 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -974,8 +974,8 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc) * Input: rqst - a smb_rqst, page - a page index for rqst * Output: *len - the length for this page, *offset - the offset for this page */ -void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page, - unsigned int *len, unsigned int *offset) +void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page, + unsigned int *len, unsigned int *offset) { *len = rqst->rq_pagesz; *offset = (page == 0) ? 
rqst->rq_offset : 0; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index d58c5ffeca0d9..cf6fd138d8d5c 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -306,6 +306,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface) cifs_put_tcp_session(chan->server, 0); unload_nls(vol.local_nls); + free_xid(xid); return rc; } diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 2fa3ba354cc96..001c26daacbaa 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -74,7 +74,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, nr_ioctl_req.Reserved = 0; rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, - true /* is_fsctl */, (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), CIFSMaxBufSize, NULL, NULL /* no return info */); if (rc == -EOPNOTSUPP) { diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index b6d72e3c5ebad..844db4652dd1d 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -587,7 +587,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) struct cifs_ses *ses = tcon->ses; rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, + FSCTL_QUERY_NETWORK_INTERFACE_INFO, NULL /* no data input */, 0 /* no data input */, CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); if (rc == -EOPNOTSUPP) { @@ -1256,6 +1256,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, COMPOUND_FID, current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); + if (rc) + goto sea_exit; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); @@ -1266,6 +1268,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, rqst[2].rq_nvec = 1; rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); + if (rc) + goto sea_exit; smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, ses, server, @@ -1470,9 +1474,8 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, struct resume_key_req *res_key; rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, - NULL, 0 /* no input */, CIFSMaxBufSize, - (char **)&res_key, &ret_data_len); + FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */, + CIFSMaxBufSize, (char **)&res_key, &ret_data_len); if (rc) { cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc); @@ -1611,7 +1614,7 @@ smb2_ioctl_query_info(const unsigned int xid, rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, - qi.info_type, true, buffer, qi.output_buffer_length, + qi.info_type, buffer, qi.output_buffer_length, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); free_req1_func = SMB2_ioctl_free; @@ -1787,9 +1790,8 @@ smb2_copychunk_range(const unsigned int xid, retbuf = NULL; rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, - true /* is_fsctl */, (char *)pcchunk, - sizeof(struct copychunk_ioctl), CIFSMaxBufSize, - (char **)&retbuf, &ret_data_len); + (char *)pcchunk, sizeof(struct copychunk_ioctl), + CIFSMaxBufSize, (char **)&retbuf, &ret_data_len); if (rc == 0) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) { @@ -1949,7 +1951,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_SPARSE, - true /* is_fctl */, 
&setsparse, 1, CIFSMaxBufSize, NULL, NULL); if (rc) { tcon->broken_sparse_sup = true; @@ -2032,7 +2033,6 @@ smb2_duplicate_extents(const unsigned int xid, rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, - true /* is_fsctl */, (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), CIFSMaxBufSize, NULL, @@ -2067,7 +2067,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_INTEGRITY_INFORMATION, - true /* is_fsctl */, (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), CIFSMaxBufSize, NULL, @@ -2120,7 +2119,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SRV_ENUMERATE_SNAPSHOTS, - true /* is_fsctl */, NULL, 0 /* no input data */, max_response_size, (char **)&retbuf, &ret_data_len); @@ -2762,7 +2760,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, do { rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_DFS_GET_REFERRALS, - true /* is_fsctl */, (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, (char **)&dfs_rsp, &dfs_rsp_size); } while (rc == -EAGAIN); @@ -2964,8 +2961,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl_init(tcon, server, &rqst[1], fid.persistent_fid, - fid.volatile_fid, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3145,8 +3141,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, - COMPOUND_FID, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3409,7 +3404,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true, + cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, (char *)&fsctl_buf, sizeof(struct file_zero_data_information), 0, NULL, NULL); @@ -3439,7 +3434,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, loff_t offset, loff_t len) { - struct inode *inode; + struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; struct file_zero_data_information fsctl_buf; long rc; @@ -3448,14 +3443,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, xid = get_xid(); - inode = d_inode(cfile->dentry); - + inode_lock(inode); /* Need to make file sparse, if not already, before freeing range. 
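The smb3_punch_hole() rework that continues just below is a standard single-exit cleanup conversion: inode_lock() is now held across the whole punch-hole operation, and every failure path leaves through one label so that inode_unlock() and free_xid() can never be skipped. A reduced sketch of the shape; make_file_sparse() and zero_out_range() are hypothetical stand-ins for smb2_set_sparse() and the FSCTL_SET_ZERO_DATA ioctl, not cifs.ko functions.

/* Sketch of the single-exit locking pattern adopted below. */
static long punch_hole_shape(struct inode *inode, unsigned int xid)
{
        long rc;

        inode_lock(inode);
        if (!make_file_sparse(inode)) {         /* cf. smb2_set_sparse() */
                rc = -EOPNOTSUPP;
                goto out;
        }
        rc = zero_out_range(inode);             /* cf. FSCTL_SET_ZERO_DATA */
out:
        inode_unlock(inode);                    /* runs on every path */
        free_xid(xid);
        return rc;
}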
*/ /* Consider adding equivalent for compressed since it could also work */ if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { rc = -EOPNOTSUPP; - free_xid(xid); - return rc; + goto out; } /* @@ -3471,9 +3464,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), CIFSMaxBufSize, NULL, NULL); +out: + inode_unlock(inode); free_xid(xid); return rc; } @@ -3530,7 +3525,7 @@ static int smb3_simple_fallocate_range(unsigned int xid, in_data.length = cpu_to_le64(len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -3771,7 +3766,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -3831,7 +3826,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -4169,69 +4164,82 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len, memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8); } -/* We can not use the normal sg_set_buf() as we will sometimes pass a - * stack object as buf. - */ -static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf, - unsigned int buflen) +static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst, + int num_rqst, const u8 *sig, u8 **iv, + struct aead_request **req, struct scatterlist **sgl, + unsigned int *num_sgs) { - void *addr; - /* - * VMAP_STACK (at least) puts stack into the vmalloc address space - */ - if (is_vmalloc_addr(buf)) - addr = vmalloc_to_page(buf); - else - addr = virt_to_page(buf); - sg_set_page(sg, addr, buflen, offset_in_page(buf)); + unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm); + unsigned int iv_size = crypto_aead_ivsize(tfm); + unsigned int len; + u8 *p; + + *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig); + + len = iv_size; + len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); + len = ALIGN(len, crypto_tfm_ctx_alignment()); + len += req_size; + len = ALIGN(len, __alignof__(struct scatterlist)); + len += *num_sgs * sizeof(**sgl); + + p = kmalloc(len, GFP_ATOMIC); + if (!p) + return NULL; + + *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1); + *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size, + crypto_tfm_ctx_alignment()); + *sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size, + __alignof__(struct scatterlist)); + return p; } -/* Assumes the first rqst has a transform header as the first iov. - * I.e. 
- * rqst[0].rq_iov[0] is transform header - * rqst[0].rq_iov[1+] data to be encrypted/decrypted - * rqst[1+].rq_iov[0+] data to be encrypted/decrypted - */ -static struct scatterlist * -init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign) +static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst, + int num_rqst, const u8 *sig, u8 **iv, + struct aead_request **req, struct scatterlist **sgl) { - unsigned int sg_len; + unsigned int off, len, skip; struct scatterlist *sg; - unsigned int i; - unsigned int j; - unsigned int idx = 0; - int skip; - - sg_len = 1; - for (i = 0; i < num_rqst; i++) - sg_len += rqst[i].rq_nvec + rqst[i].rq_npages; + unsigned int num_sgs; + unsigned long addr; + int i, j; + void *p; - sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL); - if (!sg) + p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs); + if (!p) return NULL; - sg_init_table(sg, sg_len); + sg_init_table(*sgl, num_sgs); + sg = *sgl; + + /* Assumes the first rqst has a transform header as the first iov. + * I.e. + * rqst[0].rq_iov[0] is transform header + * rqst[0].rq_iov[1+] data to be encrypted/decrypted + * rqst[1+].rq_iov[0+] data to be encrypted/decrypted + */ for (i = 0; i < num_rqst; i++) { + /* + * The first rqst has a transform header where the + * first 20 bytes are not part of the encrypted blob. + */ for (j = 0; j < rqst[i].rq_nvec; j++) { - /* - * The first rqst has a transform header where the - * first 20 bytes are not part of the encrypted blob - */ - skip = (i == 0) && (j == 0) ? 20 : 0; - smb2_sg_set_buf(&sg[idx++], - rqst[i].rq_iov[j].iov_base + skip, - rqst[i].rq_iov[j].iov_len - skip); - } + struct kvec *iov = &rqst[i].rq_iov[j]; + skip = (i == 0) && (j == 0) ? 20 : 0; + addr = (unsigned long)iov->iov_base + skip; + len = iov->iov_len - skip; + sg = cifs_sg_set_buf(sg, (void *)addr, len); + } for (j = 0; j < rqst[i].rq_npages; j++) { - unsigned int len, offset; - - rqst_page_get_length(&rqst[i], j, &len, &offset); - sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset); + rqst_page_get_length(&rqst[i], j, &len, &off); + sg_set_page(sg++, rqst[i].rq_pages[j], len, off); } } - smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE); - return sg; + cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE); + + return p; } static int @@ -4275,11 +4283,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, u8 sign[SMB2_SIGNATURE_SIZE] = {}; u8 key[SMB3_ENC_DEC_KEY_SIZE]; struct aead_request *req; - char *iv; - unsigned int iv_len; + u8 *iv; DECLARE_CRYPTO_WAIT(wait); struct crypto_aead *tfm; unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); + void *creq; rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); if (rc) { @@ -4314,32 +4322,15 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, return rc; } - req = aead_request_alloc(tfm, GFP_KERNEL); - if (!req) { - cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__); + creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg); + if (unlikely(!creq)) return -ENOMEM; - } if (!enc) { memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE); crypt_len += SMB2_SIGNATURE_SIZE; } - sg = init_sg(num_rqst, rqst, sign); - if (!sg) { - cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__); - rc = -ENOMEM; - goto free_req; - } - - iv_len = crypto_aead_ivsize(tfm); - iv = kzalloc(iv_len, GFP_KERNEL); - if (!iv) { - cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__); - rc = -ENOMEM; - goto free_sg; - } - if 
((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE); @@ -4348,6 +4339,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE); } + aead_request_set_tfm(req, tfm); aead_request_set_crypt(req, sg, sg, crypt_len, iv); aead_request_set_ad(req, assoc_data_len); @@ -4360,11 +4352,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); - kfree(iv); -free_sg: - kfree(sg); -free_req: - kfree(req); + kfree_sensitive(creq); return rc; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 24dd711fa9b95..9a80047bc9b7b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1075,13 +1075,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) pneg_inbuf->Dialects[0] = cpu_to_le16(server->vals->protocol_id); pneg_inbuf->DialectCount = cpu_to_le16(1); - /* structure is big enough for 3 dialects, sending only 1 */ + /* structure is big enough for 4 dialects, sending only 1 */ inbuflen = sizeof(*pneg_inbuf) - - sizeof(pneg_inbuf->Dialects[0]) * 2; + sizeof(pneg_inbuf->Dialects[0]) * 3; } rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, + FSCTL_VALIDATE_NEGOTIATE_INFO, (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, (char **)&pneg_rsp, &rsplen); if (rc == -EOPNOTSUPP) { @@ -2294,7 +2294,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) unsigned int acelen, acl_size, ace_count; unsigned int owner_offset = 0; unsigned int group_offset = 0; - struct smb3_acl acl; + struct smb3_acl acl = {}; *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); @@ -2367,6 +2367,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ acl.AclSize = cpu_to_le16(acl_size); acl.AceCount = cpu_to_le16(ace_count); + /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */ memcpy(aclptr, &acl, sizeof(struct smb3_acl)); buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); @@ -2922,7 +2923,7 @@ int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, u32 indatalen, __u32 max_response_size) { struct smb2_ioctl_req *req; @@ -2997,10 +2998,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, req->sync_hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), SMB2_MAX_BUFFER_SIZE)); - if (is_fsctl) - req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); - else - req->Flags = 0; + /* always an FSCTL (for now) */ + req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) @@ -3027,9 +3026,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst) */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, - u64 volatile_fid, u32 opcode, bool is_fsctl, - char *in_data, u32 indatalen, u32 max_out_data_len, - char **out_data, u32 *plen /* returned data len */) + u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, + u32 max_out_data_len, char **out_data, + u32 *plen /* returned data len */) { struct smb_rqst rqst; struct smb2_ioctl_rsp *rsp = NULL; @@ 
-3071,7 +3070,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, rc = SMB2_ioctl_init(tcon, server, &rqst, persistent_fid, volatile_fid, opcode, - is_fsctl, in_data, indatalen, max_out_data_len); + in_data, indatalen, max_out_data_len); if (rc) goto ioctl_exit; @@ -3153,7 +3152,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SET_COMPRESSION, true /* is_fsctl */, + FSCTL_SET_COMPRESSION, (char *)&fsctl_input /* data input */, 2 /* in data len */, CIFSMaxBufSize /* max out data */, &ret_data /* out data */, NULL); @@ -3926,12 +3925,15 @@ smb2_readv_callback(struct mid_q_entry *mid) (struct smb2_sync_hdr *)rdata->iov[0].iov_base; struct cifs_credits credits = { .value = 0, .instance = 0 }; struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], - .rq_nvec = 1, - .rq_pages = rdata->pages, - .rq_offset = rdata->page_offset, - .rq_npages = rdata->nr_pages, - .rq_pagesz = rdata->pagesz, - .rq_tailsz = rdata->tailsz }; + .rq_nvec = 1, }; + + if (rdata->got_bytes) { + rqst.rq_pages = rdata->pages; + rqst.rq_offset = rdata->page_offset; + rqst.rq_npages = rdata->nr_pages; + rqst.rq_pagesz = rdata->pagesz; + rqst.rq_tailsz = rdata->tailsz; + } WARN_ONCE(rdata->server != mid->server, "rdata server %p != mid server %p", diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 4eb0ca84355a6..ed2b4fb012a41 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -155,13 +155,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, extern void SMB2_open_free(struct smb_rqst *rqst); extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, + char *in_data, u32 indatalen, u32 maxoutlen, char **out_data, u32 *plen /* returned data len */); extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, u32 indatalen, __u32 max_response_size); extern void SMB2_ioctl_free(struct smb_rqst *rqst); extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index b029ed31ef916..f73f9b0625251 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -1394,6 +1394,7 @@ void smbd_destroy(struct TCP_Server_Info *server) destroy_workqueue(info->workqueue); log_rdma_event(INFO, "rdma session destroyed\n"); kfree(info); + server->smbd_conn = NULL; } /* diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 503a0056b60f2..b137006f0fd25 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -209,10 +209,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, *sent = 0; - smb_msg->msg_name = (struct sockaddr *) &server->dstaddr; - smb_msg->msg_namelen = sizeof(struct sockaddr); - smb_msg->msg_control = NULL; - smb_msg->msg_controllen = 0; if (server->noblocksnd) smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else @@ -324,7 +320,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, sigset_t mask, oldmask; size_t total_len = 0, sent, size; struct socket *ssocket = server->ssocket; - struct msghdr smb_msg; + struct msghdr smb_msg = {}; __be32 rfc1002_marker; if (cifs_rdma_enabled(server)) { diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 
5ad27e484014f..12388ed4faa59 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -317,6 +317,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry, return 0; out_remove: + configfs_put(dentry->d_fsdata); configfs_remove_dirent(dentry); return PTR_ERR(inode); } @@ -383,6 +384,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent, return 0; out_remove: + configfs_put(dentry->d_fsdata); configfs_remove_dirent(dentry); return PTR_ERR(inode); } diff --git a/fs/coredump.c b/fs/coredump.c index edbaf61125c9c..9d91e831ed0b2 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -523,7 +523,7 @@ static bool dump_interrupted(void) * but then we need to teach dump_write() to restart and clear * TIF_SIGPENDING. */ - return signal_pending(current); + return fatal_signal_pending(current) || freezing(current); } static void wait_for_dump_helpers(struct file *file) diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 052ad40ecdb28..b746d7df37582 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -220,7 +220,7 @@ struct fscrypt_info { * will be NULL if the master key was found in a process-subscribed * keyring rather than in the filesystem-level keyring. */ - struct key *ci_master_key; + struct fscrypt_master_key *ci_master_key; /* * Link in list of inodes that were unlocked with the master key. @@ -431,6 +431,40 @@ struct fscrypt_master_key_secret { */ struct fscrypt_master_key { + /* + * Back-pointer to the super_block of the filesystem to which this + * master key has been added. Only valid if ->mk_active_refs > 0. + */ + struct super_block *mk_sb; + + /* + * Link in ->mk_sb->s_master_keys->key_hashtable. + * Only valid if ->mk_active_refs > 0. + */ + struct hlist_node mk_node; + + /* Semaphore that protects ->mk_secret and ->mk_users */ + struct rw_semaphore mk_sem; + + /* + * Active and structural reference counts. An active ref guarantees + * that the struct continues to exist, continues to be in the keyring + * ->mk_sb->s_master_keys, and that any embedded subkeys (e.g. + * ->mk_direct_keys) that have been prepared continue to exist. + * A structural ref only guarantees that the struct continues to exist. + * + * There is one active ref associated with ->mk_secret being present, + * and one active ref for each inode in ->mk_decrypted_inodes. + * + * There is one structural ref associated with the active refcount being + * nonzero. Finding a key in the keyring also takes a structural ref, + * which is then held temporarily while the key is operated on. + */ + refcount_t mk_active_refs; + refcount_t mk_struct_refs; + + struct rcu_head mk_rcu_head; + /* * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is * executed, this is wiped and no new inodes can be unlocked with this @@ -439,16 +473,12 @@ struct fscrypt_master_key { * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again. * - * Locking: protected by key->sem (outer) and mk_secret_sem (inner). - * The reason for two locks is that key->sem also protects modifying - * mk_users, which ranks it above the semaphore for the keyring key - * type, which is in turn above page faults (via keyring_read). But - * sometimes filesystems call fscrypt_get_encryption_info() from within - * a transaction, which ranks it below page faults. So we need a - * separate lock which protects mk_secret but not also mk_users. + * While ->mk_secret is present, one ref in ->mk_active_refs is held. 
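The comment block continuing below documents the core idea of this fscrypt rework: a two-level refcount, where ->mk_active_refs keeps the key usable and findable while ->mk_struct_refs merely keeps the memory alive. Reduced to generic names, the pattern looks like this; it is a sketch of the idiom, not the fscrypt API itself, and free_obj_rcu()/unhash_and_wipe() are hypothetical helpers.

struct obj {
        refcount_t active_refs;   /* object usable and in the hash table */
        refcount_t struct_refs;   /* memory may still be dereferenced */
};

static void put_struct_ref(struct obj *o)
{
        if (refcount_dec_and_test(&o->struct_refs))
                free_obj_rcu(o);        /* hypothetical RCU-deferred free */
}

static void put_active_ref(struct obj *o)
{
        if (!refcount_dec_and_test(&o->active_refs))
                return;
        unhash_and_wipe(o);     /* hypothetical: unhash + wipe payload */
        put_struct_ref(o);      /* ref held while active_refs was nonzero */
}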
+	 *
+	 * Locking: protected by ->mk_sem.  The manipulation of ->mk_active_refs
+	 * associated with this field is protected by ->mk_sem as well.
 	 */
 	struct fscrypt_master_key_secret	mk_secret;

-	struct rw_semaphore	mk_secret_sem;

 	/*
 	 * For v1 policy keys: an arbitrary key descriptor which was assigned by
@@ -467,22 +497,12 @@ struct fscrypt_master_key {
 	 *
 	 * This is NULL for v1 policy keys; those can only be added by root.
 	 *
-	 * Locking: in addition to this keyring's own semaphore, this is
-	 * protected by the master key's key->sem, so we can do atomic
-	 * search+insert.  It can also be searched without taking any locks, but
-	 * in that case the returned key may have already been removed.
+	 * Locking: protected by ->mk_sem.  (We don't just rely on the keyrings
+	 * subsystem semaphore ->mk_users->sem, as we need support for atomic
+	 * search+insert along with proper synchronization with ->mk_secret.)
 	 */
 	struct key		*mk_users;

-	/*
-	 * Length of ->mk_decrypted_inodes, plus one if mk_secret is present.
-	 * Once this goes to 0, the master key is removed from ->s_master_keys.
-	 * The 'struct fscrypt_master_key' will continue to live as long as the
-	 * 'struct key' whose payload it is, but we won't let this reference
-	 * count rise again.
-	 */
-	refcount_t		mk_refcount;
-
 	/*
 	 * List of inodes that were unlocked using this key.  This allows the
 	 * inodes to be evicted efficiently if the key is removed.
@@ -508,11 +528,11 @@ static inline bool
 is_master_key_secret_present(const struct fscrypt_master_key_secret *secret)
 {
 	/*
-	 * The READ_ONCE() is only necessary for fscrypt_drop_inode() and
-	 * fscrypt_key_describe().  These run in atomic context, so they can't
-	 * take ->mk_secret_sem and thus 'secret' can change concurrently which
-	 * would be a data race.  But they only need to know whether the secret
-	 * *was* present at the time of check, so READ_ONCE() suffices.
+	 * The READ_ONCE() is only necessary for fscrypt_drop_inode().
+	 * fscrypt_drop_inode() runs in atomic context, so it can't take the key
+	 * semaphore and thus 'secret' can change concurrently which would be a
+	 * data race.  But fscrypt_drop_inode() only needs to know whether the
+	 * secret *was* present at the time of check, so READ_ONCE() suffices.
 	 */
 	return READ_ONCE(secret->size) != 0;
 }
@@ -540,7 +560,11 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec)
 	return 0;
 }

-struct key *
+void fscrypt_put_master_key(struct fscrypt_master_key *mk);
+
+void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk);
+
+struct fscrypt_master_key *
 fscrypt_find_master_key(struct super_block *sb,
 			const struct fscrypt_key_specifier *mk_spec);

diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 4180371bf8642..8268206ef21e5 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -5,8 +5,6 @@
  * Encryption hooks for higher-level filesystem operations.
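One practical consequence, visible in the hooks.c hunk just below: code that used to nest key->sem outside mk_secret_sem now takes the master key's single ->mk_sem, shared for readers of the secret and exclusive for add/remove. A hedged sketch of the reader-side contract; with_secret() is illustrative only and not part of the fscrypt API.

/* Hold ->mk_sem shared across any use of mk->mk_secret, so a concurrent
 * FS_IOC_REMOVE_ENCRYPTION_KEY cannot wipe the secret mid-use. */
static int with_secret(struct fscrypt_master_key *mk,
                       int (*fn)(struct fscrypt_master_key *mk))
{
        int err = -ENOKEY;

        down_read(&mk->mk_sem);
        if (is_master_key_secret_present(&mk->mk_secret))
                err = fn(mk);
        up_read(&mk->mk_sem);
        return err;
}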
*/ -#include - #include "fscrypt_private.h" /** @@ -154,13 +152,13 @@ int fscrypt_prepare_setflags(struct inode *inode, ci = inode->i_crypt_info; if (ci->ci_policy.version != FSCRYPT_POLICY_V2) return -EINVAL; - mk = ci->ci_master_key->payload.data[0]; - down_read(&mk->mk_secret_sem); + mk = ci->ci_master_key; + down_read(&mk->mk_sem); if (is_master_key_secret_present(&mk->mk_secret)) err = fscrypt_derive_dirhash_key(ci, mk); else err = -ENOKEY; - up_read(&mk->mk_secret_sem); + up_read(&mk->mk_sem); return err; } return 0; diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index d7ec52cb3d9af..02f8bf8bd54da 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -18,6 +18,7 @@ * information about these ioctls. */ +#include #include #include #include @@ -25,6 +26,18 @@ #include "fscrypt_private.h" +/* The master encryption keys for a filesystem (->s_master_keys) */ +struct fscrypt_keyring { + /* + * Lock that protects ->key_hashtable. It does *not* protect the + * fscrypt_master_key structs themselves. + */ + spinlock_t lock; + + /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */ + struct hlist_head key_hashtable[128]; +}; + static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret) { fscrypt_destroy_hkdf(&secret->hkdf); @@ -38,20 +51,70 @@ static void move_master_key_secret(struct fscrypt_master_key_secret *dst, memzero_explicit(src, sizeof(*src)); } -static void free_master_key(struct fscrypt_master_key *mk) +static void fscrypt_free_master_key(struct rcu_head *head) +{ + struct fscrypt_master_key *mk = + container_of(head, struct fscrypt_master_key, mk_rcu_head); + /* + * The master key secret and any embedded subkeys should have already + * been wiped when the last active reference to the fscrypt_master_key + * struct was dropped; doing it here would be unnecessarily late. + * Nevertheless, use kfree_sensitive() in case anything was missed. + */ + kfree_sensitive(mk); +} + +void fscrypt_put_master_key(struct fscrypt_master_key *mk) +{ + if (!refcount_dec_and_test(&mk->mk_struct_refs)) + return; + /* + * No structural references left, so free ->mk_users, and also free the + * fscrypt_master_key struct itself after an RCU grace period ensures + * that concurrent keyring lookups can no longer find it. + */ + WARN_ON(refcount_read(&mk->mk_active_refs) != 0); + key_put(mk->mk_users); + mk->mk_users = NULL; + call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key); +} + +void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk) { + struct super_block *sb = mk->mk_sb; + struct fscrypt_keyring *keyring = sb->s_master_keys; size_t i; - wipe_master_key_secret(&mk->mk_secret); + if (!refcount_dec_and_test(&mk->mk_active_refs)) + return; + /* + * No active references left, so complete the full removal of this + * fscrypt_master_key struct by removing it from the keyring and + * destroying any subkeys embedded in it. + */ + + spin_lock(&keyring->lock); + hlist_del_rcu(&mk->mk_node); + spin_unlock(&keyring->lock); + + /* + * ->mk_active_refs == 0 implies that ->mk_secret is not present and + * that ->mk_decrypted_inodes is empty. 
+ */ + WARN_ON(is_master_key_secret_present(&mk->mk_secret)); + WARN_ON(!list_empty(&mk->mk_decrypted_inodes)); for (i = 0; i <= FSCRYPT_MODE_MAX; i++) { fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]); } + memzero_explicit(&mk->mk_ino_hash_key, + sizeof(mk->mk_ino_hash_key)); + mk->mk_ino_hash_key_initialized = false; - key_put(mk->mk_users); - kfree_sensitive(mk); + /* Drop the structural ref associated with the active refs. */ + fscrypt_put_master_key(mk); } static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) @@ -61,44 +124,6 @@ static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) return master_key_spec_len(spec) != 0; } -static int fscrypt_key_instantiate(struct key *key, - struct key_preparsed_payload *prep) -{ - key->payload.data[0] = (struct fscrypt_master_key *)prep->data; - return 0; -} - -static void fscrypt_key_destroy(struct key *key) -{ - free_master_key(key->payload.data[0]); -} - -static void fscrypt_key_describe(const struct key *key, struct seq_file *m) -{ - seq_puts(m, key->description); - - if (key_is_positive(key)) { - const struct fscrypt_master_key *mk = key->payload.data[0]; - - if (!is_master_key_secret_present(&mk->mk_secret)) - seq_puts(m, ": secret removed"); - } -} - -/* - * Type of key in ->s_master_keys. Each key of this type represents a master - * key which has been added to the filesystem. Its payload is a - * 'struct fscrypt_master_key'. The "." prefix in the key type name prevents - * users from adding keys of this type via the keyrings syscalls rather than via - * the intended method of FS_IOC_ADD_ENCRYPTION_KEY. - */ -static struct key_type key_type_fscrypt = { - .name = "._fscrypt", - .instantiate = fscrypt_key_instantiate, - .destroy = fscrypt_key_destroy, - .describe = fscrypt_key_describe, -}; - static int fscrypt_user_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { @@ -131,32 +156,6 @@ static struct key_type key_type_fscrypt_user = { .describe = fscrypt_user_key_describe, }; -/* Search ->s_master_keys or ->mk_users */ -static struct key *search_fscrypt_keyring(struct key *keyring, - struct key_type *type, - const char *description) -{ - /* - * We need to mark the keyring reference as "possessed" so that we - * acquire permission to search it, via the KEY_POS_SEARCH permission. 
- */ - key_ref_t keyref = make_key_ref(keyring, true /* possessed */); - - keyref = keyring_search(keyref, type, description, false); - if (IS_ERR(keyref)) { - if (PTR_ERR(keyref) == -EAGAIN || /* not found */ - PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ - keyref = ERR_PTR(-ENOKEY); - return ERR_CAST(keyref); - } - return key_ref_to_ptr(keyref); -} - -#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \ - (CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id)) - -#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1) - #define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \ (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \ CONST_STRLEN("-users") + 1) @@ -164,21 +163,6 @@ static struct key *search_fscrypt_keyring(struct key *keyring, #define FSCRYPT_MK_USER_DESCRIPTION_SIZE \ (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1) -static void format_fs_keyring_description( - char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE], - const struct super_block *sb) -{ - sprintf(description, "fscrypt-%s", sb->s_id); -} - -static void format_mk_description( - char description[FSCRYPT_MK_DESCRIPTION_SIZE], - const struct fscrypt_key_specifier *mk_spec) -{ - sprintf(description, "%*phN", - master_key_spec_len(mk_spec), (u8 *)&mk_spec->u); -} - static void format_mk_users_keyring_description( char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) @@ -199,20 +183,15 @@ static void format_mk_user_description( /* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */ static int allocate_filesystem_keyring(struct super_block *sb) { - char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE]; - struct key *keyring; + struct fscrypt_keyring *keyring; if (sb->s_master_keys) return 0; - format_fs_keyring_description(description, sb); - keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - current_cred(), KEY_POS_SEARCH | - KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, - KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); - if (IS_ERR(keyring)) - return PTR_ERR(keyring); - + keyring = kzalloc(sizeof(*keyring), GFP_KERNEL); + if (!keyring) + return -ENOMEM; + spin_lock_init(&keyring->lock); /* * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). * I.e., here we publish ->s_master_keys with a RELEASE barrier so that @@ -222,21 +201,80 @@ static int allocate_filesystem_keyring(struct super_block *sb) return 0; } -void fscrypt_sb_free(struct super_block *sb) +/* + * Release all encryption keys that have been added to the filesystem, along + * with the keyring that contains them. + * + * This is called at unmount time. The filesystem's underlying block device(s) + * are still available at this time; this is important because after user file + * accesses have been allowed, this function may need to evict keys from the + * keyslots of an inline crypto engine, which requires the block device(s). + * + * This is also called when the super_block is being freed. This is needed to + * avoid a memory leak if mounting fails after the "test_dummy_encryption" + * option was processed, as in that case the unmount-time call isn't made. 
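The body of fscrypt_destroy_keyring(), which follows, has to walk each hash bucket with the _safe list iterator: dropping the last active ref via fscrypt_put_master_key_activeref() unhashes the current entry and may free it, so the plain iterator would step on freed memory. The core of the loop, reduced to its shape (mk, tmp and bucket as in the function below):

/* The _safe walker is required because the put may unhash and free
 * the entry we are currently standing on. */
hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) {
        wipe_master_key_secret(&mk->mk_secret);
        fscrypt_put_master_key_activeref(mk);
}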
+ */ +void fscrypt_destroy_keyring(struct super_block *sb) { - key_put(sb->s_master_keys); + struct fscrypt_keyring *keyring = sb->s_master_keys; + size_t i; + + if (!keyring) + return; + + for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) { + struct hlist_head *bucket = &keyring->key_hashtable[i]; + struct fscrypt_master_key *mk; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) { + /* + * Since all inodes were already evicted, every key + * remaining in the keyring should have an empty inode + * list, and should only still be in the keyring due to + * the single active ref associated with ->mk_secret. + * There should be no structural refs beyond the one + * associated with the active ref. + */ + WARN_ON(refcount_read(&mk->mk_active_refs) != 1); + WARN_ON(refcount_read(&mk->mk_struct_refs) != 1); + WARN_ON(!is_master_key_secret_present(&mk->mk_secret)); + wipe_master_key_secret(&mk->mk_secret); + fscrypt_put_master_key_activeref(mk); + } + } + kfree_sensitive(keyring); sb->s_master_keys = NULL; } +static struct hlist_head * +fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring, + const struct fscrypt_key_specifier *mk_spec) +{ + /* + * Since key specifiers should be "random" values, it is sufficient to + * use a trivial hash function that just takes the first several bits of + * the key specifier. + */ + unsigned long i = get_unaligned((unsigned long *)&mk_spec->u); + + return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)]; +} + /* - * Find the specified master key in ->s_master_keys. - * Returns ERR_PTR(-ENOKEY) if not found. + * Find the specified master key struct in ->s_master_keys and take a structural + * ref to it. The structural ref guarantees that the key struct continues to + * exist, but it does *not* guarantee that ->s_master_keys continues to contain + * the key struct. The structural ref needs to be dropped by + * fscrypt_put_master_key(). Returns NULL if the key struct is not found. */ -struct key *fscrypt_find_master_key(struct super_block *sb, - const struct fscrypt_key_specifier *mk_spec) +struct fscrypt_master_key * +fscrypt_find_master_key(struct super_block *sb, + const struct fscrypt_key_specifier *mk_spec) { - struct key *keyring; - char description[FSCRYPT_MK_DESCRIPTION_SIZE]; + struct fscrypt_keyring *keyring; + struct hlist_head *bucket; + struct fscrypt_master_key *mk; /* * Pairs with the smp_store_release() in allocate_filesystem_keyring(). @@ -246,10 +284,38 @@ struct key *fscrypt_find_master_key(struct super_block *sb, */ keyring = smp_load_acquire(&sb->s_master_keys); if (keyring == NULL) - return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. */ - - format_mk_description(description, mk_spec); - return search_fscrypt_keyring(keyring, &key_type_fscrypt, description); + return NULL; /* No keyring yet, so no keys yet. 
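The lookup that continues below is the classic RCU find-and-reference idiom: walk the bucket under rcu_read_lock(), and only return the entry if refcount_inc_not_zero() wins, since a concurrent teardown may already have unhashed it and scheduled the RCU free. In generic form (struct obj reuses the names from the refcount sketch earlier; the key and node members are hypothetical):

static struct obj *find_obj(struct hlist_head *bucket, u64 key)
{
        struct obj *o;

        rcu_read_lock();
        hlist_for_each_entry_rcu(o, bucket, node) {
                /* the inc-not-zero is what makes returning o safe */
                if (o->key == key &&
                    refcount_inc_not_zero(&o->struct_refs)) {
                        rcu_read_unlock();
                        return o;
                }
        }
        rcu_read_unlock();
        return NULL;
}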
*/ + + bucket = fscrypt_mk_hash_bucket(keyring, mk_spec); + rcu_read_lock(); + switch (mk_spec->type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + hlist_for_each_entry_rcu(mk, bucket, mk_node) { + if (mk->mk_spec.type == + FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + memcmp(mk->mk_spec.u.descriptor, + mk_spec->u.descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && + refcount_inc_not_zero(&mk->mk_struct_refs)) + goto out; + } + break; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: + hlist_for_each_entry_rcu(mk, bucket, mk_node) { + if (mk->mk_spec.type == + FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && + memcmp(mk->mk_spec.u.identifier, + mk_spec->u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 && + refcount_inc_not_zero(&mk->mk_struct_refs)) + goto out; + } + break; + } + mk = NULL; +out: + rcu_read_unlock(); + return mk; } static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) @@ -277,17 +343,30 @@ static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) static struct key *find_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; + key_ref_t keyref; format_mk_user_description(description, mk->mk_spec.u.identifier); - return search_fscrypt_keyring(mk->mk_users, &key_type_fscrypt_user, - description); + + /* + * We need to mark the keyring reference as "possessed" so that we + * acquire permission to search it, via the KEY_POS_SEARCH permission. + */ + keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/), + &key_type_fscrypt_user, description, false); + if (IS_ERR(keyref)) { + if (PTR_ERR(keyref) == -EAGAIN || /* not found */ + PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ + keyref = ERR_PTR(-ENOKEY); + return ERR_CAST(keyref); + } + return key_ref_to_ptr(keyref); } /* * Give the current user a "key" in ->mk_users. This charges the user's quota * and marks the master key as added by the current user, so that it cannot be - * removed by another user with the key. Either the master key's key->sem must - * be held for write, or the master key must be still undergoing initialization. + * removed by another user with the key. Either ->mk_sem must be held for + * write, or the master key must be still undergoing initialization. */ static int add_master_key_user(struct fscrypt_master_key *mk) { @@ -309,7 +388,7 @@ static int add_master_key_user(struct fscrypt_master_key *mk) /* * Remove the current user's "key" from ->mk_users. - * The master key's key->sem must be held for write. + * ->mk_sem must be held for write. * * Returns 0 if removed, -ENOKEY if not found, or another -errno code. */ @@ -327,64 +406,49 @@ static int remove_master_key_user(struct fscrypt_master_key *mk) } /* - * Allocate a new fscrypt_master_key which contains the given secret, set it as - * the payload of a new 'struct key' of type fscrypt, and link the 'struct key' - * into the given keyring. Synchronized by fscrypt_add_key_mutex. + * Allocate a new fscrypt_master_key, transfer the given secret over to it, and + * insert it into sb->s_master_keys. 
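add_new_master_key(), rewritten below, is the writer half of that RCU lookup idiom: the struct is fully initialized, including both refcounts, before it is published with hlist_add_head_rcu() under the keyring spinlock, so a reader can never observe a half-built key. The generic shape, continuing the names from the sketches above:

static void publish_obj(spinlock_t *lock, struct hlist_head *bucket,
                        struct obj *o)
{
        /* initialize everything before o becomes visible to RCU readers */
        refcount_set(&o->struct_refs, 1);
        refcount_set(&o->active_refs, 1);
        spin_lock(lock);
        hlist_add_head_rcu(&o->node, bucket);
        spin_unlock(lock);
}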
*/ -static int add_new_master_key(struct fscrypt_master_key_secret *secret, - const struct fscrypt_key_specifier *mk_spec, - struct key *keyring) +static int add_new_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec) { + struct fscrypt_keyring *keyring = sb->s_master_keys; struct fscrypt_master_key *mk; - char description[FSCRYPT_MK_DESCRIPTION_SIZE]; - struct key *key; int err; mk = kzalloc(sizeof(*mk), GFP_KERNEL); if (!mk) return -ENOMEM; + mk->mk_sb = sb; + init_rwsem(&mk->mk_sem); + refcount_set(&mk->mk_struct_refs, 1); mk->mk_spec = *mk_spec; - move_master_key_secret(&mk->mk_secret, secret); - init_rwsem(&mk->mk_secret_sem); - - refcount_set(&mk->mk_refcount, 1); /* secret is present */ INIT_LIST_HEAD(&mk->mk_decrypted_inodes); spin_lock_init(&mk->mk_decrypted_inodes_lock); if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { err = allocate_master_key_users_keyring(mk); if (err) - goto out_free_mk; + goto out_put; err = add_master_key_user(mk); if (err) - goto out_free_mk; + goto out_put; } - /* - * Note that we don't charge this key to anyone's quota, since when - * ->mk_users is in use those keys are charged instead, and otherwise - * (when ->mk_users isn't in use) only root can add these keys. - */ - format_mk_description(description, mk_spec); - key = key_alloc(&key_type_fscrypt, description, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), - KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_VIEW, - KEY_ALLOC_NOT_IN_QUOTA, NULL); - if (IS_ERR(key)) { - err = PTR_ERR(key); - goto out_free_mk; - } - err = key_instantiate_and_link(key, mk, sizeof(*mk), keyring, NULL); - key_put(key); - if (err) - goto out_free_mk; + move_master_key_secret(&mk->mk_secret, secret); + refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */ + spin_lock(&keyring->lock); + hlist_add_head_rcu(&mk->mk_node, + fscrypt_mk_hash_bucket(keyring, mk_spec)); + spin_unlock(&keyring->lock); return 0; -out_free_mk: - free_master_key(mk); +out_put: + fscrypt_put_master_key(mk); return err; } @@ -393,45 +457,34 @@ static int add_new_master_key(struct fscrypt_master_key_secret *secret, static int add_existing_master_key(struct fscrypt_master_key *mk, struct fscrypt_master_key_secret *secret) { - struct key *mk_user; - bool rekey; int err; /* * If the current user is already in ->mk_users, then there's nothing to - * do. (Not applicable for v1 policy keys, which have NULL ->mk_users.) + * do. Otherwise, we need to add the user to ->mk_users. (Neither is + * applicable for v1 policy keys, which have NULL ->mk_users.) */ if (mk->mk_users) { - mk_user = find_master_key_user(mk); + struct key *mk_user = find_master_key_user(mk); + if (mk_user != ERR_PTR(-ENOKEY)) { if (IS_ERR(mk_user)) return PTR_ERR(mk_user); key_put(mk_user); return 0; } - } - - /* If we'll be re-adding ->mk_secret, try to take the reference. */ - rekey = !is_master_key_secret_present(&mk->mk_secret); - if (rekey && !refcount_inc_not_zero(&mk->mk_refcount)) - return KEY_DEAD; - - /* Add the current user to ->mk_users, if applicable. */ - if (mk->mk_users) { err = add_master_key_user(mk); - if (err) { - if (rekey && refcount_dec_and_test(&mk->mk_refcount)) - return KEY_DEAD; + if (err) return err; - } } /* Re-add the secret if needed. 
*/ - if (rekey) { - down_write(&mk->mk_secret_sem); + if (!is_master_key_secret_present(&mk->mk_secret)) { + if (!refcount_inc_not_zero(&mk->mk_active_refs)) + return KEY_DEAD; move_master_key_secret(&mk->mk_secret, secret); - up_write(&mk->mk_secret_sem); } + return 0; } @@ -440,38 +493,36 @@ static int do_add_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec) { static DEFINE_MUTEX(fscrypt_add_key_mutex); - struct key *key; + struct fscrypt_master_key *mk; int err; mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ -retry: - key = fscrypt_find_master_key(sb, mk_spec); - if (IS_ERR(key)) { - err = PTR_ERR(key); - if (err != -ENOKEY) - goto out_unlock; + + mk = fscrypt_find_master_key(sb, mk_spec); + if (!mk) { /* Didn't find the key in ->s_master_keys. Add it. */ err = allocate_filesystem_keyring(sb); - if (err) - goto out_unlock; - err = add_new_master_key(secret, mk_spec, sb->s_master_keys); + if (!err) + err = add_new_master_key(sb, secret, mk_spec); } else { /* * Found the key in ->s_master_keys. Re-add the secret if * needed, and add the user to ->mk_users if needed. */ - down_write(&key->sem); - err = add_existing_master_key(key->payload.data[0], secret); - up_write(&key->sem); + down_write(&mk->mk_sem); + err = add_existing_master_key(mk, secret); + up_write(&mk->mk_sem); if (err == KEY_DEAD) { - /* Key being removed or needs to be removed */ - key_invalidate(key); - key_put(key); - goto retry; + /* + * We found a key struct, but it's already been fully + * removed. Ignore the old struct and add a new one. + * fscrypt_add_key_mutex means we don't need to worry + * about concurrent adds. + */ + err = add_new_master_key(sb, secret, mk_spec); } - key_put(key); + fscrypt_put_master_key(mk); } -out_unlock: mutex_unlock(&fscrypt_add_key_mutex); return err; } @@ -735,19 +786,19 @@ int fscrypt_verify_key_added(struct super_block *sb, const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_key_specifier mk_spec; - struct key *key, *mk_user; struct fscrypt_master_key *mk; + struct key *mk_user; int err; mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); - key = fscrypt_find_master_key(sb, &mk_spec); - if (IS_ERR(key)) { - err = PTR_ERR(key); + mk = fscrypt_find_master_key(sb, &mk_spec); + if (!mk) { + err = -ENOKEY; goto out; } - mk = key->payload.data[0]; + down_read(&mk->mk_sem); mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) { err = PTR_ERR(mk_user); @@ -755,7 +806,8 @@ int fscrypt_verify_key_added(struct super_block *sb, key_put(mk_user); err = 0; } - key_put(key); + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); out: if (err == -ENOKEY && capable(CAP_FOWNER)) err = 0; @@ -917,11 +969,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_remove_key_arg __user *uarg = _uarg; struct fscrypt_remove_key_arg arg; - struct key *key; struct fscrypt_master_key *mk; u32 status_flags = 0; int err; - bool dead; + bool inodes_remain; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; @@ -941,12 +992,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) return -EACCES; /* Find the key being removed. 
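The rewritten do_add_master_key() above is the canonical find-or-insert shape: a single mutex serializes the lookup and the insertion, and a lookup that lands on an already-dead entry simply falls through to inserting a fresh one, replacing the old key_invalidate()-and-retry loop. Schematically, building on the obj_find() sketch earlier (obj_insert_new() and obj_readd() are illustrative):

	#include <linux/errno.h>
	#include <linux/mutex.h>

	static int obj_insert_new(struct hlist_head *bucket, u64 key);	/* illustrative */
	static int obj_readd(struct obj *o);				/* illustrative */

	static DEFINE_MUTEX(obj_add_mutex);

	static int obj_add(struct hlist_head *bucket, u64 key)
	{
		struct obj *o;
		int err;

		mutex_lock(&obj_add_mutex);	/* serialize find + insert */
		o = obj_find(bucket, key);
		if (!o) {
			err = obj_insert_new(bucket, key);
		} else {
			err = obj_readd(o);	/* may report the entry dead */
			if (err == -ESTALE)	/* fully removed: add a new one */
				err = obj_insert_new(bucket, key);
			obj_put_structref(o);	/* drop the lookup reference */
		}
		mutex_unlock(&obj_add_mutex);
		return err;
	}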
*/ - key = fscrypt_find_master_key(sb, &arg.key_spec); - if (IS_ERR(key)) - return PTR_ERR(key); - mk = key->payload.data[0]; - - down_write(&key->sem); + mk = fscrypt_find_master_key(sb, &arg.key_spec); + if (!mk) + return -ENOKEY; + down_write(&mk->mk_sem); /* If relevant, remove current user's (or all users) claim to the key */ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { @@ -955,7 +1004,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) else err = remove_master_key_user(mk); if (err) { - up_write(&key->sem); + up_write(&mk->mk_sem); goto out_put_key; } if (mk->mk_users->keys.nr_leaves_on_tree != 0) { @@ -967,28 +1016,22 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS; err = 0; - up_write(&key->sem); + up_write(&mk->mk_sem); goto out_put_key; } } /* No user claims remaining. Go ahead and wipe the secret. */ - dead = false; + err = -ENOKEY; if (is_master_key_secret_present(&mk->mk_secret)) { - down_write(&mk->mk_secret_sem); wipe_master_key_secret(&mk->mk_secret); - dead = refcount_dec_and_test(&mk->mk_refcount); - up_write(&mk->mk_secret_sem); - } - up_write(&key->sem); - if (dead) { - /* - * No inodes reference the key, and we wiped the secret, so the - * key object is free to be removed from the keyring. - */ - key_invalidate(key); + fscrypt_put_master_key_activeref(mk); err = 0; - } else { + } + inodes_remain = refcount_read(&mk->mk_active_refs) > 0; + up_write(&mk->mk_sem); + + if (inodes_remain) { /* Some inodes still reference this key; try to evict them. */ err = try_to_lock_encrypted_files(sb, mk); if (err == -EBUSY) { @@ -1004,7 +1047,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) * has been fully removed including all files locked. */ out_put_key: - key_put(key); + fscrypt_put_master_key(mk); if (err == 0) err = put_user(status_flags, &uarg->removal_status_flags); return err; @@ -1051,7 +1094,6 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_get_key_status_arg arg; - struct key *key; struct fscrypt_master_key *mk; int err; @@ -1068,19 +1110,18 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) arg.user_count = 0; memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); - key = fscrypt_find_master_key(sb, &arg.key_spec); - if (IS_ERR(key)) { - if (key != ERR_PTR(-ENOKEY)) - return PTR_ERR(key); + mk = fscrypt_find_master_key(sb, &arg.key_spec); + if (!mk) { arg.status = FSCRYPT_KEY_STATUS_ABSENT; err = 0; goto out; } - mk = key->payload.data[0]; - down_read(&key->sem); + down_read(&mk->mk_sem); if (!is_master_key_secret_present(&mk->mk_secret)) { - arg.status = FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED; + arg.status = refcount_read(&mk->mk_active_refs) > 0 ? 
+ FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED : + FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */; err = 0; goto out_release_key; } @@ -1102,8 +1143,8 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) } err = 0; out_release_key: - up_read(&key->sem); - key_put(key); + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); out: if (!err && copy_to_user(uarg, &arg, sizeof(arg))) err = -EFAULT; @@ -1115,13 +1156,9 @@ int __init fscrypt_init_keyring(void) { int err; - err = register_key_type(&key_type_fscrypt); - if (err) - return err; - err = register_key_type(&key_type_fscrypt_user); if (err) - goto err_unregister_fscrypt; + return err; err = register_key_type(&key_type_fscrypt_provisioning); if (err) @@ -1131,7 +1168,5 @@ int __init fscrypt_init_keyring(void) err_unregister_fscrypt_user: unregister_key_type(&key_type_fscrypt_user); -err_unregister_fscrypt: - unregister_key_type(&key_type_fscrypt); return err; } diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 73d96e35d9ae4..7b14054fab494 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -9,7 +9,6 @@ */ #include <crypto/skcipher.h> -#include <linux/key.h> #include <linux/random.h> #include "fscrypt_private.h" @@ -151,6 +150,7 @@ void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) { crypto_free_skcipher(prep_key->tfm); fscrypt_destroy_inline_crypt_key(prep_key); + memzero_explicit(prep_key, sizeof(*prep_key)); } /* Given a per-file encryption key, set up the file's crypto transform object */ @@ -404,20 +404,18 @@ static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk, /* * Find the master key, then set up the inode's actual encryption key. * - * If the master key is found in the filesystem-level keyring, then the - * corresponding 'struct key' is returned in *master_key_ret with - * ->mk_secret_sem read-locked. This is needed to ensure that only one task - * links the fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race - * to create an fscrypt_info for the same inode), and to synchronize the master - * key being removed with a new inode starting to use it. + * If the master key is found in the filesystem-level keyring, then it is + * returned in *mk_ret with its semaphore read-locked. This is needed to ensure + * that only one task links the fscrypt_info into ->mk_decrypted_inodes (as + * multiple tasks may race to create an fscrypt_info for the same inode), and to + * synchronize the master key being removed with a new inode starting to use it.
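The memzero_explicit() added to fscrypt_destroy_prepared_key() in the keysetup.c hunk above is the right primitive for scrubbing key material: a plain memset() just before an object is freed is a dead store the compiler is allowed to elide, whereas memzero_explicit() contains a barrier that forces the zeroing to actually happen. For example (the struct layout here is illustrative):

	#include <linux/slab.h>
	#include <linux/string.h>

	struct raw_secret {
		u8 bytes[64];
	};

	static void raw_secret_free(struct raw_secret *s)
	{
		memzero_explicit(s, sizeof(*s));	/* cannot be optimized out */
		kfree(s);
	}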
*/ static int setup_file_encryption_key(struct fscrypt_info *ci, bool need_dirhash_key, - struct key **master_key_ret) + struct fscrypt_master_key **mk_ret) { - struct key *key; - struct fscrypt_master_key *mk = NULL; struct fscrypt_key_specifier mk_spec; + struct fscrypt_master_key *mk; int err; err = fscrypt_select_encryption_impl(ci); @@ -442,11 +440,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, return -EINVAL; } - key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec); - if (IS_ERR(key)) { - if (key != ERR_PTR(-ENOKEY) || - ci->ci_policy.version != FSCRYPT_POLICY_V1) - return PTR_ERR(key); + mk = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec); + if (!mk) { + if (ci->ci_policy.version != FSCRYPT_POLICY_V1) + return -ENOKEY; /* * As a legacy fallback for v1 policies, search for the key in @@ -456,9 +453,7 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, */ return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci); } - - mk = key->payload.data[0]; - down_read(&mk->mk_secret_sem); + down_read(&mk->mk_sem); /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */ if (!is_master_key_secret_present(&mk->mk_secret)) { @@ -486,18 +481,18 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, if (err) goto out_release_key; - *master_key_ret = key; + *mk_ret = mk; return 0; out_release_key: - up_read(&mk->mk_secret_sem); - key_put(key); + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); return err; } static void put_crypt_info(struct fscrypt_info *ci) { - struct key *key; + struct fscrypt_master_key *mk; if (!ci) return; @@ -507,24 +502,18 @@ static void put_crypt_info(struct fscrypt_info *ci) else if (ci->ci_owns_key) fscrypt_destroy_prepared_key(&ci->ci_enc_key); - key = ci->ci_master_key; - if (key) { - struct fscrypt_master_key *mk = key->payload.data[0]; - + mk = ci->ci_master_key; + if (mk) { /* * Remove this inode from the list of inodes that were unlocked - * with the master key. - * - * In addition, if we're removing the last inode from a key that - * already had its secret removed, invalidate the key so that it - * gets removed from ->s_master_keys. + * with the master key. In addition, if we're removing the last + * inode from a master key struct that already had its secret + * removed, then complete the full removal of the struct. */ spin_lock(&mk->mk_decrypted_inodes_lock); list_del(&ci->ci_master_key_link); spin_unlock(&mk->mk_decrypted_inodes_lock); - if (refcount_dec_and_test(&mk->mk_refcount)) - key_invalidate(key); - key_put(key); + fscrypt_put_master_key_activeref(mk); } memzero_explicit(ci, sizeof(*ci)); kmem_cache_free(fscrypt_info_cachep, ci); @@ -538,7 +527,7 @@ fscrypt_setup_encryption_info(struct inode *inode, { struct fscrypt_info *crypt_info; struct fscrypt_mode *mode; - struct key *master_key = NULL; + struct fscrypt_master_key *mk = NULL; int res; res = fscrypt_initialize(inode->i_sb->s_cop->flags); @@ -561,8 +550,7 @@ fscrypt_setup_encryption_info(struct inode *inode, WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE); crypt_info->ci_mode = mode; - res = setup_file_encryption_key(crypt_info, need_dirhash_key, - &master_key); + res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk); if (res) goto out; @@ -577,12 +565,9 @@ fscrypt_setup_encryption_info(struct inode *inode, * We won the race and set ->i_crypt_info to our crypt_info. * Now link it into the master key's inode list. 
*/ - if (master_key) { - struct fscrypt_master_key *mk = - master_key->payload.data[0]; - - refcount_inc(&mk->mk_refcount); - crypt_info->ci_master_key = key_get(master_key); + if (mk) { + crypt_info->ci_master_key = mk; + refcount_inc(&mk->mk_active_refs); spin_lock(&mk->mk_decrypted_inodes_lock); list_add(&crypt_info->ci_master_key_link, &mk->mk_decrypted_inodes); @@ -592,11 +577,9 @@ fscrypt_setup_encryption_info(struct inode *inode, } res = 0; out: - if (master_key) { - struct fscrypt_master_key *mk = master_key->payload.data[0]; - - up_read(&mk->mk_secret_sem); - key_put(master_key); + if (mk) { + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); } put_crypt_info(crypt_info); return res; @@ -747,7 +730,6 @@ EXPORT_SYMBOL(fscrypt_free_inode); int fscrypt_drop_inode(struct inode *inode) { const struct fscrypt_info *ci = fscrypt_get_info(inode); - const struct fscrypt_master_key *mk; /* * If ci is NULL, then the inode doesn't have an encryption key set up @@ -757,7 +739,6 @@ int fscrypt_drop_inode(struct inode *inode) */ if (!ci || !ci->ci_master_key) return 0; - mk = ci->ci_master_key->payload.data[0]; /* * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes @@ -769,13 +750,13 @@ int fscrypt_drop_inode(struct inode *inode) return 0; /* - * Note: since we aren't holding ->mk_secret_sem, the result here can + * Note: since we aren't holding the key semaphore, the result here can * immediately become outdated. But there's no correctness problem with * unnecessarily evicting. Nor is there a correctness problem with not * evicting while iput() is racing with the key being removed, since * then the thread removing the key will either evict the inode itself * or will correctly detect that it wasn't evicted due to the race. */ - return !is_master_key_secret_present(&mk->mk_secret); + return !is_master_key_secret_present(&ci->ci_master_key->mk_secret); } EXPORT_SYMBOL_GPL(fscrypt_drop_inode); diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index faa0f21daa684..f68265c36377e 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -686,12 +686,8 @@ int fscrypt_set_context(struct inode *inode, void *fs_data) * delayed key setup that requires the inode number. 
*/ if (ci->ci_policy.version == FSCRYPT_POLICY_V2 && - (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) { - const struct fscrypt_master_key *mk = - ci->ci_master_key->payload.data[0]; - - fscrypt_hash_inode_number(ci, mk); - } + (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) + fscrypt_hash_inode_number(ci, ci->ci_master_key); return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data); } diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 96059af28f508..42bab9270e7d6 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -378,8 +378,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, } EXPORT_SYMBOL_GPL(debugfs_attr_read); -ssize_t debugfs_attr_write(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) +static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf, + size_t len, loff_t *ppos, bool is_signed) { struct dentry *dentry = F_DENTRY(file); ssize_t ret; @@ -387,12 +387,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf, ret = debugfs_file_get(dentry); if (unlikely(ret)) return ret; - ret = simple_attr_write(file, buf, len, ppos); + if (is_signed) + ret = simple_attr_write_signed(file, buf, len, ppos); + else + ret = simple_attr_write(file, buf, len, ppos); debugfs_file_put(dentry); return ret; } + +ssize_t debugfs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return debugfs_attr_write_xsigned(file, buf, len, ppos, false); +} EXPORT_SYMBOL_GPL(debugfs_attr_write); +ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return debugfs_attr_write_xsigned(file, buf, len, ppos, true); +} +EXPORT_SYMBOL_GPL(debugfs_attr_write_signed); + static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode, struct dentry *parent, void *value, const struct file_operations *fops, @@ -748,11 +764,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val) *val = atomic_read((atomic_t *)data); return 0; } -DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get, debugfs_atomic_t_set, "%lld\n"); -DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, "%lld\n"); -DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, "%lld\n"); /** diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 848e0aaa8da5d..f47f0a7d2c3b9 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -730,6 +730,28 @@ void debugfs_remove(struct dentry *dentry) } EXPORT_SYMBOL_GPL(debugfs_remove); +/** + * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it + * @name: a pointer to a string containing the name of the item to look up. + * @parent: a pointer to the parent dentry of the item. + * + * This is the equivalent of doing something like + * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting + * handled for the directory being looked up.
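debugfs_lookup() returns the dentry with an extra reference held, so the once-common debugfs_remove(debugfs_lookup(...)) pattern leaked that reference; the new helper pairs the lookup with the dput() it requires. A hypothetical caller (the "mydrv" name is made up for illustration):

	#include <linux/debugfs.h>

	static void mydrv_debugfs_exit(void)
	{
		/*
		 * Previously: debugfs_remove(debugfs_lookup("mydrv", NULL)),
		 * which never dropped the reference debugfs_lookup() took.
		 */
		debugfs_lookup_and_remove("mydrv", NULL);
	}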
+ */ +void debugfs_lookup_and_remove(const char *name, struct dentry *parent) +{ + struct dentry *dentry; + + dentry = debugfs_lookup(name, parent); + if (!dentry) + return; + + debugfs_remove(dentry); + dput(dentry); +} +EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove); + /** * debugfs_rename - rename a file/directory in the debugfs filesystem * @old_dir: a pointer to the parent dentry for the renamed object. This diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 283c7b94eddad..ca06069e95c8c 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -198,13 +198,13 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, if (!prev_seq) { kref_get(&lkb->lkb_ref); + mutex_lock(&ls->ls_cb_mutex); if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) { - mutex_lock(&ls->ls_cb_mutex); list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay); - mutex_unlock(&ls->ls_cb_mutex); } else { queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); } + mutex_unlock(&ls->ls_cb_mutex); } out: mutex_unlock(&lkb->lkb_cb_mutex); @@ -284,7 +284,9 @@ void dlm_callback_stop(struct dlm_ls *ls) void dlm_callback_suspend(struct dlm_ls *ls) { + mutex_lock(&ls->ls_cb_mutex); set_bit(LSFL_CB_DELAY, &ls->ls_flags); + mutex_unlock(&ls->ls_cb_mutex); if (ls->ls_callback_wq) flush_workqueue(ls->ls_callback_wq); diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index eaa28d654e9f0..dde9afb6747ba 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -2888,24 +2888,24 @@ static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args) static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_args *args) { - int rv = -EINVAL; + int rv = -EBUSY; if (args->flags & DLM_LKF_CONVERT) { - if (lkb->lkb_flags & DLM_IFL_MSTCPY) + if (lkb->lkb_status != DLM_LKSTS_GRANTED) goto out; - if (args->flags & DLM_LKF_QUECVT && - !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) + if (lkb->lkb_wait_type) goto out; - rv = -EBUSY; - if (lkb->lkb_status != DLM_LKSTS_GRANTED) + if (is_overlap(lkb)) goto out; - if (lkb->lkb_wait_type) + rv = -EINVAL; + if (lkb->lkb_flags & DLM_IFL_MSTCPY) goto out; - if (is_overlap(lkb)) + if (args->flags & DLM_LKF_QUECVT && + !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) goto out; } diff --git a/fs/eventfd.c b/fs/eventfd.c index df466ef81dddf..4a14295cffe0d 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -45,21 +45,7 @@ struct eventfd_ctx { int id; }; -/** - * eventfd_signal - Adds @n to the eventfd counter. - * @ctx: [in] Pointer to the eventfd context. - * @n: [in] Value of the counter to be added to the eventfd internal counter. - * The value cannot be negative. - * - * This function is supposed to be called by the kernel in paths that do not - * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX - * value, and we signal this as overflow condition by returning a EPOLLERR - * to poll(2). - * - * Returns the amount by which the counter was incremented. This will be less - * than @n if the counter has overflowed. - */ -__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) +__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask) { unsigned long flags; @@ -80,12 +66,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) n = ULLONG_MAX - ctx->count; ctx->count += n; if (waitqueue_active(&ctx->wqh)) - wake_up_locked_poll(&ctx->wqh, EPOLLIN); + wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask); this_cpu_dec(eventfd_wake_count); spin_unlock_irqrestore(&ctx->wqh.lock, flags); return n; } + +/** + * eventfd_signal - Adds @n to the eventfd counter. 
+ * @ctx: [in] Pointer to the eventfd context. + * @n: [in] Value of the counter to be added to the eventfd internal counter. + * The value cannot be negative. + * + * This function is supposed to be called by the kernel in paths that do not + * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX + * value, and we signal this as overflow condition by returning a EPOLLERR + * to poll(2). + * + * Returns the amount by which the counter was incremented. This will be less + * than @n if the counter has overflowed. + */ +__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) +{ + return eventfd_signal_mask(ctx, n, 0); +} EXPORT_SYMBOL_GPL(eventfd_signal); static void eventfd_free_ctx(struct eventfd_ctx *ctx) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 2f1f053157090..13d4c3d504126 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -548,7 +548,8 @@ static int ep_call_nested(struct nested_calls *ncalls, */ #ifdef CONFIG_DEBUG_LOCK_ALLOC -static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi) +static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi, + unsigned pollflags) { struct eventpoll *ep_src; unsigned long flags; @@ -579,16 +580,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi) } spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests); ep->nests = nests + 1; - wake_up_locked_poll(&ep->poll_wait, EPOLLIN); + wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags); ep->nests = 0; spin_unlock_irqrestore(&ep->poll_wait.lock, flags); } #else -static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi) +static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi, + unsigned pollflags) { - wake_up_poll(&ep->poll_wait, EPOLLIN); + wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags); } #endif @@ -815,7 +817,7 @@ static void ep_free(struct eventpoll *ep) /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) - ep_poll_safewake(ep, NULL); + ep_poll_safewake(ep, NULL, 0); /* * We need to lock this because we could be hit by @@ -1284,7 +1286,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v /* We have to call this outside the lock */ if (pwake) - ep_poll_safewake(ep, epi); + ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE); if (!(epi->event.events & EPOLLEXCLUSIVE)) ewake = 1; @@ -1589,7 +1591,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, /* We have to call this outside the lock */ if (pwake) - ep_poll_safewake(ep, NULL); + ep_poll_safewake(ep, NULL, 0); return 0; @@ -1692,7 +1694,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, /* We have to call this outside the lock */ if (pwake) - ep_poll_safewake(ep, NULL); + ep_poll_safewake(ep, NULL, 0); return 0; } diff --git a/fs/exec.c b/fs/exec.c index b56bc4b4016e9..983295c0b8acf 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1198,11 +1198,11 @@ static int unshare_sighand(struct task_struct *me) return -ENOMEM; refcount_set(&newsighand->count, 1); - memcpy(newsighand->action, oldsighand->action, - sizeof(newsighand->action)); write_lock_irq(&tasklist_lock); spin_lock(&oldsighand->siglock); + memcpy(newsighand->action, oldsighand->action, + sizeof(newsighand->action)); rcu_assign_pointer(me->sighand, newsighand); spin_unlock(&oldsighand->siglock); write_unlock_irq(&tasklist_lock); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4ad1c3ce9398a..81dc61f1c557f 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -553,7 
+553,7 @@ enum { * * It's not paranoia if the Murphy's Law really *is* out to get you. :-) */ -#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG)) +#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG)) #define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG)) static inline void ext4_check_flag_values(void) @@ -2842,7 +2842,8 @@ int do_journal_get_write_access(handle_t *handle, typedef enum { EXT4_IGET_NORMAL = 0, EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */ - EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */ + EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */ + EXT4_IGET_BAD = 0x0004 /* Allow to iget a bad inode */ } ext4_iget_flags; extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, @@ -3485,8 +3486,8 @@ extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode, extern int ext4_ci_compare(const struct inode *parent, const struct qstr *fname, const struct qstr *entry, bool quick); -extern int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name, - struct inode *inode); +extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name, + struct inode *inode, struct dentry *dentry); extern int __ext4_link(struct inode *dir, struct inode *inode, struct dentry *dentry); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0f49bf547b848..bf0872bb34f69 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -459,6 +459,10 @@ static int __ext4_ext_check(const char *function, unsigned int line, error_msg = "invalid eh_entries"; goto corrupted; } + if (unlikely((eh->eh_entries == 0) && (depth > 0))) { + error_msg = "eh_entries is 0 but eh_depth is > 0"; + goto corrupted; + } if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) { error_msg = "invalid extent entries"; goto corrupted; @@ -5178,6 +5182,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, * and it is decreased till we reach start. 
*/ again: + ret = 0; if (SHIFT == SHIFT_LEFT) iterator = &start; else @@ -5221,14 +5226,21 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, ext4_ext_get_actual_len(extent); } else { extent = EXT_FIRST_EXTENT(path[depth].p_hdr); - if (le32_to_cpu(extent->ee_block) > 0) + if (le32_to_cpu(extent->ee_block) > start) *iterator = le32_to_cpu(extent->ee_block) - 1; - else - /* Beginning is reached, end of the loop */ + else if (le32_to_cpu(extent->ee_block) == start) iterator = NULL; - /* Update path extent in case we need to stop */ - while (le32_to_cpu(extent->ee_block) < start) + else { + extent = EXT_LAST_EXTENT(path[depth].p_hdr); + while (le32_to_cpu(extent->ee_block) >= start) + extent--; + + if (extent == EXT_LAST_EXTENT(path[depth].p_hdr)) + break; + extent++; + iterator = NULL; + } path[depth].p_ext = extent; } ret = ext4_ext_shift_path_extents(path, shift, inode, @@ -5790,6 +5802,14 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu) struct ext4_extent *extent; ext4_lblk_t first_lblk, first_lclu, last_lclu; + /* + * if data can be stored inline, the logical cluster isn't + * mapped - no physical clusters have been allocated, and the + * file has no extents + */ + if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) + return 0; + /* search for the extent closest to the first block in the cluster */ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0); if (IS_ERR(path)) { diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 9a3a8996aacf7..aa99a3659edfc 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -1372,7 +1372,7 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, if (count_reserved) count_rsvd(inode, lblk, orig_es.es_len - len1 - len2, &orig_es, &rc); - goto out; + goto out_get_reserved; } if (len1 > 0) { @@ -1414,6 +1414,7 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, } } +out_get_reserved: if (count_reserved) *reserved = get_rsvd(inode, end, es, &rc); out: diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 501e60713010e..be768ef1fd168 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -66,7 +66,7 @@ * Fast Commit Ineligibility * ------------------------- * Not all operations are supported by fast commits today (e.g extended - * attributes). Fast commit ineligiblity is marked by calling one of the + * attributes). 
Fast commit ineligiblity is marked by calling one of the + * attributes). Fast commit ineligibility is marked by calling one of the * two following functions: * * - ext4_fc_mark_ineligible(): This makes next fast commit operation to fall @@ -371,25 +371,33 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update) struct __track_dentry_update_args *dentry_update = (struct __track_dentry_update_args *)arg; struct dentry *dentry = dentry_update->dentry; - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct inode *dir = dentry->d_parent->d_inode; + struct super_block *sb = inode->i_sb; + struct ext4_sb_info *sbi = EXT4_SB(sb); mutex_unlock(&ei->i_fc_lock); + + if (IS_ENCRYPTED(dir)) { + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME); + mutex_lock(&ei->i_fc_lock); + return -EOPNOTSUPP; + } + node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS); if (!node) { - ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM); mutex_lock(&ei->i_fc_lock); return -ENOMEM; } node->fcd_op = dentry_update->op; - node->fcd_parent = dentry->d_parent->d_inode->i_ino; + node->fcd_parent = dir->i_ino; node->fcd_ino = inode->i_ino; if (dentry->d_name.len > DNAME_INLINE_LEN) { node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS); if (!node->fcd_name.name) { kmem_cache_free(ext4_fc_dentry_cachep, node); - ext4_fc_mark_ineligible(inode->i_sb, - EXT4_FC_REASON_NOMEM); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM); mutex_lock(&ei->i_fc_lock); return -ENOMEM; } @@ -628,6 +636,9 @@ static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc) *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl)); if (pad_len > 0) ext4_fc_memzero(sb, tl + 1, pad_len, crc); + /* Don't leak uninitialized memory in the unused last byte. */ + *((u8 *)(tl + 1) + pad_len) = 0; + ext4_fc_submit_bh(sb); ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); @@ -684,6 +695,8 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc) dst += sizeof(tail.fc_tid); tail.fc_crc = cpu_to_le32(crc); ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL); + dst += sizeof(tail.fc_crc); + memset(dst, 0, bsize - off); /* Don't leak uninitialized memory. */ ext4_fc_submit_bh(sb); @@ -766,22 +779,25 @@ static int ext4_fc_write_inode(struct inode *inode, u32 *crc) tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_INODE); tl.fc_len = cpu_to_le16(inode_len + sizeof(fc_inode.fc_ino)); + ret = -ECANCELED; dst = ext4_fc_reserve_space(inode->i_sb, sizeof(tl) + inode_len + sizeof(fc_inode.fc_ino), crc); if (!dst) - return -ECANCELED; + goto err; if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, sizeof(tl), crc)) - return -ECANCELED; + goto err; dst += sizeof(tl); if (!ext4_fc_memcpy(inode->i_sb, dst, &fc_inode, sizeof(fc_inode), crc)) - return -ECANCELED; + goto err; dst += sizeof(fc_inode); if (!ext4_fc_memcpy(inode->i_sb, dst, (u8 *)ext4_raw_inode(&iloc), inode_len, crc)) - return -ECANCELED; - - return 0; + goto err; + ret = 0; +err: + brelse(iloc.bh); + return ret; } /* @@ -1284,7 +1300,7 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl, return 0; } - ret = __ext4_unlink(NULL, old_parent, &entry, inode); + ret = __ext4_unlink(old_parent, &entry, inode, NULL); /* -ENOENT ok coz it might not exist anymore. 
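The two fast-commit hunks above (the pad byte in ext4_fc_reserve_space() and the memset() in ext4_fc_write_tail()) fix the same class of bug: bytes of a journal buffer that are reserved but never written carry stale kernel memory out to disk unless they are explicitly zeroed. The general rule, sketched with illustrative names:

	#include <linux/string.h>
	#include <linux/types.h>

	/* Fill a block headed for disk: payload first, then zero the tail. */
	static void fill_disk_block(u8 *block, size_t blocksize,
				    const void *payload, size_t len)
	{
		memcpy(block, payload, len);
		memset(block + len, 0, blocksize - len);	/* no stale data */
	}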
*/ if (ret == -ENOENT) ret = 0; @@ -1388,13 +1404,15 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino) if (state->fc_modified_inodes[i] == ino) return 0; if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) { - state->fc_modified_inodes = krealloc( - state->fc_modified_inodes, + int *fc_modified_inodes; + + fc_modified_inodes = krealloc(state->fc_modified_inodes, sizeof(int) * (state->fc_modified_inodes_size + EXT4_FC_REPLAY_REALLOC_INCREMENT), GFP_KERNEL); - if (!state->fc_modified_inodes) + if (!fc_modified_inodes) return -ENOMEM; + state->fc_modified_inodes = fc_modified_inodes; state->fc_modified_inodes_size += EXT4_FC_REPLAY_REALLOC_INCREMENT; } @@ -1579,15 +1597,18 @@ int ext4_fc_record_regions(struct super_block *sb, int ino, if (replay && state->fc_regions_used != state->fc_regions_valid) state->fc_regions_used = state->fc_regions_valid; if (state->fc_regions_used == state->fc_regions_size) { + struct ext4_fc_alloc_region *fc_regions; + + fc_regions = krealloc(state->fc_regions, + sizeof(struct ext4_fc_alloc_region) * + (state->fc_regions_size + + EXT4_FC_REPLAY_REALLOC_INCREMENT), + GFP_KERNEL); + if (!fc_regions) + return -ENOMEM; state->fc_regions_size += EXT4_FC_REPLAY_REALLOC_INCREMENT; - state->fc_regions = krealloc( - state->fc_regions, - state->fc_regions_size * - sizeof(struct ext4_fc_alloc_region), - GFP_KERNEL); - if (!state->fc_regions) - return -ENOMEM; + state->fc_regions = fc_regions; } region = &state->fc_regions[state->fc_regions_used++]; region->ino = ino; @@ -2129,17 +2150,17 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal) journal->j_fc_cleanup_callback = ext4_fc_cleanup; } -static const char *fc_ineligible_reasons[] = { - "Extended attributes changed", - "Cross rename", - "Journal flag changed", - "Insufficient memory", - "Swap boot", - "Resize", - "Dir renamed", - "Falloc range op", - "Data journalling", - "FC Commit Failed" +static const char * const fc_ineligible_reasons[] = { + [EXT4_FC_REASON_XATTR] = "Extended attributes changed", + [EXT4_FC_REASON_CROSS_RENAME] = "Cross rename", + [EXT4_FC_REASON_JOURNAL_FLAG_CHANGE] = "Journal flag changed", + [EXT4_FC_REASON_NOMEM] = "Insufficient memory", + [EXT4_FC_REASON_SWAP_BOOT] = "Swap boot", + [EXT4_FC_REASON_RESIZE] = "Resize", + [EXT4_FC_REASON_RENAME_DIR] = "Dir renamed", + [EXT4_FC_REASON_FALLOC_RANGE] = "Falloc range op", + [EXT4_FC_REASON_INODE_JOURNAL_DATA] = "Data journalling", + [EXT4_FC_REASON_ENCRYPTED_FILENAME] = "Encrypted filename", }; int ext4_fc_info_show(struct seq_file *seq, void *v) diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h index d8d0998a5c163..4a5f96a9c9d72 100644 --- a/fs/ext4/fast_commit.h +++ b/fs/ext4/fast_commit.h @@ -104,6 +104,7 @@ enum { EXT4_FC_REASON_FALLOC_RANGE, EXT4_FC_REASON_INODE_JOURNAL_DATA, EXT4_FC_COMMIT_FAILED, + EXT4_FC_REASON_ENCRYPTED_FILENAME, EXT4_FC_REASON_MAX }; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 7b28d44b0ddd1..0f61e0aa85d6f 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -529,6 +529,12 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) ret = -EAGAIN; goto out; } + /* + * Make sure inline data cannot be created anymore since we are going + * to allocate blocks for DIO. We know the inode does not have any + * inline data now because ext4_dio_supported() checked for that. 
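The two krealloc() fixes above correct the same leak: writing the result straight back into the array pointer throws away the only reference to the old buffer when the allocation fails. The safe idiom goes through a temporary; struct grow_state here is illustrative:

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct grow_state {
		int *arr;
		size_t count;
	};

	static int grow_array(struct grow_state *state, size_t new_count)
	{
		int *tmp;

		tmp = krealloc(state->arr, new_count * sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;	/* state->arr is untouched, not leaked */
		state->arr = tmp;
		state->count = new_count;
		return 0;
	}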
+ */ + ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); offset = iocb->ki_pos; count = ret; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 875af329c43ec..c53c9b1322049 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -508,7 +508,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, goto fallback; } - max_dirs = ndirs / ngroups + inodes_per_group / 16; + max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16; min_inodes = avefreei - inodes_per_group*flex_size / 4; if (min_inodes < 1) min_inodes = 1; diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 05efa682bc2f9..237983cd8cdc2 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -148,6 +148,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; + unsigned int key; int ret = -EIO; *err = 0; @@ -156,7 +157,13 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, if (!p->key) goto no_block; while (--depth) { - bh = sb_getblk(sb, le32_to_cpu(p->key)); + key = le32_to_cpu(p->key); + if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) { + /* the block was out of range */ + ret = -EFSCORRUPTED; + goto failure; + } + bh = sb_getblk(sb, key); if (unlikely(!bh)) { ret = -ENOMEM; goto failure; @@ -705,7 +712,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode, /* * Truncate transactions can be complex and absolutely huge. So we need to - * be able to restart the transaction at a conventient checkpoint to make + * be able to restart the transaction at a convenient checkpoint to make * sure we don't overflow the journal. * * Try to extend this transaction for the purposes of truncation. If diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 88bd1d1cca233..77377befbb1c6 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -799,7 +799,7 @@ ext4_journalled_write_inline_data(struct inode *inode, * clear the inode state safely. * 2. The inode has inline data, then we need to read the data, make it * update and dirty so that ext4_da_writepages can handle it. We don't - * need to start the journal since the file's metatdata isn't changed now. + * need to start the journal since the file's metadata isn't changed now. */ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping, struct inode *inode, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 44b6d061ed71c..355343cf4609b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -179,6 +179,8 @@ void ext4_evict_inode(struct inode *inode) trace_ext4_evict_inode(inode); + if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL) + ext4_evict_ea_inode(inode); if (inode->i_nlink) { /* * When journalling data dirty buffers are tracked only in the @@ -223,13 +225,13 @@ void ext4_evict_inode(struct inode *inode) /* * For inodes with journalled data, transaction commit could have - * dirtied the inode. Flush worker is ignoring it because of I_FREEING - * flag but we still need to remove the inode from the writeback lists. + * dirtied the inode. And for inodes with dioread_nolock, unwritten + * extents converting worker could merge extents and also have dirtied + * the inode. Flush worker is ignoring it because of I_FREEING flag but + * we still need to remove the inode from the writeback lists. 
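The ext4_get_branch() check above (and the similar one added to __ext4_get_inode_loc() further down) applies a general rule for on-disk metadata: a block number read from the device is untrusted input and must be range-checked before it reaches sb_getblk(), or a crafted image can steer I/O out of bounds. A sketch of such a guard, assuming ext4's internal helpers (the function name is illustrative, not one the patch adds):

	#include "ext4.h"	/* ext4_fsblk_t, EXT4_SB(), ext4_blocks_count() */

	static int check_ondisk_block(struct super_block *sb, ext4_fsblk_t block)
	{
		struct ext4_super_block *es = EXT4_SB(sb)->s_es;

		if (block <= le32_to_cpu(es->s_first_data_block) ||
		    block >= ext4_blocks_count(es))
			return -EFSCORRUPTED;	/* metadata cannot live there */
		return 0;
	}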
*/ - if (!list_empty_careful(&inode->i_io_list)) { - WARN_ON_ONCE(!ext4_should_journal_data(inode)); + if (!list_empty_careful(&inode->i_io_list)) inode_io_list_del(inode); - } /* * Protect us against freezing - iput() caller didn't have to have any @@ -336,6 +338,12 @@ void ext4_evict_inode(struct inode *inode) ext4_xattr_inode_array_free(ea_inode_array); return; no_delete: + /* + * Catch the case where something else accidentally dirtied the inode + * while it was being evicted, which could cause inode use-after-free + * issues later. + */ + WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list)); + if (!list_empty(&EXT4_I(inode)->i_fc_list)) ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM); ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ @@ -1175,6 +1183,13 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; + /* + * As with the page allocation above, preallocate the buffer heads + * before starting the handle. + */ + if (!page_has_buffers(page)) + create_empty_buffers(page, inode->i_sb->s_blocksize, 0); + unlock_page(page); retry_journal: @@ -3872,7 +3887,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, * starting from file offset 'from'. The range to be zero'd must * be contained with in one block. If the specified range exceeds * the end of the block it will be shortened to end of the block - * that cooresponds to 'from' + * that corresponds to 'from' */ static int ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) @@ -4278,7 +4293,8 @@ int ext4_truncate(struct inode *inode) /* If we zero-out tail of the page, we have to create jinode for jbd2 */ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { - if (ext4_inode_attach_jinode(inode) < 0) + err = ext4_inode_attach_jinode(inode); + if (err) goto out_trace; } @@ -4379,9 +4395,17 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; inode_offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)); - block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); + block = ext4_inode_table(sb, gdp); + if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) || + (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) { + ext4_error(sb, "Invalid inode table block %llu in " + "block_group %u", block, iloc->block_group); + return -EFSCORRUPTED; + } + block += (inode_offset / inodes_per_block); + bh = sb_getblk(sb, block); if (unlikely(!bh)) return -ENOMEM; @@ -4953,8 +4977,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) ext4_error_inode(inode, function, line, 0, "casefold flag without casefold feature"); - brelse(iloc.bh); + if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) { + ext4_error_inode(inode, function, line, 0, + "bad inode without EXT4_IGET_BAD flag"); + ret = -EUCLEAN; + goto bad_inode; + } + brelse(iloc.bh); unlock_new_inode(inode); return inode; @@ -5769,7 +5799,12 @@ int ext4_mark_iloc_dirty(handle_t *handle, } ext4_fc_track_inode(handle, inode); - if (IS_I_VERSION(inode)) + /* + * ea_inodes use i_version to store their reference count, so don't + * touch it here. + */ + if (IS_I_VERSION(inode) && + !(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) inode_inc_iversion(inode); /* the do_update_inode consumes one
bh->b_count */ @@ -5846,6 +5881,14 @@ static int __ext4_expand_extra_isize(struct inode *inode, return 0; } + /* + * We may need to allocate an external xattr block, so we need quotas + * initialized. We can be called here with various locks held, so we + * cannot afford to initialize quotas ourselves. Just bail. + */ + if (dquot_initialize_needed(inode)) + return -EAGAIN; + /* try to expand with EAs present */ error = ext4_expand_extra_isize_ea(inode, new_extra_isize, raw_inode, handle); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 413bf3d2f7844..240d792db9f78 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -121,7 +121,8 @@ static long swap_inode_boot_loader(struct super_block *sb, blkcnt_t blocks; unsigned short bytes; - inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL); + inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, + EXT4_IGET_SPECIAL | EXT4_IGET_BAD); if (IS_ERR(inode_bl)) return PTR_ERR(inode_bl); ei_bl = EXT4_I(inode_bl); @@ -170,7 +171,7 @@ static long swap_inode_boot_loader(struct super_block *sb, /* Protect extent tree against block allocations via delalloc */ ext4_double_down_write_data_sem(inode, inode_bl); - if (inode_bl->i_nlink == 0) { + if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) { /* this inode has never been used as a BOOT_LOADER */ set_nlink(inode_bl, 1); i_uid_write(inode_bl, 0); @@ -494,6 +495,10 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid) if (ext4_is_quota_file(inode)) return err; + err = dquot_initialize(inode); + if (err) + return err; + err = ext4_get_inode_loc(inode, &iloc); if (err) return err; @@ -509,10 +514,6 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid) brelse(iloc.bh); } - err = dquot_initialize(inode); - if (err) - return err; - handle = ext4_journal_start(inode, EXT4_HT_QUOTA, EXT4_QUOTA_INIT_BLOCKS(sb) + EXT4_QUOTA_DEL_BLOCKS(sb) + 3); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c32d0895c3a3d..d5ca02a7766e0 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4959,6 +4959,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_clstrs = 0; + int retries = 0; u64 seq; might_sleep(); @@ -5061,7 +5062,8 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, ar->len = ac->ac_b_ex.fe_len; } } else { - if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) + if (++retries < 3 && + ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) goto repeat; /* * If block allocation fails then the pa allocated above diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index e75b4749aa1c2..7be6288e48ec2 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h @@ -59,7 +59,7 @@ * by the stream allocator, which purpose is to pack requests * as close each to other as possible to produce smooth I/O traffic * We use locality group prealloc space for stream request.
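The ext4_mb_new_blocks() hunk above puts a bound on what used to be an open-ended loop: discarding preallocations and retrying is now attempted at most three times before the allocation gives up, so a racing workload that keeps consuming the freed space can no longer livelock the allocator. Schematically (try_alloc() and discard_and_should_retry() are illustrative):

	static int try_alloc(struct super_block *sb);			/* illustrative */
	static bool discard_and_should_retry(struct super_block *sb);	/* illustrative */

	static int alloc_with_bounded_retries(struct super_block *sb)
	{
		int retries = 0;
		int err;

	repeat:
		err = try_alloc(sb);
		if (err == -ENOSPC && ++retries < 3 &&
		    discard_and_should_retry(sb))
			goto repeat;
		return err;	/* at most three discard-and-retry rounds */
	}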
- * We can tune the same via /proc/fs/ext4//stream_req + * We can tune the same via /proc/fs/ext4//stream_req */ #define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */ diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 04320715d61f1..b0ea646454ac8 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -32,7 +32,7 @@ static int finish_range(handle_t *handle, struct inode *inode, newext.ee_block = cpu_to_le32(lb->first_block); newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1); ext4_ext_store_pblock(&newext, lb->first_pblock); - /* Locking only for convinience since we are operating on temp inode */ + /* Locking only for convenience since we are operating on temp inode */ down_write(&EXT4_I(inode)->i_data_sem); path = ext4_find_extent(inode, lb->first_block, NULL, 0); if (IS_ERR(path)) { @@ -43,8 +43,8 @@ static int finish_range(handle_t *handle, struct inode *inode, /* * Calculate the credit needed to inserting this extent - * Since we are doing this in loop we may accumalate extra - * credit. But below we try to not accumalate too much + * Since we are doing this in loop we may accumulate extra + * credit. But below we try to not accumulate too much * of them by restarting the journal. */ needed = ext4_ext_calc_credits_for_single_extent(inode, @@ -425,7 +425,8 @@ int ext4_ext_migrate(struct inode *inode) * already is extent-based, error out. */ if (!ext4_has_feature_extents(inode->i_sb) || - (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) + ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || + ext4_has_inline_data(inode)) return -EINVAL; if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 58b0f1b12095b..7ec7c9c16a39e 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -125,7 +125,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode, struct ext4_dir_entry *dirent; int is_dx_block = 0; - if (block >= inode->i_size) { + if (block >= inode->i_size >> inode->i_blkbits) { ext4_error_inode(inode, func, line, block, "Attempting to read directory block (%u) that is past i_size (%llu)", block, inode->i_size); @@ -995,7 +995,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash, * If the hash is 1, then continue only if the next page has a * continuation hash of any value. This is used for readdir * handling. Otherwise, check to see if the hash matches the - * desired contiuation hash. If it doesn't, return since + * desired continuation hash. If it doesn't, return since * there's no point to read in the successive index pages. 
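The __ext4_read_dirblock() fix at the top of the namei.c hunk is a units bug worth spelling out: 'block' is an index in filesystem blocks while i_size is a byte count, so the old 'block >= inode->i_size' comparison only caught wildly out-of-range values. The corrected check converts bytes to blocks before comparing; as a self-contained predicate (the helper name is illustrative):

	#include <linux/fs.h>

	/* i_size is in bytes; shift by i_blkbits to compare block indices. */
	static bool dirblock_past_eof(const struct inode *inode, u32 block)
	{
		return block >= (inode->i_size >> inode->i_blkbits);
	}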
*/ bhash = dx_get_hash(p->at); @@ -2153,8 +2153,16 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, memcpy(data2, de, len); de = (struct ext4_dir_entry_2 *) data2; top = data2 + len; - while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) + while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) { + if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len, + (data2 + (blocksize - csum_size) - + (char *) de))) { + brelse(bh2); + brelse(bh); + return -EFSCORRUPTED; + } de = de2; + } de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - (char *) de, blocksize); @@ -3236,14 +3244,20 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) return retval; } -int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name, - struct inode *inode) +int __ext4_unlink(struct inode *dir, const struct qstr *d_name, + struct inode *inode, + struct dentry *dentry /* NULL during fast_commit recovery */) { int retval = -ENOENT; struct buffer_head *bh; struct ext4_dir_entry_2 *de; + handle_t *handle; int skip_remove_dentry = 0; + /* + * Keep this outside the transaction; it may have to set up the + * directory's encryption key, which isn't GFP_NOFS-safe. + */ bh = ext4_find_entry(dir, d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); @@ -3260,7 +3274,14 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) skip_remove_dentry = 1; else - goto out; + goto out_bh; + } + + handle = ext4_journal_start(dir, EXT4_HT_DIR, + EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); + if (IS_ERR(handle)) { + retval = PTR_ERR(handle); + goto out_bh; } if (IS_DIRSYNC(dir)) @@ -3269,12 +3290,12 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name if (!skip_remove_dentry) { retval = ext4_delete_entry(handle, dir, de, bh); if (retval) - goto out; + goto out_handle; dir->i_ctime = dir->i_mtime = current_time(dir); ext4_update_dx_flag(dir); retval = ext4_mark_inode_dirty(handle, dir); if (retval) - goto out; + goto out_handle; } else { retval = 0; } @@ -3287,15 +3308,17 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name ext4_orphan_add(handle, inode); inode->i_ctime = current_time(inode); retval = ext4_mark_inode_dirty(handle, inode); - -out: + if (dentry && !retval) + ext4_fc_track_unlink(handle, dentry); +out_handle: + ext4_journal_stop(handle); +out_bh: brelse(bh); return retval; } static int ext4_unlink(struct inode *dir, struct dentry *dentry) { - handle_t *handle; int retval; if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) @@ -3313,16 +3336,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (retval) goto out_trace; - handle = ext4_journal_start(dir, EXT4_HT_DIR, - EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); - if (IS_ERR(handle)) { - retval = PTR_ERR(handle); - goto out_trace; - } - - retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry)); - if (!retval) - ext4_fc_track_unlink(handle, dentry); + retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry), dentry); #ifdef CONFIG_UNICODE /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. 
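Moving the journal start below ext4_find_entry() in the __ext4_unlink() rework above reflects a broader ext4 ordering rule: any step that may allocate memory without GFP_NOFS restrictions (here, setting up the directory's fscrypt key during the lookup) must finish before a handle is opened, because reclaim entered from inside a running handle can recurse into the filesystem and deadlock. The resulting call shape, reduced to its skeleton:

	/* 1. Lookup first: may allocate outside GFP_NOFS. */
	bh = ext4_find_entry(dir, d_name, &de, NULL);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	/* 2. Only then open the transaction. */
	handle = ext4_journal_start(dir, EXT4_HT_DIR,
				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle)) {
		brelse(bh);
		return PTR_ERR(handle);
	}

	/* 3. Do the metadata updates, then release in reverse order. */
	ext4_journal_stop(handle);
	brelse(bh);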
Eventually we'll want avoid @@ -3333,8 +3347,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (IS_CASEFOLDED(dir)) d_invalidate(dentry); #endif - if (handle) - ext4_journal_stop(handle); out_trace: trace_ext4_unlink_exit(dentry, retval); @@ -3834,6 +3846,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, return -EXDEV; retval = dquot_initialize(old.dir); + if (retval) + return retval; + retval = dquot_initialize(old.inode); if (retval) return retval; retval = dquot_initialize(new.dir); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index f6409ddfd1172..51cebc1990eb1 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1545,8 +1545,8 @@ static int ext4_flex_group_add(struct super_block *sb, int meta_bg = ext4_has_feature_meta_bg(sb); sector_t old_gdb = 0; - update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, - sizeof(struct ext4_super_block), 0); + update_backups(sb, ext4_group_first_block_no(sb, 0), + (char *)es, sizeof(struct ext4_super_block), 0); for (; gdb_num <= gdb_num_end; gdb_num++) { struct buffer_head *gdb_bh; @@ -1753,7 +1753,7 @@ static int ext4_group_extend_no_check(struct super_block *sb, if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: extended group to %llu " "blocks\n", ext4_blocks_count(es)); - update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, + update_backups(sb, ext4_group_first_block_no(sb, 0), (char *)es, sizeof(struct ext4_super_block), 0); } return err; @@ -2068,7 +2068,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) goto out; } - if (ext4_blocks_count(es) == n_blocks_count) + if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0) goto out; err = ext4_alloc_flex_bg_array(sb, n_group + 1); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index a0af833f7da70..e940fb07ef2e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -188,19 +188,12 @@ int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io) int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait) { - if (trylock_buffer(bh)) { - if (wait) - return ext4_read_bh(bh, op_flags, NULL); + lock_buffer(bh); + if (!wait) { ext4_read_bh_nowait(bh, op_flags, NULL); return 0; } - if (wait) { - wait_on_buffer(bh); - if (buffer_uptodate(bh)) - return 0; - return -EIO; - } - return 0; + return ext4_read_bh(bh, op_flags, NULL); } /* @@ -247,7 +240,8 @@ void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block) struct buffer_head *bh = sb_getblk_gfp(sb, block, 0); if (likely(bh)) { - ext4_read_bh_lock(bh, REQ_RAHEAD, false); + if (trylock_buffer(bh)) + ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL); brelse(bh); } } @@ -423,104 +417,6 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi) #define ext4_get_tstamp(es, tstamp) \ __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi) -static void __save_error_info(struct super_block *sb, int error, - __u32 ino, __u64 block, - const char *func, unsigned int line) -{ - struct ext4_super_block *es = EXT4_SB(sb)->s_es; - int err; - - EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; - if (bdev_read_only(sb->s_bdev)) - return; - es->s_state |= cpu_to_le16(EXT4_ERROR_FS); - ext4_update_tstamp(es, s_last_error_time); - strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func)); - es->s_last_error_line = cpu_to_le32(line); - es->s_last_error_ino = cpu_to_le32(ino); - es->s_last_error_block = cpu_to_le64(block); - switch (error) { - case EIO: - err = EXT4_ERR_EIO; - break; - case ENOMEM: - err = EXT4_ERR_ENOMEM; - break; - case 
EFSBADCRC: - err = EXT4_ERR_EFSBADCRC; - break; - case 0: - case EFSCORRUPTED: - err = EXT4_ERR_EFSCORRUPTED; - break; - case ENOSPC: - err = EXT4_ERR_ENOSPC; - break; - case ENOKEY: - err = EXT4_ERR_ENOKEY; - break; - case EROFS: - err = EXT4_ERR_EROFS; - break; - case EFBIG: - err = EXT4_ERR_EFBIG; - break; - case EEXIST: - err = EXT4_ERR_EEXIST; - break; - case ERANGE: - err = EXT4_ERR_ERANGE; - break; - case EOVERFLOW: - err = EXT4_ERR_EOVERFLOW; - break; - case EBUSY: - err = EXT4_ERR_EBUSY; - break; - case ENOTDIR: - err = EXT4_ERR_ENOTDIR; - break; - case ENOTEMPTY: - err = EXT4_ERR_ENOTEMPTY; - break; - case ESHUTDOWN: - err = EXT4_ERR_ESHUTDOWN; - break; - case EFAULT: - err = EXT4_ERR_EFAULT; - break; - default: - err = EXT4_ERR_UNKNOWN; - } - es->s_last_error_errcode = err; - if (!es->s_first_error_time) { - es->s_first_error_time = es->s_last_error_time; - es->s_first_error_time_hi = es->s_last_error_time_hi; - strncpy(es->s_first_error_func, func, - sizeof(es->s_first_error_func)); - es->s_first_error_line = cpu_to_le32(line); - es->s_first_error_ino = es->s_last_error_ino; - es->s_first_error_block = es->s_last_error_block; - es->s_first_error_errcode = es->s_last_error_errcode; - } - /* - * Start the daily error reporting function if it hasn't been - * started already - */ - if (!es->s_error_count) - mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ); - le32_add_cpu(&es->s_error_count, 1); -} - -static void save_error_info(struct super_block *sb, int error, - __u32 ino, __u64 block, - const char *func, unsigned int line) -{ - __save_error_info(sb, error, ino, block, func, line); - if (!bdev_read_only(sb->s_bdev)) - ext4_commit_super(sb, 1); -} - /* * The del_gendisk() function uninitializes the disk-specific data * structures, including the bdi structure, without telling anyone @@ -649,6 +545,89 @@ static bool system_going_down(void) || system_state == SYSTEM_RESTART; } +struct ext4_err_translation { + int code; + int errno; +}; + +#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err } + +static struct ext4_err_translation err_translation[] = { + EXT4_ERR_TRANSLATE(EIO), + EXT4_ERR_TRANSLATE(ENOMEM), + EXT4_ERR_TRANSLATE(EFSBADCRC), + EXT4_ERR_TRANSLATE(EFSCORRUPTED), + EXT4_ERR_TRANSLATE(ENOSPC), + EXT4_ERR_TRANSLATE(ENOKEY), + EXT4_ERR_TRANSLATE(EROFS), + EXT4_ERR_TRANSLATE(EFBIG), + EXT4_ERR_TRANSLATE(EEXIST), + EXT4_ERR_TRANSLATE(ERANGE), + EXT4_ERR_TRANSLATE(EOVERFLOW), + EXT4_ERR_TRANSLATE(EBUSY), + EXT4_ERR_TRANSLATE(ENOTDIR), + EXT4_ERR_TRANSLATE(ENOTEMPTY), + EXT4_ERR_TRANSLATE(ESHUTDOWN), + EXT4_ERR_TRANSLATE(EFAULT), +}; + +static int ext4_errno_to_code(int errno) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(err_translation); i++) + if (err_translation[i].errno == errno) + return err_translation[i].code; + return EXT4_ERR_UNKNOWN; +} + +static void __save_error_info(struct super_block *sb, int error, + __u32 ino, __u64 block, + const char *func, unsigned int line) +{ + struct ext4_super_block *es = EXT4_SB(sb)->s_es; + + EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; + if (bdev_read_only(sb->s_bdev)) + return; + /* We default to EFSCORRUPTED error... 
*/ + if (error == 0) + error = EFSCORRUPTED; + es->s_state |= cpu_to_le16(EXT4_ERROR_FS); + ext4_update_tstamp(es, s_last_error_time); + strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func)); + es->s_last_error_line = cpu_to_le32(line); + es->s_last_error_ino = cpu_to_le32(ino); + es->s_last_error_block = cpu_to_le64(block); + es->s_last_error_errcode = ext4_errno_to_code(error); + if (!es->s_first_error_time) { + es->s_first_error_time = es->s_last_error_time; + es->s_first_error_time_hi = es->s_last_error_time_hi; + strncpy(es->s_first_error_func, func, + sizeof(es->s_first_error_func)); + es->s_first_error_line = cpu_to_le32(line); + es->s_first_error_ino = es->s_last_error_ino; + es->s_first_error_block = es->s_last_error_block; + es->s_first_error_errcode = es->s_last_error_errcode; + } + /* + * Start the daily error reporting function if it hasn't been + * started already + */ + if (!es->s_error_count) + mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ); + le32_add_cpu(&es->s_error_count, 1); +} + +static void save_error_info(struct super_block *sb, int error, + __u32 ino, __u64 block, + const char *func, unsigned int line) +{ + __save_error_info(sb, error, ino, block, func, line); + if (!bdev_read_only(sb->s_bdev)) + ext4_commit_super(sb, 1); +} + /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * @@ -1300,6 +1279,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) return NULL; inode_set_iversion(&ei->vfs_inode, 1); + ei->i_flags = 0; spin_lock_init(&ei->i_raw_lock); INIT_LIST_HEAD(&ei->i_prealloc_list); atomic_set(&ei->i_prealloc_active, 0); @@ -3550,6 +3530,7 @@ static int ext4_lazyinit_thread(void *arg) unsigned long next_wakeup, cur; BUG_ON(NULL == eli); + set_freezable(); cont_thread: while (true) { @@ -4814,30 +4795,31 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_has_feature_journal_needs_recovery(sb)) { ext4_msg(sb, KERN_ERR, "required journal recovery " "suppressed and not mounted read-only"); - goto failed_mount_wq; + goto failed_mount3a; } else { /* Nojournal mode, all journal mount options are illegal */ - if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { - ext4_msg(sb, KERN_ERR, "can't mount with " - "journal_checksum, fs mounted w/o journal"); - goto failed_mount_wq; - } if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with " "journal_async_commit, fs mounted w/o journal"); - goto failed_mount_wq; + goto failed_mount3a; + } + + if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "journal_checksum, fs mounted w/o journal"); + goto failed_mount3a; } if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { ext4_msg(sb, KERN_ERR, "can't mount with " "commit=%lu, fs mounted w/o journal", sbi->s_commit_interval / HZ); - goto failed_mount_wq; + goto failed_mount3a; } if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { ext4_msg(sb, KERN_ERR, "can't mount with " "data=, fs mounted w/o journal"); - goto failed_mount_wq; + goto failed_mount3a; } sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, JOURNAL_CHECKSUM); @@ -5281,7 +5263,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb, jbd_debug(2, "Journal inode found at %p: %lld bytes\n", journal_inode, journal_inode->i_size); - if (!S_ISREG(journal_inode->i_mode)) { + if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) { ext4_msg(sb, 
KERN_ERR, "invalid journal inode"); iput(journal_inode); return NULL; @@ -6273,7 +6255,7 @@ static int ext4_write_info(struct super_block *sb, int type) handle_t *handle; /* Data block + inode block */ - handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2); + handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); @@ -6390,6 +6372,20 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, return err; } +static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum) +{ + switch (type) { + case USRQUOTA: + return qf_inum == EXT4_USR_QUOTA_INO; + case GRPQUOTA: + return qf_inum == EXT4_GRP_QUOTA_INO; + case PRJQUOTA: + return qf_inum >= EXT4_GOOD_OLD_FIRST_INO; + default: + BUG(); + } +} + static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags) { @@ -6406,9 +6402,16 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id, if (!qf_inums[type]) return -EPERM; + if (!ext4_check_quota_inum(type, qf_inums[type])) { + ext4_error(sb, "Bad quota inum: %lu, type: %d", + qf_inums[type], type); + return -EUCLEAN; + } + qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL); if (IS_ERR(qf_inode)) { - ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]); + ext4_error(sb, "Bad quota inode: %lu, type: %d", + qf_inums[type], type); return PTR_ERR(qf_inode); } @@ -6447,8 +6450,9 @@ static int ext4_enable_quotas(struct super_block *sb) if (err) { ext4_warning(sb, "Failed to enable quota tracking " - "(type=%d, err=%d). Please run " - "e2fsck to fix.", type, err); + "(type=%d, err=%d, ino=%lu). " + "Please run e2fsck to fix.", type, + err, qf_inums[type]); for (type--; type >= 0; type--) { struct inode *inode; diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index f24bef3be48a3..ce74cde6d8faa 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -487,6 +487,11 @@ static void ext4_sb_release(struct kobject *kobj) complete(&sbi->s_kobj_unregister); } +static void ext4_feat_release(struct kobject *kobj) +{ + kfree(kobj); +} + static const struct sysfs_ops ext4_attr_ops = { .show = ext4_attr_show, .store = ext4_attr_store, @@ -501,7 +506,7 @@ static struct kobj_type ext4_sb_ktype = { static struct kobj_type ext4_feat_ktype = { .default_groups = ext4_feat_groups, .sysfs_ops = &ext4_attr_ops, - .release = (void (*)(struct kobject *))kfree, + .release = ext4_feat_release, }; static struct kobject *ext4_root; diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index 00e3cbde472e4..e3019f920222f 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -79,8 +79,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count, size_t n = min_t(size_t, count, PAGE_SIZE - offset_in_page(pos)); struct page *page; - void *fsdata; - void *addr; + void *fsdata = NULL; int res; res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0, @@ -88,9 +87,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count, if (res) return res; - addr = kmap_atomic(page); - memcpy(addr + offset_in_page(pos), buf, n); - kunmap_atomic(addr); + memcpy_to_page(page, offset_in_page(pos), buf, n); res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n, page, fsdata); @@ -370,13 +367,14 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) { - DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); struct page *page; index += 
ext4_verity_metadata_pos(inode) >> PAGE_SHIFT; page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED); if (!page || !PageUptodate(page)) { + DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); + if (page) put_page(page); else if (num_ra_pages > 1) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 38531c5e16c60..6bf1c62eff04a 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -436,6 +436,21 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, return err; } +/* Remove entry from mbcache when EA inode is getting evicted */ +void ext4_evict_ea_inode(struct inode *inode) +{ + struct mb_cache_entry *oe; + + if (!EA_INODE_CACHE(inode)) + return; + /* Wait for entry to get unused so that we can remove it */ + while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode), + ext4_xattr_inode_get_hash(inode), inode->i_ino))) { + mb_cache_entry_wait_unused(oe); + mb_cache_entry_put(EA_INODE_CACHE(inode), oe); + } +} + static int ext4_xattr_inode_verify_hashes(struct inode *ea_inode, struct ext4_xattr_entry *entry, void *buffer, @@ -972,10 +987,8 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode, static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, int ref_change) { - struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode); struct ext4_iloc iloc; s64 ref_count; - u32 hash; int ret; inode_lock(ea_inode); @@ -998,14 +1011,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, set_nlink(ea_inode, 1); ext4_orphan_del(handle, ea_inode); - - if (ea_inode_cache) { - hash = ext4_xattr_inode_get_hash(ea_inode); - mb_cache_entry_create(ea_inode_cache, - GFP_NOFS, hash, - ea_inode->i_ino, - true /* reusable */); - } } } else { WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld", @@ -1018,12 +1023,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, clear_nlink(ea_inode); ext4_orphan_add(handle, ea_inode); - - if (ea_inode_cache) { - hash = ext4_xattr_inode_get_hash(ea_inode); - mb_cache_entry_delete(ea_inode_cache, hash, - ea_inode->i_ino); - } } } @@ -1231,6 +1230,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, if (error) goto out; +retry_ref: lock_buffer(bh); hash = le32_to_cpu(BHDR(bh)->h_hash); ref = le32_to_cpu(BHDR(bh)->h_refcount); @@ -1240,9 +1240,18 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, * This must happen under buffer lock for * ext4_xattr_block_set() to reliably detect freed block */ - if (ea_block_cache) - mb_cache_entry_delete(ea_block_cache, hash, - bh->b_blocknr); + if (ea_block_cache) { + struct mb_cache_entry *oe; + + oe = mb_cache_entry_delete_or_get(ea_block_cache, hash, + bh->b_blocknr); + if (oe) { + unlock_buffer(bh); + mb_cache_entry_wait_unused(oe); + mb_cache_entry_put(ea_block_cache, oe); + goto retry_ref; + } + } get_bh(bh); unlock_buffer(bh); @@ -1266,7 +1275,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, ce = mb_cache_entry_get(ea_block_cache, hash, bh->b_blocknr); if (ce) { - ce->e_reusable = 1; + set_bit(MBE_REUSABLE_B, &ce->e_flags); mb_cache_entry_put(ea_block_cache, ce); } } @@ -1425,6 +1434,9 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle, if (!err) err = ext4_inode_attach_jinode(ea_inode); if (err) { + if (ext4_xattr_inode_dec_ref(handle, ea_inode)) + ext4_warning_inode(ea_inode, + "cleanup dec ref error %d", err); iput(ea_inode); return ERR_PTR(err); } @@ -1614,7 +1626,7 @@ static int ext4_xattr_set_entry(struct 
ext4_xattr_info *i, * If storing the value in an external inode is an option, * reserve space for xattr entries/names in the external * attribute block so that a long value does not occupy the - * whole space and prevent futher entries being added. + * whole space and prevent further entries being added. */ if (ext4_has_feature_ea_inode(inode->i_sb) && new_size && is_block && @@ -1851,6 +1863,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, #define header(x) ((struct ext4_xattr_header *)(x)) if (s->base) { + int offset = (char *)s->here - bs->bh->b_data; + BUFFER_TRACE(bs->bh, "get_write_access"); error = ext4_journal_get_write_access(handle, bs->bh); if (error) @@ -1865,9 +1879,20 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, * ext4_xattr_block_set() to reliably detect modified * block */ - if (ea_block_cache) - mb_cache_entry_delete(ea_block_cache, hash, - bs->bh->b_blocknr); + if (ea_block_cache) { + struct mb_cache_entry *oe; + + oe = mb_cache_entry_delete_or_get(ea_block_cache, + hash, bs->bh->b_blocknr); + if (oe) { + /* + * Xattr block is getting reused. Leave + * it alone. + */ + mb_cache_entry_put(ea_block_cache, oe); + goto clone_block; + } + } ea_bdebug(bs->bh, "modifying in-place"); error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */); @@ -1882,50 +1907,47 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, if (error) goto cleanup; goto inserted; - } else { - int offset = (char *)s->here - bs->bh->b_data; + } +clone_block: + unlock_buffer(bs->bh); + ea_bdebug(bs->bh, "cloning"); + s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS); + error = -ENOMEM; + if (s->base == NULL) + goto cleanup; + s->first = ENTRY(header(s->base)+1); + header(s->base)->h_refcount = cpu_to_le32(1); + s->here = ENTRY(s->base + offset); + s->end = s->base + bs->bh->b_size; - unlock_buffer(bs->bh); - ea_bdebug(bs->bh, "cloning"); - s->base = kmalloc(bs->bh->b_size, GFP_NOFS); - error = -ENOMEM; - if (s->base == NULL) + /* + * If existing entry points to an xattr inode, we need + * to prevent ext4_xattr_set_entry() from decrementing + * ref count on it because the reference belongs to the + * original block. In this case, make the entry look + * like it has an empty value. + */ + if (!s->not_found && s->here->e_value_inum) { + ea_ino = le32_to_cpu(s->here->e_value_inum); + error = ext4_xattr_inode_iget(inode, ea_ino, + le32_to_cpu(s->here->e_hash), + &tmp_inode); + if (error) goto cleanup; - memcpy(s->base, BHDR(bs->bh), bs->bh->b_size); - s->first = ENTRY(header(s->base)+1); - header(s->base)->h_refcount = cpu_to_le32(1); - s->here = ENTRY(s->base + offset); - s->end = s->base + bs->bh->b_size; - - /* - * If existing entry points to an xattr inode, we need - * to prevent ext4_xattr_set_entry() from decrementing - * ref count on it because the reference belongs to the - * original block. In this case, make the entry look - * like it has an empty value. - */ - if (!s->not_found && s->here->e_value_inum) { - ea_ino = le32_to_cpu(s->here->e_value_inum); - error = ext4_xattr_inode_iget(inode, ea_ino, - le32_to_cpu(s->here->e_hash), - &tmp_inode); - if (error) - goto cleanup; - if (!ext4_test_inode_state(tmp_inode, - EXT4_STATE_LUSTRE_EA_INODE)) { - /* - * Defer quota free call for previous - * inode until success is guaranteed. 
- */ - old_ea_inode_quota = le32_to_cpu( - s->here->e_value_size); - } - iput(tmp_inode); - - s->here->e_value_inum = 0; - s->here->e_value_size = 0; + if (!ext4_test_inode_state(tmp_inode, + EXT4_STATE_LUSTRE_EA_INODE)) { + /* + * Defer quota free call for previous + * inode until success is guaranteed. + */ + old_ea_inode_quota = le32_to_cpu( + s->here->e_value_size); } + iput(tmp_inode); + + s->here->e_value_inum = 0; + s->here->e_value_size = 0; } } else { /* Allocate a buffer where we construct the new block. */ @@ -1992,18 +2014,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, lock_buffer(new_bh); /* * We have to be careful about races with - * freeing, rehashing or adding references to - * xattr block. Once we hold buffer lock xattr - * block's state is stable so we can check - * whether the block got freed / rehashed or - * not. Since we unhash mbcache entry under - * buffer lock when freeing / rehashing xattr - * block, checking whether entry is still - * hashed is reliable. Same rules hold for - * e_reusable handling. + * adding references to xattr block. Once we + * hold buffer lock xattr block's state is + * stable so we can check the additional + * reference fits. */ - if (hlist_bl_unhashed(&ce->e_hash_list) || - !ce->e_reusable) { + ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1; + if (ref > EXT4_XATTR_REFCOUNT_MAX) { /* * Undo everything and check mbcache * again. @@ -2018,10 +2035,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, new_bh = NULL; goto inserted; } - ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1; BHDR(new_bh)->h_refcount = cpu_to_le32(ref); - if (ref >= EXT4_XATTR_REFCOUNT_MAX) - ce->e_reusable = 0; + if (ref == EXT4_XATTR_REFCOUNT_MAX) + clear_bit(MBE_REUSABLE_B, &ce->e_flags); ea_bdebug(new_bh, "reusing; refcount now=%d", ref); ext4_xattr_block_csum_set(inode, new_bh); @@ -2049,19 +2065,11 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); - - /* non-extent files can't have physical blocks past 2^32 */ - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; - block = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &error); if (error) goto cleanup; - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); - ea_idebug(inode, "creating block %llu", (unsigned long long)block); @@ -2556,7 +2564,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); - buffer = kmalloc(value_size, GFP_NOFS); + buffer = kvmalloc(value_size, GFP_NOFS); b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); if (!is || !bs || !buffer || !b_entry_name) { error = -ENOMEM; @@ -2608,7 +2616,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, error = 0; out: kfree(b_entry_name); - kfree(buffer); + kvfree(buffer); if (is) brelse(is->iloc.bh); if (bs) diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index 87e5863bb4931..b357872ab83b4 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -191,6 +191,7 @@ extern void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *array); extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, struct ext4_inode *raw_inode, handle_t *handle); +extern void ext4_evict_ea_inode(struct inode *inode); extern const struct xattr_handler *ext4_xattr_handlers[]; 
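The fs/ext4/xattr.c hunks above all rely on one new mbcache idiom: instead of removing an entry with mb_cache_entry_delete(), callers now use mb_cache_entry_delete_or_get() and, when the entry is still referenced, wait for it to become unused before retrying. A minimal sketch of the idiom, assuming kernel context (it mirrors the ext4_evict_ea_inode() loop added above and is not a new API; the cache/hash/value triple is a placeholder):

#include <linux/mbcache.h>

/*
 * Remove a cache entry while racing with concurrent users.
 * mb_cache_entry_delete_or_get() deletes the entry when it is unused;
 * otherwise it returns the entry with an extra reference so the caller
 * can wait for the current users to drop theirs, then retry.
 */
static void evict_cache_entry(struct mb_cache *cache, u32 hash, u64 value)
{
	struct mb_cache_entry *oe;

	while ((oe = mb_cache_entry_delete_or_get(cache, hash, value))) {
		mb_cache_entry_wait_unused(oe);
		mb_cache_entry_put(cache, oe);
	}
}

The same wait-and-retry shape appears in ext4_xattr_release_block() above, with the extra step of dropping the buffer lock before waiting so a sleeping waiter cannot deadlock with a user that needs that lock.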
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 1c49b9959b32a..cd46a64ace1b3 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -136,7 +136,7 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, unsigned int segno, offset; bool exist; - if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ) + if (type == DATA_GENERIC) return true; segno = GET_SEGNO(sbi, blkaddr); @@ -144,6 +144,13 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, se = get_seg_entry(sbi, segno); exist = f2fs_test_bit(offset, se->cur_valid_map); + if (exist && type == DATA_GENERIC_ENHANCE_UPDATE) { + f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", + blkaddr, exist); + set_sbi_flag(sbi, SBI_NEED_FSCK); + return exist; + } + if (!exist && type == DATA_GENERIC_ENHANCE) { f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", blkaddr, exist); @@ -181,6 +188,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, case DATA_GENERIC: case DATA_GENERIC_ENHANCE: case DATA_GENERIC_ENHANCE_READ: + case DATA_GENERIC_ENHANCE_UPDATE: if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || blkaddr < MAIN_BLKADDR(sbi))) { f2fs_warn(sbi, "access invalid blkaddr:%u", @@ -1039,7 +1047,8 @@ void f2fs_remove_dirty_inode(struct inode *inode) spin_unlock(&sbi->inode_lock[type]); } -int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) +int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, + bool from_cp) { struct list_head *head; struct inode *inode; @@ -1074,11 +1083,15 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) if (inode) { unsigned long cur_ino = inode->i_ino; - F2FS_I(inode)->cp_task = current; + if (from_cp) + F2FS_I(inode)->cp_task = current; + F2FS_I(inode)->wb_task = current; filemap_fdatawrite(inode->i_mapping); - F2FS_I(inode)->cp_task = NULL; + F2FS_I(inode)->wb_task = NULL; + if (from_cp) + F2FS_I(inode)->cp_task = NULL; iput(inode); /* We need to give cpu to other writers. 
*/ @@ -1207,7 +1220,7 @@ static int block_operations(struct f2fs_sb_info *sbi) /* write all the dirty dentry pages */ if (get_pages(sbi, F2FS_DIRTY_DENTS)) { f2fs_unlock_all(sbi); - err = f2fs_sync_dirty_inodes(sbi, DIR_INODE); + err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true); if (err) return err; cond_resched(); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b2016fd3a7ca3..9270330ec5ced 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2912,7 +2912,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted, } unlock_page(page); if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && - !F2FS_I(inode)->cp_task && allow_balance) + !F2FS_I(inode)->wb_task && allow_balance) f2fs_balance_fs(sbi, need_balance_fs); if (unlikely(f2fs_cp_error(sbi))) { @@ -3210,7 +3210,7 @@ static inline bool __should_serialize_io(struct inode *inode, struct writeback_control *wbc) { /* to avoid deadlock in path of data flush */ - if (F2FS_I(inode)->cp_task) + if (F2FS_I(inode)->wb_task) return false; if (!S_ISREG(inode->i_mode)) diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 3ebf976a682d5..ad0b83a412268 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -414,7 +414,8 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, struct extent_node *en; bool ret = false; - f2fs_bug_on(sbi, !et); + if (!et) + return false; trace_f2fs_lookup_extent_tree_start(inode, pgofs); @@ -762,9 +763,8 @@ void f2fs_drop_extent_tree(struct inode *inode) if (!f2fs_may_extent_tree(inode)) return; - set_inode_flag(inode, FI_NO_EXTENT); - write_lock(&et->lock); + set_inode_flag(inode, FI_NO_EXTENT); __free_extent_tree(sbi, et); if (et->largest.len) { et->largest.len = 0; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1066725c3c5d5..c03fdda1bddf6 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -235,6 +235,10 @@ enum { * condition of read on truncated area * by extent_cache */ + DATA_GENERIC_ENHANCE_UPDATE, /* + * strong check on range and segment + * bitmap for update case + */ META_GENERIC, }; @@ -697,6 +701,7 @@ struct f2fs_inode_info { unsigned int clevel; /* maximum level of given file name */ struct task_struct *task; /* lookup and create consistency */ struct task_struct *cp_task; /* separate cp/wb IO stats*/ + struct task_struct *wb_task; /* indicate inode is in context of writeback */ nid_t i_xattr_nid; /* node id that contains xattrs */ loff_t last_disk_size; /* lastly written file size */ spinlock_t i_size_lock; /* protect last_disk_size */ @@ -2422,24 +2427,31 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, return entry; } -static inline bool is_idle(struct f2fs_sb_info *sbi, int type) +static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) { - if (sbi->gc_mode == GC_URGENT_HIGH) - return true; - if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || get_pages(sbi, F2FS_WB_CP_DATA) || get_pages(sbi, F2FS_DIO_READ) || get_pages(sbi, F2FS_DIO_WRITE)) - return false; + return true; if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) - return false; + return true; if (SM_I(sbi) && SM_I(sbi)->fcc_info && atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) + return true; + return false; +} + +static inline bool is_idle(struct f2fs_sb_info *sbi, int type) +{ + if (sbi->gc_mode == GC_URGENT_HIGH) + return true; + + if (is_inflight_io(sbi, type)) return false; if (sbi->gc_mode == 
GC_URGENT_LOW && @@ -3389,7 +3401,8 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); void f2fs_update_dirty_page(struct inode *inode, struct page *page); void f2fs_remove_dirty_inode(struct inode *inode); -int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); +int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, + bool from_cp); void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 3b53fdebf03da..66ac048cc8998 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -977,7 +977,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, { struct page *node_page; nid_t nid; - unsigned int ofs_in_node; + unsigned int ofs_in_node, max_addrs, base; block_t source_blkaddr; nid = le32_to_cpu(sum->nid); @@ -1003,6 +1003,21 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, return false; } + if (IS_INODE(node_page)) { + base = offset_in_addr(F2FS_INODE(node_page)); + max_addrs = DEF_ADDRS_PER_INODE; + } else { + base = 0; + max_addrs = DEF_ADDRS_PER_BLOCK; + } + + if (base + ofs_in_node >= max_addrs) { + f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u", + base, ofs_in_node, max_addrs, dni->ino, dni->nid); + f2fs_put_page(node_page, 1); + return false; + } + *nofs = ofs_of_node(node_page); source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node); f2fs_put_page(node_page, 1); @@ -1641,8 +1656,9 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, get_valid_blocks(sbi, segno, false) == 0) seg_freed++; - if (__is_large_section(sbi) && segno + 1 < end_segno) - sbi->next_victim_seg[gc_type] = segno + 1; + if (__is_large_section(sbi)) + sbi->next_victim_seg[gc_type] = + (segno + 1 < end_segno) ? 
segno + 1 : NULL_SEGNO; skip: f2fs_put_page(sum_page, 0); } @@ -2027,8 +2043,6 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) if (err) return err; - set_sbi_flag(sbi, SBI_IS_RESIZEFS); - freeze_super(sbi->sb); down_write(&sbi->gc_lock); mutex_lock(&sbi->cp_mutex); @@ -2044,6 +2058,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) if (err) goto out_err; + set_sbi_flag(sbi, SBI_IS_RESIZEFS); err = free_segment_range(sbi, secs, false); if (err) goto recover_out; @@ -2067,6 +2082,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) f2fs_commit_super(sbi, false); } recover_out: + clear_sbi_flag(sbi, SBI_IS_RESIZEFS); if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); @@ -2079,6 +2095,5 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) mutex_unlock(&sbi->cp_mutex); up_write(&sbi->gc_lock); thaw_super(sbi->sb); - clear_sbi_flag(sbi, SBI_IS_RESIZEFS); return err; } diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 72ce131116791..c3c527afdd074 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -437,7 +437,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, struct dnode_of_data tdn = *dn; nid_t ino, nid; struct inode *inode; - unsigned int offset; + unsigned int offset, ofs_in_node, max_addrs; block_t bidx; int i; @@ -463,15 +463,24 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, got_it: /* Use the locked dnode page and inode */ nid = le32_to_cpu(sum.nid); + ofs_in_node = le16_to_cpu(sum.ofs_in_node); + + max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode); + if (ofs_in_node >= max_addrs) { + f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u", + ofs_in_node, dn->inode->i_ino, nid, max_addrs); + return -EFSCORRUPTED; + } + if (dn->inode->i_ino == nid) { tdn.nid = nid; if (!dn->inode_page_locked) lock_page(dn->inode_page); tdn.node_page = dn->inode_page; - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + tdn.ofs_in_node = ofs_in_node; goto truncate_out; } else if (dn->nid == nid) { - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + tdn.ofs_in_node = ofs_in_node; goto truncate_out; } @@ -661,6 +670,14 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, goto err; } + if (f2fs_is_valid_blkaddr(sbi, dest, + DATA_GENERIC_ENHANCE_UPDATE)) { + f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u", + dest, inode->i_ino, dn.ofs_in_node); + err = -EFSCORRUPTED; + goto err; + } + /* write dummy data page */ f2fs_replace_block(sbi, &dn, src, dest, ni.version, false, false); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 19224e7d2ad04..7c90d93f4e435 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -536,31 +536,38 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg) else f2fs_build_free_nids(sbi, false, false); - if (!is_idle(sbi, REQ_TIME) && - (!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi))) + if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) || + excess_prefree_segs(sbi)) + goto do_sync; + + /* there is background inflight IO or foreground operation recently */ + if (is_inflight_io(sbi, REQ_TIME) || + (!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem))) return; + /* exceed periodical checkpoint timeout threshold */ + if (f2fs_time_over(sbi, CP_TIME)) + goto do_sync; + /* checkpoint is the only way to shrink partial cached entries */ - if (!f2fs_available_free_memory(sbi, NAT_ENTRIES) || - 
!f2fs_available_free_memory(sbi, INO_ENTRIES) || - excess_prefree_segs(sbi) || - excess_dirty_nats(sbi) || - excess_dirty_nodes(sbi) || - f2fs_time_over(sbi, CP_TIME)) { - if (test_opt(sbi, DATA_FLUSH) && from_bg) { - struct blk_plug plug; - - mutex_lock(&sbi->flush_lock); - - blk_start_plug(&plug); - f2fs_sync_dirty_inodes(sbi, FILE_INODE); - blk_finish_plug(&plug); + if (f2fs_available_free_memory(sbi, NAT_ENTRIES) && + f2fs_available_free_memory(sbi, INO_ENTRIES)) + return; - mutex_unlock(&sbi->flush_lock); - } - f2fs_sync_fs(sbi->sb, true); - stat_inc_bg_cp_count(sbi->stat_info); +do_sync: + if (test_opt(sbi, DATA_FLUSH) && from_bg) { + struct blk_plug plug; + + mutex_lock(&sbi->flush_lock); + + blk_start_plug(&plug); + f2fs_sync_dirty_inodes(sbi, FILE_INODE, false); + blk_finish_plug(&plug); + + mutex_unlock(&sbi->flush_lock); } + f2fs_sync_fs(sbi->sb, true); + stat_inc_bg_cp_count(sbi->stat_info); } static int __submit_flush_wait(struct f2fs_sb_info *sbi, @@ -1525,7 +1532,7 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi, if (i + 1 < dpolicy->granularity) break; - if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered) + if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered) return __issue_discard_cmd_orderly(sbi, dpolicy); pend_list = &dcc->pend_list[i]; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index ccfb6c5a8fbc0..fba413ced9826 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -267,10 +267,10 @@ static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb, static inline void limit_reserve_root(struct f2fs_sb_info *sbi) { - block_t limit = min((sbi->user_block_count << 1) / 1000, + block_t limit = min((sbi->user_block_count >> 3), sbi->user_block_count - sbi->reserved_blocks); - /* limit is 0.2% */ + /* limit is 12.5% */ if (test_opt(sbi, RESERVE_ROOT) && F2FS_OPTION(sbi).root_reserved_blocks > limit) { F2FS_OPTION(sbi).root_reserved_blocks = limit; diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index 15ba36926fad7..cff94d095d0fe 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -261,13 +261,14 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) { - DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); struct page *page; index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT; page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED); if (!page || !PageUptodate(page)) { + DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); + if (page) put_page(page); else if (num_ra_pages > 1) diff --git a/fs/fcntl.c b/fs/fcntl.c index 71b43538fa44c..fcf34f83bf6a8 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -148,12 +148,17 @@ void f_delown(struct file *filp) pid_t f_getown(struct file *filp) { - pid_t pid; - read_lock(&filp->f_owner.lock); - pid = pid_vnr(filp->f_owner.pid); - if (filp->f_owner.pid_type == PIDTYPE_PGID) - pid = -pid; - read_unlock(&filp->f_owner.lock); + pid_t pid = 0; + + read_lock_irq(&filp->f_owner.lock); + rcu_read_lock(); + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) { + pid = pid_vnr(filp->f_owner.pid); + if (filp->f_owner.pid_type == PIDTYPE_PGID) + pid = -pid; + } + rcu_read_unlock(); + read_unlock_irq(&filp->f_owner.lock); return pid; } @@ -200,11 +205,14 @@ static int f_setown_ex(struct file *filp, unsigned long arg) static int f_getown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex __user *owner_p = (void __user *)arg; - struct f_owner_ex owner; + struct f_owner_ex owner = {}; int ret = 0; - read_lock(&filp->f_owner.lock); - 
owner.pid = pid_vnr(filp->f_owner.pid); + read_lock_irq(&filp->f_owner.lock); + rcu_read_lock(); + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) + owner.pid = pid_vnr(filp->f_owner.pid); + rcu_read_unlock(); switch (filp->f_owner.pid_type) { case PIDTYPE_PID: owner.type = F_OWNER_TID; @@ -223,7 +231,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg) ret = -EINVAL; break; } - read_unlock(&filp->f_owner.lock); + read_unlock_irq(&filp->f_owner.lock); if (!ret) { ret = copy_to_user(owner_p, &owner, sizeof(owner)); @@ -241,10 +249,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg) uid_t src[2]; int err; - read_lock(&filp->f_owner.lock); + read_lock_irq(&filp->f_owner.lock); src[0] = from_kuid(user_ns, filp->f_owner.uid); src[1] = from_kuid(user_ns, filp->f_owner.euid); - read_unlock(&filp->f_owner.lock); + read_unlock_irq(&filp->f_owner.lock); err = put_user(src[0], &dst[0]); err |= put_user(src[1], &dst[1]); diff --git a/fs/file.c b/fs/file.c index 8431dfde036cc..97a0cd31faec4 100644 --- a/fs/file.c +++ b/fs/file.c @@ -22,6 +22,8 @@ #include #include +#include "internal.h" + unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; /* our min() is unusable in constant expressions ;-/ */ @@ -780,9 +782,8 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags) } /* - * variant of __close_fd that gets a ref on the file for later fput. - * The caller must ensure that filp_close() called on the file, and then - * an fput(). + * See close_fd_get_file() below, this variant assumes current->files->file_lock + * is held. */ int __close_fd_get_file(unsigned int fd, struct file **res) { @@ -790,26 +791,39 @@ int __close_fd_get_file(unsigned int fd, struct file **res) struct file *file; struct fdtable *fdt; - spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) - goto out_unlock; + goto out_err; file = fdt->fd[fd]; if (!file) - goto out_unlock; + goto out_err; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); - spin_unlock(&files->file_lock); get_file(file); *res = file; return 0; - -out_unlock: - spin_unlock(&files->file_lock); +out_err: *res = NULL; return -ENOENT; } +/* + * variant of close_fd that gets a ref on the file for later fput. + * The caller must ensure that filp_close() called on the file, and then + * an fput(). 
+ */ +int close_fd_get_file(unsigned int fd, struct file **res) +{ + struct files_struct *files = current->files; + int ret; + + spin_lock(&files->file_lock); + ret = __close_fd_get_file(fd, res); + spin_unlock(&files->file_lock); + + return ret; +} + void do_close_on_exec(struct files_struct *files) { unsigned i; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 8e95a75a4559c..80a9e50392a09 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -205,7 +205,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) if (inode && fuse_is_bad(inode)) goto invalid; else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || - (flags & LOOKUP_REVAL)) { + (flags & (LOOKUP_EXCL | LOOKUP_REVAL))) { struct fuse_entry_out outarg; FUSE_ARGS(args); struct fuse_forget_link *forget; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d1bc96ee6eb3d..504389568dac5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -3275,10 +3275,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, .mode = mode }; int err; - bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || - (mode & FALLOC_FL_PUNCH_HOLE); - - bool block_faults = FUSE_IS_DAX(inode) && lock_inode; + bool block_faults = FUSE_IS_DAX(inode) && + (!(mode & FALLOC_FL_KEEP_SIZE) || + (mode & FALLOC_FL_PUNCH_HOLE)); if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; @@ -3286,22 +3285,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, if (fm->fc->no_fallocate) return -EOPNOTSUPP; - if (lock_inode) { - inode_lock(inode); - if (block_faults) { - down_write(&fi->i_mmap_sem); - err = fuse_dax_break_layouts(inode, 0, 0); - if (err) - goto out; - } + inode_lock(inode); + if (block_faults) { + down_write(&fi->i_mmap_sem); + err = fuse_dax_break_layouts(inode, 0, 0); + if (err) + goto out; + } - if (mode & FALLOC_FL_PUNCH_HOLE) { - loff_t endbyte = offset + length - 1; + if (mode & FALLOC_FL_PUNCH_HOLE) { + loff_t endbyte = offset + length - 1; - err = fuse_writeback_range(inode, offset, endbyte); - if (err) - goto out; - } + err = fuse_writeback_range(inode, offset, endbyte); + if (err) + goto out; } if (!(mode & FALLOC_FL_KEEP_SIZE) && @@ -3311,6 +3308,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, goto out; } + err = file_modified(file); + if (err) + goto out; + if (!(mode & FALLOC_FL_KEEP_SIZE)) set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); @@ -3347,8 +3348,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, if (block_faults) up_write(&fi->i_mmap_sem); - if (lock_inode) - inode_unlock(inode); + inode_unlock(inode); fuse_flush_time_update(inode); diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index bc267832310c7..d5294e663df50 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -77,8 +77,10 @@ static void fuse_add_dirent_to_cache(struct file *file, goto unlock; addr = kmap_atomic(page); - if (!offset) + if (!offset) { clear_page(addr); + SetPageUptodate(page); + } memcpy(addr + offset, dirent, reclen); kunmap_atomic(addr); fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen; @@ -516,6 +518,12 @@ static int fuse_readdir_cached(struct file *file, struct dir_context *ctx) page = find_get_page_flags(file->f_mapping, index, FGP_ACCESSED | FGP_LOCK); + /* Page gone missing, then re-added to cache, but not initialized? 
*/ + if (page && !PageUptodate(page)) { + unlock_page(page); + put_page(page); + page = NULL; + } spin_lock(&fi->rdc.lock); if (!page) { /* diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index b9ed6a6dbcf51..648f7336043f6 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -182,7 +182,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) pr_warn("Invalid superblock size\n"); return -EINVAL; } - + if (sb->sb_bsize_shift != ffs(sb->sb_bsize) - 1) { + pr_warn("Invalid block size shift\n"); + return -EINVAL; + } return 0; } @@ -381,8 +384,10 @@ static int init_names(struct gfs2_sbd *sdp, int silent) if (!table[0]) table = sdp->sd_vfs->s_id; - strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN); - strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN); + BUILD_BUG_ON(GFS2_LOCKNAME_LEN > GFS2_FSNAME_LEN); + + strscpy(sdp->sd_proto_name, proto, GFS2_LOCKNAME_LEN); + strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN); table = sdp->sd_table_name; while ((table = strchr(table, '/'))) @@ -1414,13 +1419,13 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param) switch (o) { case Opt_lockproto: - strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN); + strscpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN); break; case Opt_locktable: - strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN); + strscpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN); break; case Opt_hostdata: - strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN); + strscpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN); break; case Opt_spectator: args->ar_spectator = 1; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index f35a37c65e5ff..6fbde990e0566 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -454,14 +454,16 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) /* panic? */ return -EIO; + res = -EIO; + if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN) + goto out; fd.search_key->cat = HFS_I(main_inode)->cat_key; if (hfs_brec_find(&fd)) - /* panic? */ goto out; if (S_ISDIR(main_inode->i_mode)) { if (fd.entrylength < sizeof(struct hfs_cat_dir)) - /* panic? */; + goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_dir)); if (rec.type != HFS_CDR_DIR || @@ -474,6 +476,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_dir)); } else if (HFS_IS_RSRC(inode)) { + if (fd.entrylength < sizeof(struct hfs_cat_file)) + goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); hfs_inode_write_fork(inode, rec.file.RExtRec, @@ -482,7 +486,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) sizeof(struct hfs_cat_file)); } else { if (fd.entrylength < sizeof(struct hfs_cat_file)) - /* panic? 
*/; + goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); if (rec.type != HFS_CDR_FIL || @@ -499,9 +503,10 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); } + res = 0; out: hfs_find_exit(&fd); - return 0; + return res; } static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry, diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c index 39f5e343bf4d4..fdb0edb8a607d 100644 --- a/fs/hfs/trans.c +++ b/fs/hfs/trans.c @@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr if (nls_io) { wchar_t ch; - while (srclen > 0) { + while (srclen > 0 && dstlen > 0) { size = nls_io->char2uni(src, srclen, &ch); if (size < 0) { ch = '?'; diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index a92de5199ec33..c438680ef9f75 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -198,6 +198,8 @@ struct hfsplus_sb_info { #define HFSPLUS_SB_HFSX 3 #define HFSPLUS_SB_CASEFOLD 4 #define HFSPLUS_SB_NOBARRIER 5 +#define HFSPLUS_SB_UID 6 +#define HFSPLUS_SB_GID 7 static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb) { diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index e3da9e96b8357..c60d5ceb0d31c 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -187,11 +187,11 @@ static void hfsplus_get_perms(struct inode *inode, mode = be16_to_cpu(perms->mode); i_uid_write(inode, be32_to_cpu(perms->owner)); - if (!i_uid_read(inode) && !mode) + if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode)) inode->i_uid = sbi->uid; i_gid_write(inode, be32_to_cpu(perms->group)); - if (!i_gid_read(inode) && !mode) + if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode)) inode->i_gid = sbi->gid; if (dir) { @@ -497,8 +497,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) if (type == HFSPLUS_FOLDER) { struct hfsplus_cat_folder *folder = &entry.folder; - if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) - /* panic? */; + WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder)); hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, sizeof(struct hfsplus_cat_folder)); hfsplus_get_perms(inode, &folder->permissions, 1); @@ -518,8 +517,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) } else if (type == HFSPLUS_FILE) { struct hfsplus_cat_file *file = &entry.file; - if (fd->entrylength < sizeof(struct hfsplus_cat_file)) - /* panic? */; + WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file)); hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, sizeof(struct hfsplus_cat_file)); @@ -576,8 +574,7 @@ int hfsplus_cat_write_inode(struct inode *inode) if (S_ISDIR(main_inode->i_mode)) { struct hfsplus_cat_folder *folder = &entry.folder; - if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) - /* panic? */; + WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder)); hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, sizeof(struct hfsplus_cat_folder)); /* simple node checks? */ @@ -602,8 +599,7 @@ int hfsplus_cat_write_inode(struct inode *inode) } else { struct hfsplus_cat_file *file = &entry.file; - if (fd.entrylength < sizeof(struct hfsplus_cat_file)) - /* panic? 
*/; + WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file)); hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, sizeof(struct hfsplus_cat_file)); hfsplus_inode_write_fork(inode, &file->data_fork); diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c index 047e05c575601..c94a58762ad6d 100644 --- a/fs/hfsplus/options.c +++ b/fs/hfsplus/options.c @@ -140,6 +140,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) if (!uid_valid(sbi->uid)) { pr_err("invalid uid specified\n"); return 0; + } else { + set_bit(HFSPLUS_SB_UID, &sbi->flags); } break; case opt_gid: @@ -151,6 +153,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) if (!gid_valid(sbi->gid)) { pr_err("invalid gid specified\n"); return 0; + } else { + set_bit(HFSPLUS_SB_GID, &sbi->flags); } break; case opt_part: diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a2f43f1a85f8d..5181e6d4e18ca 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1261,7 +1261,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par case Opt_size: /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(param->string[0])) + if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->max_size_opt = memparse(param->string, &rest); ctx->max_val_type = SIZE_STD; @@ -1271,7 +1271,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par case Opt_nr_inodes: /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(param->string[0])) + if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->nr_inodes = memparse(param->string, &rest); return 0; @@ -1287,7 +1287,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par case Opt_min_size: /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(param->string[0])) + if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->min_size_opt = memparse(param->string, &rest); ctx->min_val_type = SIZE_STD; diff --git a/fs/inode.c b/fs/inode.c index 638d5d5bf42df..9f49e0bdc2f77 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -168,8 +168,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_wb_frn_history = 0; #endif - if (security_inode_alloc(inode)) - goto out; spin_lock_init(&inode->i_lock); lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); @@ -202,11 +200,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_fsnotify_mask = 0; #endif inode->i_flctx = NULL; + + if (unlikely(security_inode_alloc(inode))) + return -ENOMEM; this_cpu_inc(nr_inodes); return 0; -out: - return -ENOMEM; } EXPORT_SYMBOL(inode_init_always); diff --git a/fs/internal.h b/fs/internal.h index 5155f6ce95c79..06d313b9beecb 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -77,6 +77,8 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); int may_linkat(struct path *link); +int do_renameat2(int olddfd, struct filename *oldname, int newdfd, + struct filename *newname, unsigned int flags); /* * namespace.c @@ -132,6 +134,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *, const char *, const struct open_flags *); extern struct open_how build_open_how(int flags, umode_t mode); extern int build_open_flags(const struct open_how *how, struct open_flags *op); +extern int __close_fd_get_file(unsigned int fd, struct file **res); long do_sys_ftruncate(unsigned 
int fd, loff_t length, int small); int chmod_common(const struct path *path, umode_t mode); diff --git a/fs/io-wq.c b/fs/io-wq.c deleted file mode 100644 index 3d5fc76b92d01..0000000000000 --- a/fs/io-wq.c +++ /dev/null @@ -1,1242 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Basic worker thread pool for io_uring - * - * Copyright (C) 2019 Jens Axboe - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../kernel/sched/sched.h" -#include "io-wq.h" - -#define WORKER_IDLE_TIMEOUT (5 * HZ) - -enum { - IO_WORKER_F_UP = 1, /* up and active */ - IO_WORKER_F_RUNNING = 2, /* account as running */ - IO_WORKER_F_FREE = 4, /* worker on free list */ - IO_WORKER_F_FIXED = 8, /* static idle worker */ - IO_WORKER_F_BOUND = 16, /* is doing bounded work */ -}; - -enum { - IO_WQ_BIT_EXIT = 0, /* wq exiting */ - IO_WQ_BIT_CANCEL = 1, /* cancel work on list */ - IO_WQ_BIT_ERROR = 2, /* error on setup */ -}; - -enum { - IO_WQE_FLAG_STALLED = 1, /* stalled on hash */ -}; - -/* - * One for each thread in a wqe pool - */ -struct io_worker { - refcount_t ref; - unsigned flags; - struct hlist_nulls_node nulls_node; - struct list_head all_list; - struct task_struct *task; - struct io_wqe *wqe; - - struct io_wq_work *cur_work; - spinlock_t lock; - - struct rcu_head rcu; - struct mm_struct *mm; -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *blkcg_css; -#endif - const struct cred *cur_creds; - const struct cred *saved_creds; - struct files_struct *restore_files; - struct nsproxy *restore_nsproxy; - struct fs_struct *restore_fs; -}; - -#if BITS_PER_LONG == 64 -#define IO_WQ_HASH_ORDER 6 -#else -#define IO_WQ_HASH_ORDER 5 -#endif - -#define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER) - -struct io_wqe_acct { - unsigned nr_workers; - unsigned max_workers; - atomic_t nr_running; -}; - -enum { - IO_WQ_ACCT_BOUND, - IO_WQ_ACCT_UNBOUND, -}; - -/* - * Per-node worker thread pool - */ -struct io_wqe { - struct { - raw_spinlock_t lock; - struct io_wq_work_list work_list; - unsigned long hash_map; - unsigned flags; - } ____cacheline_aligned_in_smp; - - int node; - struct io_wqe_acct acct[2]; - - struct hlist_nulls_head free_list; - struct list_head all_list; - - struct io_wq *wq; - struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS]; -}; - -/* - * Per io_wq state - */ -struct io_wq { - struct io_wqe **wqes; - unsigned long state; - - free_work_fn *free_work; - io_wq_work_fn *do_work; - - struct task_struct *manager; - struct user_struct *user; - refcount_t refs; - struct completion done; - - struct hlist_node cpuhp_node; - - refcount_t use_refs; -}; - -static enum cpuhp_state io_wq_online; - -static bool io_worker_get(struct io_worker *worker) -{ - return refcount_inc_not_zero(&worker->ref); -} - -static void io_worker_release(struct io_worker *worker) -{ - if (refcount_dec_and_test(&worker->ref)) - wake_up_process(worker->task); -} - -/* - * Note: drops the wqe->lock if returning true! The caller must re-acquire - * the lock in that case. Some callers need to restart handling if this - * happens, so we can't just re-acquire the lock on behalf of the caller. 
- */ -static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker) -{ - bool dropped_lock = false; - - if (worker->saved_creds) { - revert_creds(worker->saved_creds); - worker->cur_creds = worker->saved_creds = NULL; - } - - if (current->files != worker->restore_files) { - __acquire(&wqe->lock); - raw_spin_unlock_irq(&wqe->lock); - dropped_lock = true; - - task_lock(current); - current->files = worker->restore_files; - current->nsproxy = worker->restore_nsproxy; - task_unlock(current); - } - - if (current->fs != worker->restore_fs) - current->fs = worker->restore_fs; - - /* - * If we have an active mm, we need to drop the wq lock before unusing - * it. If we do, return true and let the caller retry the idle loop. - */ - if (worker->mm) { - if (!dropped_lock) { - __acquire(&wqe->lock); - raw_spin_unlock_irq(&wqe->lock); - dropped_lock = true; - } - __set_current_state(TASK_RUNNING); - kthread_unuse_mm(worker->mm); - mmput(worker->mm); - worker->mm = NULL; - } - -#ifdef CONFIG_BLK_CGROUP - if (worker->blkcg_css) { - kthread_associate_blkcg(NULL); - worker->blkcg_css = NULL; - } -#endif - if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - return dropped_lock; -} - -static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, - struct io_wq_work *work) -{ - if (work->flags & IO_WQ_WORK_UNBOUND) - return &wqe->acct[IO_WQ_ACCT_UNBOUND]; - - return &wqe->acct[IO_WQ_ACCT_BOUND]; -} - -static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe, - struct io_worker *worker) -{ - if (worker->flags & IO_WORKER_F_BOUND) - return &wqe->acct[IO_WQ_ACCT_BOUND]; - - return &wqe->acct[IO_WQ_ACCT_UNBOUND]; -} - -static void io_worker_exit(struct io_worker *worker) -{ - struct io_wqe *wqe = worker->wqe; - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); - - /* - * If we're not at zero, someone else is holding a brief reference - * to the worker. Wait for that to go away. - */ - set_current_state(TASK_INTERRUPTIBLE); - if (!refcount_dec_and_test(&worker->ref)) - schedule(); - __set_current_state(TASK_RUNNING); - - preempt_disable(); - current->flags &= ~PF_IO_WORKER; - if (worker->flags & IO_WORKER_F_RUNNING) - atomic_dec(&acct->nr_running); - if (!(worker->flags & IO_WORKER_F_BOUND)) - atomic_dec(&wqe->wq->user->processes); - worker->flags = 0; - preempt_enable(); - - raw_spin_lock_irq(&wqe->lock); - hlist_nulls_del_rcu(&worker->nulls_node); - list_del_rcu(&worker->all_list); - if (__io_worker_unuse(wqe, worker)) { - __release(&wqe->lock); - raw_spin_lock_irq(&wqe->lock); - } - acct->nr_workers--; - raw_spin_unlock_irq(&wqe->lock); - - kfree_rcu(worker, rcu); - if (refcount_dec_and_test(&wqe->wq->refs)) - complete(&wqe->wq->done); -} - -static inline bool io_wqe_run_queue(struct io_wqe *wqe) - __must_hold(wqe->lock) -{ - if (!wq_list_empty(&wqe->work_list) && - !(wqe->flags & IO_WQE_FLAG_STALLED)) - return true; - return false; -} - -/* - * Check head of free list for an available worker. If one isn't available, - * caller must wake up the wq manager to create one. 
- */ -static bool io_wqe_activate_free_worker(struct io_wqe *wqe) - __must_hold(RCU) -{ - struct hlist_nulls_node *n; - struct io_worker *worker; - - n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list)); - if (is_a_nulls(n)) - return false; - - worker = hlist_nulls_entry(n, struct io_worker, nulls_node); - if (io_worker_get(worker)) { - wake_up_process(worker->task); - io_worker_release(worker); - return true; - } - - return false; -} - -/* - * We need a worker. If we find a free one, we're good. If not, and we're - * below the max number of workers, wake up the manager to create one. - */ -static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) -{ - bool ret; - - /* - * Most likely an attempt to queue unbounded work on an io_wq that - * wasn't setup with any unbounded workers. - */ - if (unlikely(!acct->max_workers)) - pr_warn_once("io-wq is not configured for unbound workers"); - - rcu_read_lock(); - ret = io_wqe_activate_free_worker(wqe); - rcu_read_unlock(); - - if (!ret && acct->nr_workers < acct->max_workers) - wake_up_process(wqe->wq->manager); -} - -static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker) -{ - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); - - atomic_inc(&acct->nr_running); -} - -static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker) - __must_hold(wqe->lock) -{ - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); - - if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) - io_wqe_wake_worker(wqe, acct); -} - -static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker) -{ - allow_kernel_signal(SIGINT); - - current->flags |= PF_IO_WORKER; - - worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); - worker->restore_files = current->files; - worker->restore_nsproxy = current->nsproxy; - worker->restore_fs = current->fs; - io_wqe_inc_running(wqe, worker); -} - -/* - * Worker will start processing some work. Move it to the busy list, if - * it's currently on the freelist - */ -static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, - struct io_wq_work *work) - __must_hold(wqe->lock) -{ - bool worker_bound, work_bound; - - if (worker->flags & IO_WORKER_F_FREE) { - worker->flags &= ~IO_WORKER_F_FREE; - hlist_nulls_del_init_rcu(&worker->nulls_node); - } - - /* - * If worker is moving from bound to unbound (or vice versa), then - * ensure we update the running accounting. - */ - worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0; - work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0; - if (worker_bound != work_bound) { - io_wqe_dec_running(wqe, worker); - if (work_bound) { - worker->flags |= IO_WORKER_F_BOUND; - wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--; - wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++; - atomic_dec(&wqe->wq->user->processes); - } else { - worker->flags &= ~IO_WORKER_F_BOUND; - wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++; - wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--; - atomic_inc(&wqe->wq->user->processes); - } - io_wqe_inc_running(wqe, worker); - } -} - -/* - * No work, worker going to sleep. Move to freelist, and unuse mm if we - * have one attached. Dropping the mm may potentially sleep, so we drop - * the lock in that case and return success. Since the caller has to - * retry the loop in that case (we changed task state), we don't regrab - * the lock if we return success. 
- */ -static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) - __must_hold(wqe->lock) -{ - if (!(worker->flags & IO_WORKER_F_FREE)) { - worker->flags |= IO_WORKER_F_FREE; - hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); - } - - return __io_worker_unuse(wqe, worker); -} - -static inline unsigned int io_get_work_hash(struct io_wq_work *work) -{ - return work->flags >> IO_WQ_HASH_SHIFT; -} - -static struct io_wq_work *io_get_next_work(struct io_wqe *wqe) - __must_hold(wqe->lock) -{ - struct io_wq_work_node *node, *prev; - struct io_wq_work *work, *tail; - unsigned int hash; - - wq_list_for_each(node, prev, &wqe->work_list) { - work = container_of(node, struct io_wq_work, list); - - /* not hashed, can run anytime */ - if (!io_wq_is_hashed(work)) { - wq_list_del(&wqe->work_list, node, prev); - return work; - } - - /* hashed, can run if not already running */ - hash = io_get_work_hash(work); - if (!(wqe->hash_map & BIT(hash))) { - wqe->hash_map |= BIT(hash); - /* all items with this hash lie in [work, tail] */ - tail = wqe->hash_tail[hash]; - wqe->hash_tail[hash] = NULL; - wq_list_cut(&wqe->work_list, &tail->list, prev); - return work; - } - } - - return NULL; -} - -static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work) -{ - if (worker->mm) { - kthread_unuse_mm(worker->mm); - mmput(worker->mm); - worker->mm = NULL; - } - - if (mmget_not_zero(work->identity->mm)) { - kthread_use_mm(work->identity->mm); - worker->mm = work->identity->mm; - return; - } - - /* failed grabbing mm, ensure work gets cancelled */ - work->flags |= IO_WQ_WORK_CANCEL; -} - -static inline void io_wq_switch_blkcg(struct io_worker *worker, - struct io_wq_work *work) -{ -#ifdef CONFIG_BLK_CGROUP - if (!(work->flags & IO_WQ_WORK_BLKCG)) - return; - if (work->identity->blkcg_css != worker->blkcg_css) { - kthread_associate_blkcg(work->identity->blkcg_css); - worker->blkcg_css = work->identity->blkcg_css; - } -#endif -} - -static void io_wq_switch_creds(struct io_worker *worker, - struct io_wq_work *work) -{ - const struct cred *old_creds = override_creds(work->identity->creds); - - worker->cur_creds = work->identity->creds; - if (worker->saved_creds) - put_cred(old_creds); /* creds set by previous switch */ - else - worker->saved_creds = old_creds; -} - -static void io_impersonate_work(struct io_worker *worker, - struct io_wq_work *work) -{ - if ((work->flags & IO_WQ_WORK_FILES) && - current->files != work->identity->files) { - task_lock(current); - current->files = work->identity->files; - current->nsproxy = work->identity->nsproxy; - task_unlock(current); - if (!work->identity->files) { - /* failed grabbing files, ensure work gets cancelled */ - work->flags |= IO_WQ_WORK_CANCEL; - } - } - if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs) - current->fs = work->identity->fs; - if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm) - io_wq_switch_mm(worker, work); - if ((work->flags & IO_WQ_WORK_CREDS) && - worker->cur_creds != work->identity->creds) - io_wq_switch_creds(worker, work); - if (work->flags & IO_WQ_WORK_FSIZE) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize; - else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - io_wq_switch_blkcg(worker, work); -#ifdef CONFIG_AUDIT - current->loginuid = work->identity->loginuid; - current->sessionid = work->identity->sessionid; -#endif -} - -static void 
io_assign_current_work(struct io_worker *worker, - struct io_wq_work *work) -{ - if (work) { - /* flush pending signals before assigning new work */ - if (signal_pending(current)) - flush_signals(current); - cond_resched(); - } - -#ifdef CONFIG_AUDIT - current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET); - current->sessionid = AUDIT_SID_UNSET; -#endif - - spin_lock_irq(&worker->lock); - worker->cur_work = work; - spin_unlock_irq(&worker->lock); -} - -static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work); - -static void io_worker_handle_work(struct io_worker *worker) - __releases(wqe->lock) -{ - struct io_wqe *wqe = worker->wqe; - struct io_wq *wq = wqe->wq; - - do { - struct io_wq_work *work; -get_next: - /* - * If we got some work, mark us as busy. If we didn't, but - * the list isn't empty, it means we stalled on hashed work. - * Mark us stalled so we don't keep looking for work when we - * can't make progress, any work completion or insertion will - * clear the stalled flag. - */ - work = io_get_next_work(wqe); - if (work) - __io_worker_busy(wqe, worker, work); - else if (!wq_list_empty(&wqe->work_list)) - wqe->flags |= IO_WQE_FLAG_STALLED; - - raw_spin_unlock_irq(&wqe->lock); - if (!work) - break; - io_assign_current_work(worker, work); - - /* handle a whole dependent link */ - do { - struct io_wq_work *old_work, *next_hashed, *linked; - unsigned int hash = io_get_work_hash(work); - - next_hashed = wq_next_work(work); - io_impersonate_work(worker, work); - /* - * OK to set IO_WQ_WORK_CANCEL even for uncancellable - * work, the worker function will do the right thing. - */ - if (test_bit(IO_WQ_BIT_CANCEL, &wq->state)) - work->flags |= IO_WQ_WORK_CANCEL; - - old_work = work; - linked = wq->do_work(work); - - work = next_hashed; - if (!work && linked && !io_wq_is_hashed(linked)) { - work = linked; - linked = NULL; - } - io_assign_current_work(worker, work); - wq->free_work(old_work); - - if (linked) - io_wqe_enqueue(wqe, linked); - - if (hash != -1U && !next_hashed) { - raw_spin_lock_irq(&wqe->lock); - wqe->hash_map &= ~BIT_ULL(hash); - wqe->flags &= ~IO_WQE_FLAG_STALLED; - /* skip unnecessary unlock-lock wqe->lock */ - if (!work) - goto get_next; - raw_spin_unlock_irq(&wqe->lock); - } - } while (work); - - raw_spin_lock_irq(&wqe->lock); - } while (1); -} - -static int io_wqe_worker(void *data) -{ - struct io_worker *worker = data; - struct io_wqe *wqe = worker->wqe; - struct io_wq *wq = wqe->wq; - - io_worker_start(wqe, worker); - - while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { - set_current_state(TASK_INTERRUPTIBLE); -loop: - raw_spin_lock_irq(&wqe->lock); - if (io_wqe_run_queue(wqe)) { - __set_current_state(TASK_RUNNING); - io_worker_handle_work(worker); - goto loop; - } - /* drops the lock on success, retry */ - if (__io_worker_idle(wqe, worker)) { - __release(&wqe->lock); - goto loop; - } - raw_spin_unlock_irq(&wqe->lock); - if (signal_pending(current)) - flush_signals(current); - if (schedule_timeout(WORKER_IDLE_TIMEOUT)) - continue; - /* timed out, exit unless we're the fixed worker */ - if (test_bit(IO_WQ_BIT_EXIT, &wq->state) || - !(worker->flags & IO_WORKER_F_FIXED)) - break; - } - - if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) { - raw_spin_lock_irq(&wqe->lock); - if (!wq_list_empty(&wqe->work_list)) - io_worker_handle_work(worker); - else - raw_spin_unlock_irq(&wqe->lock); - } - - io_worker_exit(worker); - return 0; -} - -/* - * Called when a worker is scheduled in. Mark us as currently running. 
- */ -void io_wq_worker_running(struct task_struct *tsk) -{ - struct io_worker *worker = kthread_data(tsk); - struct io_wqe *wqe = worker->wqe; - - if (!(worker->flags & IO_WORKER_F_UP)) - return; - if (worker->flags & IO_WORKER_F_RUNNING) - return; - worker->flags |= IO_WORKER_F_RUNNING; - io_wqe_inc_running(wqe, worker); -} - -/* - * Called when worker is going to sleep. If there are no workers currently - * running and we have work pending, wake up a free one or have the manager - * set one up. - */ -void io_wq_worker_sleeping(struct task_struct *tsk) -{ - struct io_worker *worker = kthread_data(tsk); - struct io_wqe *wqe = worker->wqe; - - if (!(worker->flags & IO_WORKER_F_UP)) - return; - if (!(worker->flags & IO_WORKER_F_RUNNING)) - return; - - worker->flags &= ~IO_WORKER_F_RUNNING; - - raw_spin_lock_irq(&wqe->lock); - io_wqe_dec_running(wqe, worker); - raw_spin_unlock_irq(&wqe->lock); -} - -static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) -{ - struct io_wqe_acct *acct = &wqe->acct[index]; - struct io_worker *worker; - - worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); - if (!worker) - return false; - - refcount_set(&worker->ref, 1); - worker->nulls_node.pprev = NULL; - worker->wqe = wqe; - spin_lock_init(&worker->lock); - - worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node, - "io_wqe_worker-%d/%d", index, wqe->node); - if (IS_ERR(worker->task)) { - kfree(worker); - return false; - } - kthread_bind_mask(worker->task, cpumask_of_node(wqe->node)); - - raw_spin_lock_irq(&wqe->lock); - hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); - list_add_tail_rcu(&worker->all_list, &wqe->all_list); - worker->flags |= IO_WORKER_F_FREE; - if (index == IO_WQ_ACCT_BOUND) - worker->flags |= IO_WORKER_F_BOUND; - if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND)) - worker->flags |= IO_WORKER_F_FIXED; - acct->nr_workers++; - raw_spin_unlock_irq(&wqe->lock); - - if (index == IO_WQ_ACCT_UNBOUND) - atomic_inc(&wq->user->processes); - - refcount_inc(&wq->refs); - wake_up_process(worker->task); - return true; -} - -static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index) - __must_hold(wqe->lock) -{ - struct io_wqe_acct *acct = &wqe->acct[index]; - - /* if we have available workers or no work, no need */ - if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe)) - return false; - return acct->nr_workers < acct->max_workers; -} - -static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data) -{ - send_sig(SIGINT, worker->task, 1); - return false; -} - -/* - * Iterate the passed in list and call the specific function for each - * worker that isn't exiting - */ -static bool io_wq_for_each_worker(struct io_wqe *wqe, - bool (*func)(struct io_worker *, void *), - void *data) -{ - struct io_worker *worker; - bool ret = false; - - list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { - if (io_worker_get(worker)) { - /* no task if node is/was offline */ - if (worker->task) - ret = func(worker, data); - io_worker_release(worker); - if (ret) - break; - } - } - - return ret; -} - -static bool io_wq_worker_wake(struct io_worker *worker, void *data) -{ - wake_up_process(worker->task); - return false; -} - -/* - * Manager thread. Tasked with creating new workers, if we need them. 
- */ -static int io_wq_manager(void *data) -{ - struct io_wq *wq = data; - int node; - - /* create fixed workers */ - refcount_set(&wq->refs, 1); - for_each_node(node) { - if (!node_online(node)) - continue; - if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND)) - continue; - set_bit(IO_WQ_BIT_ERROR, &wq->state); - set_bit(IO_WQ_BIT_EXIT, &wq->state); - goto out; - } - - complete(&wq->done); - - while (!kthread_should_stop()) { - if (current->task_works) - task_work_run(); - - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - bool fork_worker[2] = { false, false }; - - if (!node_online(node)) - continue; - - raw_spin_lock_irq(&wqe->lock); - if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND)) - fork_worker[IO_WQ_ACCT_BOUND] = true; - if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND)) - fork_worker[IO_WQ_ACCT_UNBOUND] = true; - raw_spin_unlock_irq(&wqe->lock); - if (fork_worker[IO_WQ_ACCT_BOUND]) - create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND); - if (fork_worker[IO_WQ_ACCT_UNBOUND]) - create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND); - } - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ); - } - - if (current->task_works) - task_work_run(); - -out: - if (refcount_dec_and_test(&wq->refs)) { - complete(&wq->done); - return 0; - } - /* if ERROR is set and we get here, we have workers to wake */ - if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { - rcu_read_lock(); - for_each_node(node) - io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); - rcu_read_unlock(); - } - return 0; -} - -static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct, - struct io_wq_work *work) -{ - bool free_worker; - - if (!(work->flags & IO_WQ_WORK_UNBOUND)) - return true; - if (atomic_read(&acct->nr_running)) - return true; - - rcu_read_lock(); - free_worker = !hlist_nulls_empty(&wqe->free_list); - rcu_read_unlock(); - if (free_worker) - return true; - - if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers && - !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN))) - return false; - - return true; -} - -static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) -{ - struct io_wq *wq = wqe->wq; - - do { - struct io_wq_work *old_work = work; - - work->flags |= IO_WQ_WORK_CANCEL; - work = wq->do_work(work); - wq->free_work(old_work); - } while (work); -} - -static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work) -{ - unsigned int hash; - struct io_wq_work *tail; - - if (!io_wq_is_hashed(work)) { -append: - wq_list_add_tail(&work->list, &wqe->work_list); - return; - } - - hash = io_get_work_hash(work); - tail = wqe->hash_tail[hash]; - wqe->hash_tail[hash] = work; - if (!tail) - goto append; - - wq_list_add_after(&work->list, &tail->list, &wqe->work_list); -} - -static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) -{ - struct io_wqe_acct *acct = io_work_get_acct(wqe, work); - bool do_wake; - unsigned long flags; - - /* - * Do early check to see if we need a new unbound worker, and if we do, - * if we're allowed to do so. This isn't 100% accurate as there's a - * gap between this check and incrementing the value, but that's OK. - * It's close enough to not be an issue, fork() has the same delay. 
- */ - if (unlikely(!io_wq_can_queue(wqe, acct, work))) { - io_run_cancel(work, wqe); - return; - } - - raw_spin_lock_irqsave(&wqe->lock, flags); - io_wqe_insert_work(wqe, work); - wqe->flags &= ~IO_WQE_FLAG_STALLED; - do_wake = (work->flags & IO_WQ_WORK_CONCURRENT) || - !atomic_read(&acct->nr_running); - raw_spin_unlock_irqrestore(&wqe->lock, flags); - - if (do_wake) - io_wqe_wake_worker(wqe, acct); -} - -void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) -{ - struct io_wqe *wqe = wq->wqes[numa_node_id()]; - - io_wqe_enqueue(wqe, work); -} - -/* - * Work items that hash to the same value will not be done in parallel. - * Used to limit concurrent writes, generally hashed by inode. - */ -void io_wq_hash_work(struct io_wq_work *work, void *val) -{ - unsigned int bit; - - bit = hash_ptr(val, IO_WQ_HASH_ORDER); - work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); -} - -void io_wq_cancel_all(struct io_wq *wq) -{ - int node; - - set_bit(IO_WQ_BIT_CANCEL, &wq->state); - - rcu_read_lock(); - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - - io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL); - } - rcu_read_unlock(); -} - -struct io_cb_cancel_data { - work_cancel_fn *fn; - void *data; - int nr_running; - int nr_pending; - bool cancel_all; -}; - -static bool io_wq_worker_cancel(struct io_worker *worker, void *data) -{ - struct io_cb_cancel_data *match = data; - unsigned long flags; - - /* - * Hold the lock to avoid ->cur_work going out of scope, caller - * may dereference the passed in work. - */ - spin_lock_irqsave(&worker->lock, flags); - if (worker->cur_work && - !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) && - match->fn(worker->cur_work, match->data)) { - send_sig(SIGINT, worker->task, 1); - match->nr_running++; - } - spin_unlock_irqrestore(&worker->lock, flags); - - return match->nr_running && !match->cancel_all; -} - -static inline void io_wqe_remove_pending(struct io_wqe *wqe, - struct io_wq_work *work, - struct io_wq_work_node *prev) -{ - unsigned int hash = io_get_work_hash(work); - struct io_wq_work *prev_work = NULL; - - if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { - if (prev) - prev_work = container_of(prev, struct io_wq_work, list); - if (prev_work && io_get_work_hash(prev_work) == hash) - wqe->hash_tail[hash] = prev_work; - else - wqe->hash_tail[hash] = NULL; - } - wq_list_del(&wqe->work_list, &work->list, prev); -} - -static void io_wqe_cancel_pending_work(struct io_wqe *wqe, - struct io_cb_cancel_data *match) -{ - struct io_wq_work_node *node, *prev; - struct io_wq_work *work; - unsigned long flags; - -retry: - raw_spin_lock_irqsave(&wqe->lock, flags); - wq_list_for_each(node, prev, &wqe->work_list) { - work = container_of(node, struct io_wq_work, list); - if (!match->fn(work, match->data)) - continue; - io_wqe_remove_pending(wqe, work, prev); - raw_spin_unlock_irqrestore(&wqe->lock, flags); - io_run_cancel(work, wqe); - match->nr_pending++; - if (!match->cancel_all) - return; - - /* not safe to continue after unlock */ - goto retry; - } - raw_spin_unlock_irqrestore(&wqe->lock, flags); -} - -static void io_wqe_cancel_running_work(struct io_wqe *wqe, - struct io_cb_cancel_data *match) -{ - rcu_read_lock(); - io_wq_for_each_worker(wqe, io_wq_worker_cancel, match); - rcu_read_unlock(); -} - -enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, - void *data, bool cancel_all) -{ - struct io_cb_cancel_data match = { - .fn = cancel, - .data = data, - .cancel_all = cancel_all, - }; - int node; - - /* - 
* First check pending list, if we're lucky we can just remove it - * from there. CANCEL_OK means that the work is returned as-new, - * no completion will be posted for it. - */ - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - - io_wqe_cancel_pending_work(wqe, &match); - if (match.nr_pending && !match.cancel_all) - return IO_WQ_CANCEL_OK; - } - - /* - * Now check if a free (going busy) or busy worker has the work - * currently running. If we find it there, we'll return CANCEL_RUNNING - * as an indication that we attempt to signal cancellation. The - * completion will run normally in this case. - */ - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - - io_wqe_cancel_running_work(wqe, &match); - if (match.nr_running && !match.cancel_all) - return IO_WQ_CANCEL_RUNNING; - } - - if (match.nr_running) - return IO_WQ_CANCEL_RUNNING; - if (match.nr_pending) - return IO_WQ_CANCEL_OK; - return IO_WQ_CANCEL_NOTFOUND; -} - -struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) -{ - int ret = -ENOMEM, node; - struct io_wq *wq; - - if (WARN_ON_ONCE(!data->free_work || !data->do_work)) - return ERR_PTR(-EINVAL); - if (WARN_ON_ONCE(!bounded)) - return ERR_PTR(-EINVAL); - - wq = kzalloc(sizeof(*wq), GFP_KERNEL); - if (!wq) - return ERR_PTR(-ENOMEM); - - wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL); - if (!wq->wqes) - goto err_wq; - - ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node); - if (ret) - goto err_wqes; - - wq->free_work = data->free_work; - wq->do_work = data->do_work; - - /* caller must already hold a reference to this */ - wq->user = data->user; - - ret = -ENOMEM; - for_each_node(node) { - struct io_wqe *wqe; - int alloc_node = node; - - if (!node_online(alloc_node)) - alloc_node = NUMA_NO_NODE; - wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node); - if (!wqe) - goto err; - wq->wqes[node] = wqe; - wqe->node = alloc_node; - wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; - atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0); - if (wq->user) { - wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = - task_rlimit(current, RLIMIT_NPROC); - } - atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0); - wqe->wq = wq; - raw_spin_lock_init(&wqe->lock); - INIT_WQ_LIST(&wqe->work_list); - INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0); - INIT_LIST_HEAD(&wqe->all_list); - } - - init_completion(&wq->done); - - wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager"); - if (!IS_ERR(wq->manager)) { - wake_up_process(wq->manager); - wait_for_completion(&wq->done); - if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { - ret = -ENOMEM; - goto err; - } - refcount_set(&wq->use_refs, 1); - reinit_completion(&wq->done); - return wq; - } - - ret = PTR_ERR(wq->manager); - complete(&wq->done); -err: - cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); - for_each_node(node) - kfree(wq->wqes[node]); -err_wqes: - kfree(wq->wqes); -err_wq: - kfree(wq); - return ERR_PTR(ret); -} - -bool io_wq_get(struct io_wq *wq, struct io_wq_data *data) -{ - if (data->free_work != wq->free_work || data->do_work != wq->do_work) - return false; - - return refcount_inc_not_zero(&wq->use_refs); -} - -static void __io_wq_destroy(struct io_wq *wq) -{ - int node; - - cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); - - set_bit(IO_WQ_BIT_EXIT, &wq->state); - if (wq->manager) - kthread_stop(wq->manager); - - rcu_read_lock(); - for_each_node(node) - io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); - 
rcu_read_unlock(); - - wait_for_completion(&wq->done); - - for_each_node(node) - kfree(wq->wqes[node]); - kfree(wq->wqes); - kfree(wq); -} - -void io_wq_destroy(struct io_wq *wq) -{ - if (refcount_dec_and_test(&wq->use_refs)) - __io_wq_destroy(wq); -} - -struct task_struct *io_wq_get_task(struct io_wq *wq) -{ - return wq->manager; -} - -static bool io_wq_worker_affinity(struct io_worker *worker, void *data) -{ - struct task_struct *task = worker->task; - struct rq_flags rf; - struct rq *rq; - - rq = task_rq_lock(task, &rf); - do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node)); - task->flags |= PF_NO_SETAFFINITY; - task_rq_unlock(rq, task, &rf); - return false; -} - -static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); - int i; - - rcu_read_lock(); - for_each_node(i) - io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL); - rcu_read_unlock(); - return 0; -} - -static __init int io_wq_init(void) -{ - int ret; - - ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online", - io_wq_cpu_online, NULL); - if (ret < 0) - return ret; - io_wq_online = ret; - return 0; -} -subsys_initcall(io_wq_init); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 98cfa73cb165b..fa24b407a9dcb 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -581,7 +581,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) journal->j_running_transaction = NULL; start_time = ktime_get(); commit_transaction->t_log_start = journal->j_head; - wake_up(&journal->j_wait_transaction_locked); + wake_up_all(&journal->j_wait_transaction_locked); write_unlock(&journal->j_state_lock); jbd_debug(3, "JBD2: commit phase 2a\n"); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index b748329bb0bab..6689d235de8a4 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -924,10 +924,16 @@ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks) for (i = j_fc_off - 1; i >= j_fc_off - num_blks; i--) { bh = journal->j_fc_wbuf[i]; wait_on_buffer(bh); + /* + * Update j_fc_off so jbd2_fc_release_bufs can release the + * remaining buffer heads.
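+ * On error we return before the put_bh() below, so the failed
+ * buffer and the buffers not yet waited on still hold references;
+ * pointing j_fc_off just past the failed index lets
+ * jbd2_fc_release_bufs start its release walk at that buffer.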
+ */ + if (unlikely(!buffer_uptodate(bh))) { + journal->j_fc_off = i + 1; + return -EIO; + } put_bh(bh); journal->j_fc_wbuf[i] = NULL; - if (unlikely(!buffer_uptodate(bh))) - return -EIO; } return 0; diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 1e07dfac4d811..1ae1697fe99bd 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -256,6 +256,7 @@ static int fc_do_one_pass(journal_t *journal, err = journal->j_fc_replay_callback(journal, bh, pass, next_fc_block - journal->j_fc_first, expected_commit_id); + brelse(bh); next_fc_block++; if (err < 0 || err == JBD2_FC_REPLAY_STOP) break; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 0f1cef90fa7d6..86472212cce17 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -173,7 +173,7 @@ static void wait_transaction_locked(journal_t *journal) int need_to_start; tid_t tid = journal->j_running_transaction->t_tid; - prepare_to_wait(&journal->j_wait_transaction_locked, &wait, + prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); @@ -199,7 +199,7 @@ static void wait_transaction_switching(journal_t *journal) read_unlock(&journal->j_state_lock); return; } - prepare_to_wait(&journal->j_wait_transaction_locked, &wait, + prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); read_unlock(&journal->j_state_lock); /* @@ -894,7 +894,7 @@ void jbd2_journal_unlock_updates (journal_t *journal) write_lock(&journal->j_state_lock); --journal->j_barrier_count; write_unlock(&journal->j_state_lock); - wake_up(&journal->j_wait_transaction_locked); + wake_up_all(&journal->j_wait_transaction_locked); } static void warn_dirty_buffer(struct buffer_head *bh) diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 0ce17ea8fa8a1..2c9493011aec3 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -155,7 +155,7 @@ int dbMount(struct inode *ipbmap) struct bmap *bmp; struct dbmap_disk *dbmp_le; struct metapage *mp; - int i; + int i, err; /* * allocate/initialize the in-memory bmap descriptor @@ -170,8 +170,8 @@ int dbMount(struct inode *ipbmap) BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage, PSIZE, 0); if (mp == NULL) { - kfree(bmp); - return -EIO; + err = -EIO; + goto err_kfree_bmp; } /* copy the on-disk bmap descriptor to its in-memory version. 
*/ @@ -181,9 +181,8 @@ int dbMount(struct inode *ipbmap) bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); if (!bmp->db_numag) { - release_metapage(mp); - kfree(bmp); - return -EINVAL; + err = -EINVAL; + goto err_release_metapage; } bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); @@ -194,6 +193,16 @@ int dbMount(struct inode *ipbmap) bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); + if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) { + err = -EINVAL; + goto err_release_metapage; + } + + if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) { + err = -EINVAL; + goto err_release_metapage; + } + for (i = 0; i < MAXAG; i++) bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]); bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize); @@ -214,6 +223,12 @@ int dbMount(struct inode *ipbmap) BMAP_LOCK_INIT(bmp); return (0); + +err_release_metapage: + release_metapage(mp); +err_kfree_bmp: + kfree(bmp); + return err; } diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index afb39e1bbe3bf..8b3c86a502daa 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -1519,8 +1519,11 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, mutex_lock(&kernfs_mutex); kn = kernfs_find_ns(parent, name, ns); - if (kn) + if (kn) { + kernfs_get(kn); __kernfs_remove(kn); + kernfs_put(kn); + } mutex_unlock(&kernfs_mutex); diff --git a/fs/libfs.c b/fs/libfs.c index 7124c2e8df2f5..aa0fbd720409a 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -955,8 +955,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, EXPORT_SYMBOL_GPL(simple_attr_read); /* interpret the buffer as a number to call the set function with */ -ssize_t simple_attr_write(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) +static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf, + size_t len, loff_t *ppos, bool is_signed) { struct simple_attr *attr; unsigned long long val; @@ -977,7 +977,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, goto out; attr->set_buf[size] = '\0'; - ret = kstrtoull(attr->set_buf, 0, &val); + if (is_signed) + ret = kstrtoll(attr->set_buf, 0, &val); + else + ret = kstrtoull(attr->set_buf, 0, &val); if (ret) goto out; ret = attr->set(attr->data, val); @@ -987,8 +990,21 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, mutex_unlock(&attr->mutex); return ret; } + +ssize_t simple_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return simple_attr_write_xsigned(file, buf, len, ppos, false); +} EXPORT_SYMBOL_GPL(simple_attr_write); +ssize_t simple_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return simple_attr_write_xsigned(file, buf, len, ppos, true); +} +EXPORT_SYMBOL_GPL(simple_attr_write_signed); + /** * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation * @sb: filesystem to do the file handle conversion on diff --git a/fs/locks.c b/fs/locks.c index 32c948fe29448..12d72c3d8756c 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2813,6 +2813,29 @@ int vfs_cancel_lock(struct file *filp, struct file_lock *fl) } EXPORT_SYMBOL_GPL(vfs_cancel_lock); +/** + * vfs_inode_has_locks - are any file locks held on @inode? + * @inode: inode to check for locks + * + * Return true if there are any FL_POSIX or FL_FLOCK locks currently + * set on @inode. 
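+ *
+ * Note that only the flc_posix and flc_flock lists are consulted;
+ * leases and delegations on @inode are not reported by this helper.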
+ */ +bool vfs_inode_has_locks(struct inode *inode) +{ + struct file_lock_context *ctx; + bool ret; + + ctx = smp_load_acquire(&inode->i_flctx); + if (!ctx) + return false; + + spin_lock(&ctx->flc_lock); + ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock); + spin_unlock(&ctx->flc_lock); + return ret; +} +EXPORT_SYMBOL_GPL(vfs_inode_has_locks); + #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> diff --git a/fs/mbcache.c b/fs/mbcache.c index 97c54d3a22276..95b047256d093 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -11,7 +11,7 @@ /* * Mbcache is a simple key-value store. Keys need not be unique, however * key-value pairs are expected to be unique (we use this fact in - * mb_cache_entry_delete()). + * mb_cache_entry_delete_or_get()). * * Ext2 and ext4 use this cache for deduplication of extended attribute blocks. * Ext4 also uses it for deduplication of xattr values stored in inodes. @@ -90,12 +90,19 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, return -ENOMEM; INIT_LIST_HEAD(&entry->e_list); - /* One ref for hash, one ref returned */ - atomic_set(&entry->e_refcnt, 1); + /* + * We create entry with two references. One reference is kept by the + * hash table, the other reference is used to protect us from + * mb_cache_entry_delete_or_get() until the entry is fully set up. This + * avoids nesting of cache->c_list_lock into hash table bit locks which + * is problematic for RT. + */ + atomic_set(&entry->e_refcnt, 2); entry->e_key = key; entry->e_value = value; - entry->e_reusable = reusable; - entry->e_referenced = 0; + entry->e_flags = 0; + if (reusable) + set_bit(MBE_REUSABLE_B, &entry->e_flags); head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { @@ -107,24 +114,41 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, } hlist_bl_add_head(&entry->e_hash_list, head); hlist_bl_unlock(head); - spin_lock(&cache->c_list_lock); list_add_tail(&entry->e_list, &cache->c_list); - /* Grab ref for LRU list */ - atomic_inc(&entry->e_refcnt); cache->c_entry_count++; spin_unlock(&cache->c_list_lock); + mb_cache_entry_put(cache, entry); return 0; } EXPORT_SYMBOL(mb_cache_entry_create); -void __mb_cache_entry_free(struct mb_cache_entry *entry) +void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry) { + struct hlist_bl_head *head; + + head = mb_cache_entry_head(cache, entry->e_key); + hlist_bl_lock(head); + hlist_bl_del(&entry->e_hash_list); + hlist_bl_unlock(head); kmem_cache_free(mb_entry_cache, entry); } EXPORT_SYMBOL(__mb_cache_entry_free); + +/* + * mb_cache_entry_wait_unused - wait to be the last user of the entry + * + * @entry - entry to work on + * + * Wait to be the last user of the entry.
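+ * "Last user" here means a reference count of 2: the reference held
+ * by the hash table plus the reference held by the caller itself.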
+ */ +void mb_cache_entry_wait_unused(struct mb_cache_entry *entry) +{ + wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2); +} +EXPORT_SYMBOL(mb_cache_entry_wait_unused); + static struct mb_cache_entry *__entry_find(struct mb_cache *cache, struct mb_cache_entry *entry, u32 key) @@ -142,10 +166,10 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache, while (node) { entry = hlist_bl_entry(node, struct mb_cache_entry, e_hash_list); - if (entry->e_key == key && entry->e_reusable) { - atomic_inc(&entry->e_refcnt); + if (entry->e_key == key && + test_bit(MBE_REUSABLE_B, &entry->e_flags) && + atomic_inc_not_zero(&entry->e_refcnt)) goto out; - } node = node->next; } entry = NULL; @@ -205,10 +229,9 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); hlist_bl_for_each_entry(entry, node, head, e_hash_list) { - if (entry->e_key == key && entry->e_value == value) { - atomic_inc(&entry->e_refcnt); + if (entry->e_key == key && entry->e_value == value && + atomic_inc_not_zero(&entry->e_refcnt)) goto out; - } } entry = NULL; out: @@ -217,7 +240,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, } EXPORT_SYMBOL(mb_cache_entry_get); -/* mb_cache_entry_delete - remove a cache entry +/* mb_cache_entry_delete - try to remove a cache entry * @cache - cache we work with * @key - key * @value - value @@ -254,6 +277,43 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value) } EXPORT_SYMBOL(mb_cache_entry_delete); +/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users + * @cache - cache we work with + * @key - key + * @value - value + * + * Remove entry from cache @cache with key @key and value @value. The removal + * happens only if the entry is unused. The function returns NULL in case the + * entry was successfully removed or there's no entry in cache. Otherwise the + * function grabs a reference to the entry that we failed to delete because it + * still has users and returns it.
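+ * The caller can then wait for the remaining users to go away with
+ * mb_cache_entry_wait_unused() and retry the delete afterwards.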
+ */ +struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, + u32 key, u64 value) +{ + struct mb_cache_entry *entry; + + entry = mb_cache_entry_get(cache, key, value); + if (!entry) + return NULL; + + /* + * Drop the ref we got from mb_cache_entry_get() and the initial hash + * ref if we are the last user + */ + if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2) + return entry; + + spin_lock(&cache->c_list_lock); + if (!list_empty(&entry->e_list)) + list_del_init(&entry->e_list); + cache->c_entry_count--; + spin_unlock(&cache->c_list_lock); + __mb_cache_entry_free(cache, entry); + return NULL; +} +EXPORT_SYMBOL(mb_cache_entry_delete_or_get); + /* mb_cache_entry_touch - cache entry got used * @cache - cache the entry belongs to * @entry - entry that got used @@ -263,7 +323,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete); void mb_cache_entry_touch(struct mb_cache *cache, struct mb_cache_entry *entry) { - entry->e_referenced = 1; + set_bit(MBE_REFERENCED_B, &entry->e_flags); } EXPORT_SYMBOL(mb_cache_entry_touch); @@ -281,34 +341,24 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache, unsigned long nr_to_scan) { struct mb_cache_entry *entry; - struct hlist_bl_head *head; unsigned long shrunk = 0; spin_lock(&cache->c_list_lock); while (nr_to_scan-- && !list_empty(&cache->c_list)) { entry = list_first_entry(&cache->c_list, struct mb_cache_entry, e_list); - if (entry->e_referenced) { - entry->e_referenced = 0; + /* Drop initial hash reference if there is no user */ + if (test_bit(MBE_REFERENCED_B, &entry->e_flags) || + atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) { + clear_bit(MBE_REFERENCED_B, &entry->e_flags); list_move_tail(&entry->e_list, &cache->c_list); continue; } list_del_init(&entry->e_list); cache->c_entry_count--; - /* - * We keep LRU list reference so that entry doesn't go away - * from under us. - */ spin_unlock(&cache->c_list_lock); - head = mb_cache_entry_head(cache, entry->e_key); - hlist_bl_lock(head); - if (!hlist_bl_unhashed(&entry->e_hash_list)) { - hlist_bl_del_init(&entry->e_hash_list); - atomic_dec(&entry->e_refcnt); - } - hlist_bl_unlock(head); - if (mb_cache_entry_put(cache, entry)) - shrunk++; + __mb_cache_entry_free(cache, entry); + shrunk++; cond_resched(); spin_lock(&cache->c_list_lock); } @@ -400,11 +450,6 @@ void mb_cache_destroy(struct mb_cache *cache) * point. */ list_for_each_entry_safe(entry, next, &cache->c_list, e_list) { - if (!hlist_bl_unhashed(&entry->e_hash_list)) { - hlist_bl_del_init(&entry->e_hash_list); - atomic_dec(&entry->e_refcnt); - } else - WARN_ON(1); list_del(&entry->e_list); WARN_ON(atomic_read(&entry->e_refcnt) != 1); mb_cache_entry_put(cache, entry); diff --git a/fs/namei.c b/fs/namei.c index eba2f13d229df..4159c140fa473 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -529,6 +529,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name) p->stack = p->internal; p->dfd = dfd; p->name = name; + p->path.mnt = NULL; + p->path.dentry = NULL; p->total_link_count = old ? 
old->total_link_count : 0; p->saved = old; current->nameidata = p; } @@ -602,6 +604,8 @@ static void terminate_walk(struct nameidata *nd) rcu_read_unlock(); } nd->depth = 0; + nd->path.mnt = NULL; + nd->path.dentry = NULL; } /* path_put is needed afterwards regardless of success or failure */ @@ -630,6 +634,11 @@ static inline bool legitimize_path(struct nameidata *nd, static bool legitimize_links(struct nameidata *nd) { int i; + if (unlikely(nd->flags & LOOKUP_CACHED)) { + drop_links(nd); + nd->depth = 0; + return false; + } for (i = 0; i < nd->depth; i++) { struct saved *last = nd->stack + i; if (unlikely(!legitimize_path(nd, &last->link, last->seq))) { @@ -705,19 +714,19 @@ static bool try_to_unlazy(struct nameidata *nd) } /** - * unlazy_child - try to switch to ref-walk mode. + * try_to_unlazy_next - try to switch to ref-walk mode. * @nd: nameidata pathwalk data - * @dentry: child of nd->path.dentry - * @seq: seq number to check dentry against - * Returns: 0 on success, -ECHILD on failure + * @dentry: next dentry to step into + * @seq: seq number to check @dentry against + * Returns: true on success, false on failure * - * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry - * for ref-walk mode. @dentry must be a path found by a do_lookup call on - * @nd. Must be called from rcu-walk context. - * Nothing should touch nameidata between unlazy_child() failure and + * Similar to try_to_unlazy(), but here we have the next dentry already + * picked by rcu-walk and want to legitimize that in addition to the current + * nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context. + * Nothing should touch nameidata between try_to_unlazy_next() failure and * terminate_walk(). */ -static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq) +static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsigned seq) { BUG_ON(!(nd->flags & LOOKUP_RCU)); @@ -747,7 +756,7 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned se if (unlikely(!legitimize_root(nd))) goto out_dput; rcu_read_unlock(); - return 0; + return true; out2: nd->path.mnt = NULL; @@ -755,11 +764,11 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned se nd->path.dentry = NULL; out: rcu_read_unlock(); - return -ECHILD; + return false; out_dput: rcu_read_unlock(); dput(dentry); - return -ECHILD; + return false; } static inline int d_revalidate(struct dentry *dentry, unsigned int flags) @@ -792,6 +801,7 @@ static int complete_walk(struct nameidata *nd) */ if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED))) nd->root.mnt = NULL; + nd->flags &= ~LOOKUP_CACHED; if (!try_to_unlazy(nd)) return -ECHILD; } @@ -1374,7 +1384,7 @@ static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry, return -ENOENT; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 0; - if (unlazy_child(nd, dentry, seq)) + if (!try_to_unlazy_next(nd, dentry, seq)) return -ECHILD; // *path might've been clobbered by __follow_mount_rcu() path->mnt = nd->path.mnt; @@ -1495,7 +1505,7 @@ static struct dentry *lookup_fast(struct nameidata *nd, status = d_revalidate(dentry, nd->flags); if (likely(status > 0)) return dentry; - if (unlazy_child(nd, dentry, seq)) + if (!try_to_unlazy_next(nd, dentry, seq)) return ERR_PTR(-ECHILD); if (unlikely(status == -ECHILD)) /* we'd been told to redo it in non-rcu mode */ @@ -2204,6 +2214,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags) int error; const
char *s = nd->name->name; + /* LOOKUP_CACHED requires RCU, ask caller to retry */ + if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED) + return ERR_PTR(-EAGAIN); + if (!*s) flags &= ~LOOKUP_RCU; if (flags & LOOKUP_RCU) @@ -2233,8 +2247,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags) } nd->root.mnt = NULL; - nd->path.mnt = NULL; - nd->path.dentry = NULL; /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */ if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) { @@ -4341,8 +4353,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, } EXPORT_SYMBOL(vfs_rename); -static int do_renameat2(int olddfd, const char __user *oldname, int newdfd, - const char __user *newname, unsigned int flags) +int do_renameat2(int olddfd, struct filename *from, int newdfd, + struct filename *to, unsigned int flags) { struct dentry *old_dentry, *new_dentry; struct dentry *trap; @@ -4350,32 +4362,30 @@ static int do_renameat2(int olddfd, const char __user *oldname, int newdfd, struct qstr old_last, new_last; int old_type, new_type; struct inode *delegated_inode = NULL; - struct filename *from; - struct filename *to; unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET; bool should_retry = false; - int error; + int error = -EINVAL; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) - return -EINVAL; + goto put_both; if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && (flags & RENAME_EXCHANGE)) - return -EINVAL; + goto put_both; if (flags & RENAME_EXCHANGE) target_flags = 0; retry: - from = filename_parentat(olddfd, getname(oldname), lookup_flags, - &old_path, &old_last, &old_type); + from = filename_parentat(olddfd, from, lookup_flags, &old_path, + &old_last, &old_type); if (IS_ERR(from)) { error = PTR_ERR(from); - goto exit; + goto put_new; } - to = filename_parentat(newdfd, getname(newname), lookup_flags, - &new_path, &new_last, &new_type); + to = filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last, + &new_type); if (IS_ERR(to)) { error = PTR_ERR(to); goto exit1; @@ -4468,34 +4478,40 @@ static int do_renameat2(int olddfd, const char __user *oldname, int newdfd, if (retry_estale(error, lookup_flags)) should_retry = true; path_put(&new_path); - putname(to); exit1: path_put(&old_path); - putname(from); if (should_retry) { should_retry = false; lookup_flags |= LOOKUP_REVAL; goto retry; } -exit: +put_both: + if (!IS_ERR(from)) + putname(from); +put_new: + if (!IS_ERR(to)) + putname(to); return error; } SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags) { - return do_renameat2(olddfd, oldname, newdfd, newname, flags); + return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname), + flags); } SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname) { - return do_renameat2(olddfd, oldname, newdfd, newname, 0); + return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname), + 0); } SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname) { - return do_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); + return do_renameat2(AT_FDCWD, getname(oldname), AT_FDCWD, + getname(newname), 0); } int readlink_copy(char __user *buffer, int buflen, const char *link) @@ -4633,7 +4649,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; - void 
*fsdata; + void *fsdata = NULL; int err; unsigned int flags = 0; if (nofs) diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index d6ac2c4f88b6b..1eb6c7a142ff0 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -228,8 +228,7 @@ static int nfs_delegation_claim_opens(struct inode *inode, * */ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred, - fmode_t type, - const nfs4_stateid *stateid, + fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit) { struct nfs_delegation *delegation; @@ -239,25 +238,24 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred, delegation = rcu_dereference(NFS_I(inode)->delegation); if (delegation != NULL) { spin_lock(&delegation->lock); - if (nfs4_is_valid_delegation(delegation, 0)) { - nfs4_stateid_copy(&delegation->stateid, stateid); - delegation->type = type; - delegation->pagemod_limit = pagemod_limit; - oldcred = delegation->cred; - delegation->cred = get_cred(cred); - clear_bit(NFS_DELEGATION_NEED_RECLAIM, - &delegation->flags); - spin_unlock(&delegation->lock); - rcu_read_unlock(); - put_cred(oldcred); - trace_nfs4_reclaim_delegation(inode, type); - return; - } - /* We appear to have raced with a delegation return. */ + nfs4_stateid_copy(&delegation->stateid, stateid); + delegation->type = type; + delegation->pagemod_limit = pagemod_limit; + oldcred = delegation->cred; + delegation->cred = get_cred(cred); + clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); + if (test_and_clear_bit(NFS_DELEGATION_REVOKED, + &delegation->flags)) + atomic_long_inc(&nfs_active_delegations); spin_unlock(&delegation->lock); + rcu_read_unlock(); + put_cred(oldcred); + trace_nfs4_reclaim_delegation(inode, type); + } else { + rcu_read_unlock(); + nfs_inode_set_delegation(inode, cred, type, stateid, + pagemod_limit); } - rcu_read_unlock(); - nfs_inode_set_delegation(inode, cred, type, stateid, pagemod_limit); } static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync) diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index ae5ed3a074943..deecfb50dd7e3 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -783,6 +783,12 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, return &fl->generic_hdr; } +static bool +filelayout_lseg_is_striped(const struct nfs4_filelayout_segment *flseg) +{ + return flseg->num_fh > 1; +} + /* * filelayout_pg_test(). 
Called by nfs_can_coalesce_requests() * @@ -803,6 +809,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, size = pnfs_generic_pg_test(pgio, prev, req); if (!size) return 0; + else if (!filelayout_lseg_is_striped(FILELAYOUT_LSEG(pgio->pg_lseg))) + return size; /* see if req and prev are in the same stripe */ if (prev) { diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 2bcbe38afe2e7..1f03445b5cb43 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -147,7 +147,7 @@ struct vfsmount *nfs_d_automount(struct path *path) struct nfs_fs_context *ctx; struct fs_context *fc; struct vfsmount *mnt = ERR_PTR(-ENOMEM); - struct nfs_server *server = NFS_SERVER(d_inode(path->dentry)); + struct nfs_server *server = NFS_SB(path->dentry->d_sb); struct nfs_client *client = server->nfs_client; int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); int ret; diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 0e6437b08a3a5..252c99c76a42d 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -346,6 +346,7 @@ int nfs40_init_client(struct nfs_client *clp) ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE, "NFSv4.0 transport Slot table"); if (ret) { + nfs4_shutdown_slot_table(tbl); kfree(tbl); return ret; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 03f09399abf4f..ee46ab09e3306 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -130,6 +130,11 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry, if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) return NULL; + label->lfs = 0; + label->pi = 0; + label->len = 0; + label->label = NULL; + err = security_dentry_init_security(dentry, sattr->ia_mode, &dentry->d_name, (void **)&label->label, &label->len); if (err == 0) @@ -2121,18 +2126,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context } static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, - fmode_t fmode) + fmode_t fmode) { struct nfs4_state *newstate; + struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); + int openflags = opendata->o_arg.open_flags; int ret; if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) return 0; - opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; - opendata->o_arg.share_access = nfs4_map_atomic_open_share( - NFS_SB(opendata->dentry->d_sb), - fmode, 0); + opendata->o_arg.share_access = + nfs4_map_atomic_open_share(server, fmode, openflags); memset(&opendata->o_res, 0, sizeof(opendata->o_res)); memset(&opendata->c_res, 0, sizeof(opendata->c_res)); nfs4_init_opendata_res(opendata); @@ -2708,10 +2713,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s struct nfs4_opendata *opendata; int ret; - opendata = nfs4_open_recoverdata_alloc(ctx, state, - NFS4_OPEN_CLAIM_FH); + opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); if (IS_ERR(opendata)) return PTR_ERR(opendata); + /* + * We're not recovering a delegation, so ask for no delegation. + * Otherwise the recovery thread could deadlock with an outstanding + * delegation return. 
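+ * O_DIRECT acts as the "no delegation" request here because
+ * nfs4_map_atomic_open_share() maps it to NFS4_SHARE_WANT_NO_DELEG.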
+ */ + opendata->o_arg.open_flags = O_DIRECT; ret = nfs4_open_recover(opendata, state); if (ret == -ESTALE) d_drop(ctx->dentry); @@ -3793,7 +3803,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr, int *opened) { struct nfs4_state *state; - struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; + struct nfs4_label l, *label; label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); @@ -4557,7 +4567,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, int flags) { struct nfs_server *server = NFS_SERVER(dir); - struct nfs4_label l, *ilabel = NULL; + struct nfs4_label l, *ilabel; struct nfs_open_context *ctx; struct nfs4_state *state; int status = 0; @@ -4916,7 +4926,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, struct nfs4_exception exception = { .interruptible = true, }; - struct nfs4_label l, *label = NULL; + struct nfs4_label l, *label; int err; label = nfs4_label_init_security(dir, dentry, sattr, &l); @@ -4957,7 +4967,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, struct nfs4_exception exception = { .interruptible = true, }; - struct nfs4_label l, *label = NULL; + struct nfs4_label l, *label; int err; label = nfs4_label_init_security(dir, dentry, sattr, &l); @@ -5078,7 +5088,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, struct nfs4_exception exception = { .interruptible = true, }; - struct nfs4_label l, *label = NULL; + struct nfs4_label l, *label; int err; label = nfs4_label_init_security(dir, dentry, sattr, &l); @@ -7014,6 +7024,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) { struct nfs4_lockdata *data = calldata; struct nfs4_lock_state *lsp = data->lsp; + struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry)); dprintk("%s: begin!\n", __func__); @@ -7023,8 +7034,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) data->rpc_status = task->tk_status; switch (task->tk_status) { case 0: - renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), - data->timestamp); + renew_lease(server, data->timestamp); if (data->arg.new_lock && !data->cancelled) { data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) @@ -7045,6 +7055,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) if (!nfs4_stateid_match(&data->arg.open_stateid, &lsp->ls_state->open_stateid)) goto out_restart; + else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN) + goto out_restart; } else if (!nfs4_stateid_match(&data->arg.lock_stateid, &lsp->ls_stateid)) goto out_restart; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a8fe8f84c5ae0..175b2e064003e 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1226,6 +1226,8 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) if (IS_ERR(task)) { printk(KERN_ERR "%s: kthread_run: %ld\n", __func__, PTR_ERR(task)); + if (!nfs_client_init_is_complete(clp)) + nfs_mark_client_ready(clp, PTR_ERR(task)); nfs4_clear_state_manager_bit(clp); nfs_put_client(clp); module_put(THIS_MODULE); @@ -1777,6 +1779,7 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) { + set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); /* Mark all delegations for reclaim */ nfs_delegation_mark_reclaim(clp); nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); @@ -2642,6 +2645,7 @@ 
static void nfs4_state_manager(struct nfs_client *clp) if (status < 0) goto out_error; nfs4_state_end_reclaim_reboot(clp); + continue; } /* Detect expired delegations... */ diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e2f0e3446e22a..f1e599553f2be 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4166,19 +4166,17 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, p = xdr_inline_decode(xdr, len); if (unlikely(!p)) return -EIO; + bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL; if (len < NFS4_MAXLABELLEN) { - if (label) { - if (label->len) { - if (label->len < len) - return -ERANGE; - memcpy(label->label, p, len); - } + if (label && label->len) { + if (label->len < len) + return -ERANGE; + memcpy(label->label, p, len); label->len = len; label->pi = pi; label->lfs = lfs; status = NFS_ATTR_FATTR_V4_SECURITY_LABEL; } - bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL; } else printk(KERN_WARNING "%s: label too long (%u)!\n", __func__, len); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 4034102010f05..b3fcc27b95648 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1029,22 +1029,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx) if (ctx && ctx->bsize) sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits); - if (server->nfs_client->rpc_ops->version != 2) { - /* The VFS shouldn't apply the umask to mode bits. We will do - * so ourselves when necessary. + switch (server->nfs_client->rpc_ops->version) { + case 2: + sb->s_time_gran = 1000; + sb->s_time_min = 0; + sb->s_time_max = U32_MAX; + break; + case 3: + /* + * The VFS shouldn't apply the umask to mode bits. + * We will do so ourselves when necessary. */ sb->s_flags |= SB_POSIXACL; sb->s_time_gran = 1; - sb->s_export_op = &nfs_export_ops; - } else - sb->s_time_gran = 1000; - - if (server->nfs_client->rpc_ops->version != 4) { sb->s_time_min = 0; sb->s_time_max = U32_MAX; - } else { + sb->s_export_op = &nfs_export_ops; + break; + case 4: + sb->s_flags |= SB_POSIXACL; + sb->s_time_gran = 1; sb->s_time_min = S64_MIN; sb->s_time_max = S64_MAX; + if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) + sb->s_export_op = &nfs_export_ops; + break; } sb->s_magic = NFS_SUPER_MAGIC; diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 7346acda9d767..02d3d2f0e6168 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -42,9 +42,6 @@ struct nfsd_net { bool grace_ended; time64_t boot_time; - /* internal mount of the "nfsd" pseudofilesystem: */ - struct vfsmount *nfsd_mnt; - struct dentry *nfsd_client_dir; /* @@ -121,6 +118,9 @@ struct nfsd_net { wait_queue_head_t ntf_wq; atomic_t ntf_refcnt; + /* Allow umount to wait for nfsd state cleanup */ + struct completion nfsd_shutdown_complete; + /* * clientid and stateid data for construction of net unique COPY * stateids. 
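The nfsd_shutdown_complete member added above pairs with the nfsd_complete_shutdown() and nfsd_shutdown_threads() hunks in fs/nfsd/nfssvc.c later in this patch: unmounting the nfsd pseudo-filesystem must not return while per-net state teardown may still be running on another task, since teardown is driven by a refcount rather than by the unmounting task itself. Below is a minimal sketch of the same completion handshake; the demo_* names are invented for illustration and are not part of the patch.

	#include <linux/completion.h>

	struct demo_serv;				/* stand-in for struct svc_serv */
	void demo_serv_put(struct demo_serv *serv);	/* may drop the last reference */

	struct demo_net {
		struct demo_serv *serv;
		struct completion shutdown_complete;
	};

	static void demo_net_init(struct demo_net *dn, struct demo_serv *serv)
	{
		dn->serv = serv;
		init_completion(&dn->shutdown_complete);
	}

	/* Called from the serv's final-release path, which is not
	 * necessarily the task that asked for shutdown. */
	static void demo_complete_shutdown(struct demo_net *dn)
	{
		dn->serv = NULL;
		complete(&dn->shutdown_complete);
	}

	/* Umount side: kick teardown, then block until it has really finished. */
	static void demo_shutdown_and_wait(struct demo_net *dn)
	{
		if (!dn->serv)
			return;
		demo_serv_put(dn->serv);
		wait_for_completion(&dn->shutdown_complete);
	}

The ordering in the real patch is the same: init_completion() in nfsd_create_serv(), complete() in nfsd_complete_shutdown(), and wait_for_completion() at the end of nfsd_shutdown_threads().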
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 7325592b456e5..af2064e36ac61 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -915,11 +915,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c args.authflavor = clp->cl_cred.cr_flavor; clp->cl_cb_ident = conn->cb_ident; } else { - if (!conn->cb_xprt) { - trace_nfsd_cb_setup_err(clp, -EINVAL); + if (!conn->cb_xprt) return -EINVAL; - } - clp->cl_cb_conn.cb_xprt = conn->cb_xprt; clp->cl_cb_session = ses; args.bc_xprt = conn->cb_xprt; args.prognumber = clp->cl_cb_session->se_cb_prog; @@ -939,6 +936,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c rpc_shutdown_client(client); return -ENOMEM; } + + if (clp->cl_minorversion != 0) + clp->cl_cb_conn.cb_xprt = conn->cb_xprt; clp->cl_cb_client = client; clp->cl_cb_cred = cred; trace_nfsd_cb_setup(clp); diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index f9b730c43192d..83c4e68839537 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -815,8 +815,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, princhash.data = memdup_user( &ci->cc_princhash.cp_data, princhashlen); - if (IS_ERR_OR_NULL(princhash.data)) + if (IS_ERR_OR_NULL(princhash.data)) { + kfree(name.data); return -EFAULT; + } princhash.len = princhashlen; } else princhash.len = 0; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f1b503bec2221..cb13a16496320 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -507,15 +507,26 @@ find_any_file(struct nfs4_file *f) return ret; } -static struct nfsd_file *find_deleg_file(struct nfs4_file *f) +static struct nfsd_file *find_any_file_locked(struct nfs4_file *f) { - struct nfsd_file *ret = NULL; + lockdep_assert_held(&f->fi_lock); + + if (f->fi_fds[O_RDWR]) + return f->fi_fds[O_RDWR]; + if (f->fi_fds[O_WRONLY]) + return f->fi_fds[O_WRONLY]; + if (f->fi_fds[O_RDONLY]) + return f->fi_fds[O_RDONLY]; + return NULL; +} + +static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f) +{ + lockdep_assert_held(&f->fi_lock); - spin_lock(&f->fi_lock); if (f->fi_deleg_file) - ret = nfsd_file_get(f->fi_deleg_file); - spin_unlock(&f->fi_lock); - return ret; + return f->fi_deleg_file; + return NULL; } static atomic_long_t num_delegations; @@ -843,6 +854,7 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) static void nfs4_free_deleg(struct nfs4_stid *stid) { + WARN_ON(!list_empty(&stid->sc_cp_list)); kmem_cache_free(deleg_slab, stid); atomic_long_dec(&num_delegations); } @@ -1358,6 +1370,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid) release_all_access(stp); if (stp->st_stateowner) nfs4_put_stateowner(stp->st_stateowner); + WARN_ON(!list_empty(&stid->sc_cp_list)); kmem_cache_free(stateid_slab, stid); } @@ -2460,9 +2473,11 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) ols = openlockstateid(st); oo = ols->st_stateowner; nf = st->sc_file; - file = find_any_file(nf); + + spin_lock(&nf->fi_lock); + file = find_any_file_locked(nf); if (!file) - return 0; + goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2484,8 +2499,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", "); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); - nfsd_file_put(file); - +out: + spin_unlock(&nf->fi_lock); return 0; } @@ -2499,9 +2514,10 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) ols = openlockstateid(st); oo 
= ols->st_stateowner; nf = st->sc_file; - file = find_any_file(nf); + spin_lock(&nf->fi_lock); + file = find_any_file_locked(nf); if (!file) - return 0; + goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2521,8 +2537,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", "); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); - nfsd_file_put(file); - +out: + spin_unlock(&nf->fi_lock); return 0; } @@ -2534,9 +2550,10 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) ds = delegstateid(st); nf = st->sc_file; - file = find_deleg_file(nf); + spin_lock(&nf->fi_lock); + file = find_deleg_file_locked(nf); if (!file) - return 0; + goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2552,8 +2569,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, " }\n"); - nfsd_file_put(file); - +out: + spin_unlock(&nf->fi_lock); return 0; } @@ -6207,6 +6224,7 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) struct nfs4_client *clp = s->st_stid.sc_client; bool unhashed; LIST_HEAD(reaplist); + struct nfs4_ol_stateid *stp; spin_lock(&clp->cl_lock); unhashed = unhash_open_stateid(s, &reaplist); @@ -6215,6 +6233,8 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) if (unhashed) put_ol_stateid_locked(s, &reaplist); spin_unlock(&clp->cl_lock); + list_for_each_entry(stp, &reaplist, st_locks) + nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); free_ol_stateid_reaplist(&reaplist); } else { spin_unlock(&clp->cl_lock); @@ -7374,14 +7394,9 @@ nfs4_state_start_net(struct net *net) struct nfsd_net *nn = net_generic(net, nfsd_net_id); int ret; - ret = get_nfsdfs(net); - if (ret) - return ret; ret = nfs4_state_create_net(net); - if (ret) { - mntput(nn->nfsd_mnt); + if (ret) return ret; - } locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) @@ -7451,7 +7466,6 @@ nfs4_state_shutdown_net(struct net *net) nfsd4_client_tracking_exit(net); nfs4_state_destroy_net(net); - mntput(nn->nfsd_mnt); } void diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 46f825cf53f4f..1e3410a9b03eb 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3405,6 +3405,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, case nfserr_noent: xdr_truncate_encode(xdr, start_offset); goto skip_entry; + case nfserr_jukebox: + /* + * The pseudoroot should only display dentries that lead to + * exports. If we get EJUKEBOX here, then we can't tell whether + * this entry should be included. Just fail the whole READDIR + * with NFS4ERR_DELAY in that case, and hope that the situation + * will resolve itself by the client's next attempt. 
+ */ + if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT) + goto fail; + fallthrough; default: /* * If the client requested the RDATTR_ERROR attribute, @@ -3871,7 +3882,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, if (resp->xdr.buf->page_len && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) { WARN_ON_ONCE(1); - return nfserr_resource; + return nfserr_serverfault; } xdr_commit_encode(xdr); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index dedec4771ecc2..c4b11560ac1b6 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1417,6 +1417,8 @@ static void nfsd_umount(struct super_block *sb) { struct net *net = sb->s_fs_info; + nfsd_shutdown_threads(net); + kill_litter_super(sb); put_net(net); } @@ -1429,18 +1431,6 @@ static struct file_system_type nfsd_fs_type = { }; MODULE_ALIAS_FS("nfsd"); -int get_nfsdfs(struct net *net) -{ - struct nfsd_net *nn = net_generic(net, nfsd_net_id); - struct vfsmount *mnt; - - mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); - if (IS_ERR(mnt)) - return PTR_ERR(mnt); - nn->nfsd_mnt = mnt; - return 0; -} - #ifdef CONFIG_PROC_FS static int create_proc_exports_entry(void) { diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index cb742e17e04a9..4362d295ed340 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -85,13 +85,12 @@ int nfsd_get_nrthreads(int n, int *, struct net *); int nfsd_set_nrthreads(int n, int *, struct net *); int nfsd_pool_stats_open(struct inode *, struct file *); int nfsd_pool_stats_release(struct inode *, struct file *); +void nfsd_shutdown_threads(struct net *net); void nfsd_destroy(struct net *net); bool i_am_nfsd(void); -int get_nfsdfs(struct net *); - struct nfsdfs_client { struct kref cl_ref; void (*cl_release)(struct kref *kref); diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 9323e30a7eafe..2e61a565cdbd8 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -426,8 +426,8 @@ static void nfsd_shutdown_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - nfsd_file_cache_shutdown_net(net); nfs4_state_shutdown_net(net); + nfsd_file_cache_shutdown_net(net); if (nn->lockd_up) { lockd_down(net); nn->lockd_up = false; @@ -600,6 +600,37 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = { .svo_module = THIS_MODULE, }; +static void nfsd_complete_shutdown(struct net *net) +{ + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + + WARN_ON(!mutex_is_locked(&nfsd_mutex)); + + nn->nfsd_serv = NULL; + complete(&nn->nfsd_shutdown_complete); +} + +void nfsd_shutdown_threads(struct net *net) +{ + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + struct svc_serv *serv; + + mutex_lock(&nfsd_mutex); + serv = nn->nfsd_serv; + if (serv == NULL) { + mutex_unlock(&nfsd_mutex); + return; + } + + svc_get(serv); + /* Kill outstanding nfsd threads */ + serv->sv_ops->svo_setup(serv, NULL, 0); + nfsd_destroy(net); + mutex_unlock(&nfsd_mutex); + /* Wait for shutdown of nfsd_serv to complete */ + wait_for_completion(&nn->nfsd_shutdown_complete); +} + bool i_am_nfsd(void) { return kthread_func(current) == nfsd; @@ -622,11 +653,13 @@ int nfsd_create_serv(struct net *net) &nfsd_thread_sv_ops); if (nn->nfsd_serv == NULL) return -ENOMEM; + init_completion(&nn->nfsd_shutdown_complete); nn->nfsd_serv->sv_maxconn = nn->max_connections; error = svc_bind(nn->nfsd_serv, net); if (error < 0) { svc_destroy(nn->nfsd_serv); + nfsd_complete_shutdown(net); return error; } @@ -675,7 +708,7 @@ void nfsd_destroy(struct net *net) svc_shutdown_net(nn->nfsd_serv, net); svc_destroy(nn->nfsd_serv); if 
(destroy) - nn->nfsd_serv = NULL; + nfsd_complete_shutdown(net); } int nfsd_set_nrthreads(int n, int *nthreads, struct net *net) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index c852bb5ff2121..b09ead06a2490 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -570,6 +570,7 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos, ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, u64 dst_pos, u64 count) { + ssize_t ret; /* * Limit copy to 4MB to prevent indefinitely blocking an nfsd @@ -580,7 +581,12 @@ ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, * limit like this and pipeline multiple COPY requests. */ count = min_t(u64, count, 1 << 22); - return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0); + ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0); + + if (ret == -EOPNOTSUPP || ret == -EXDEV) + ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, + COPY_FILE_SPLICE); + return ret; } __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp, @@ -1014,6 +1020,10 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt); since = READ_ONCE(file->f_wb_err); if (flags & RWF_SYNC) { + if (verf) + nfsd_copy_boot_verifier(verf, + net_generic(SVC_NET(rqstp), + nfsd_net_id)); host_err = vfs_iter_write(file, &iter, &pos, flags); if (host_err < 0) nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp), diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 77efd69213a3d..65cd599cb2ab6 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -480,9 +480,18 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, 0, &bh, &submit_ptr); if (ret) { - if (ret != -EEXIST) - return ret; - goto out_check; + if (likely(ret == -EEXIST)) + goto out_check; + if (ret == -ENOENT) { + /* + * Block address translation failed due to invalid + * value of 'ptr'. In this case, return internal code + * -EINVAL (broken bmap) to notify bmap layer of fatal + * metadata corruption. 
+ */ + ret = -EINVAL; + } + return ret; } if (ra) { diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 1a3d183027b9e..8fedc7104320d 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -111,6 +111,13 @@ static void nilfs_dat_commit_free(struct inode *dat, kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); + + if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) { + nilfs_error(dat->i_sb, + "state inconsistency probably due to duplicate use of vblocknr = %llu", + (unsigned long long)req->pr_entry_nr); + return; + } nilfs_palloc_commit_free_entry(dat, req); } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 95684fa3c985a..fb594edc0837c 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -332,6 +332,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) struct inode *inode; struct nilfs_inode_info *ii; struct nilfs_root *root; + struct buffer_head *bh; int err = -ENOMEM; ino_t ino; @@ -347,11 +348,25 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) ii->i_state = BIT(NILFS_I_NEW); ii->i_root = root; - err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh); + err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); if (unlikely(err)) goto failed_ifile_create_inode; /* reference count of i_bh inherits from nilfs_mdt_read_block() */ + if (unlikely(ino < NILFS_USER_INO)) { + nilfs_warn(sb, + "inode bitmap is inconsistent for reserved inodes"); + do { + brelse(bh); + err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); + if (unlikely(err)) + goto failed_ifile_create_inode; + } while (ino < NILFS_USER_INO); + + nilfs_info(sb, "repaired inode bitmap for reserved inodes"); + } + ii->i_bh = bh; + atomic64_inc(&root->inodes_count); inode_init_owner(inode, dir, mode); inode->i_ino = ino; @@ -444,6 +459,8 @@ int nilfs_read_inode_common(struct inode *inode, inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); + if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode)) + return -EIO; /* this inode is for metadata and corrupted */ if (inode->i_nlink == 0) return -ESTALE; /* this inode is deleted */ diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 07d26f61f22aa..3a1dea5d14484 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -1129,7 +1129,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) minseg = range[0] + segbytes - 1; do_div(minseg, segbytes); + + if (range[1] < 4096) + goto out; + maxseg = NILFS_SB2_OFFSET_BYTES(range[1]); + if (maxseg < segbytes) + goto out; + do_div(maxseg, segbytes); maxseg--; diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 8350c2eaee75a..5ee4973525f01 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -322,7 +322,7 @@ void nilfs_relax_pressure_in_lock(struct super_block *sb) struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; - if (!sci || !sci->sc_flush_request) + if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request) return; set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); @@ -880,9 +880,11 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_cpfile_put_checkpoint( nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); - } else - WARN_ON(err == -EINVAL || err == -ENOENT); - + } else if (err == -EINVAL || err == -ENOENT) { + nilfs_error(sci->sc_super, + "checkpoint creation failed due to metadata 
corruption."); + err = -EIO; + } return err; } @@ -896,7 +898,11 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0, &raw_cp, &bh_cp); if (unlikely(err)) { - WARN_ON(err == -EINVAL || err == -ENOENT); + if (err == -EINVAL || err == -ENOENT) { + nilfs_error(sci->sc_super, + "checkpoint finalization failed due to metadata corruption."); + err = -EIO; + } goto failed_ibh; } raw_cp->cp_snapshot_list.ssl_next = 0; @@ -2242,7 +2248,7 @@ int nilfs_construct_segment(struct super_block *sb) struct nilfs_transaction_info *ti; int err; - if (!sci) + if (sb_rdonly(sb) || unlikely(!sci)) return -EROFS; /* A call inside transactions causes a deadlock. */ @@ -2281,7 +2287,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, struct nilfs_transaction_info ti; int err = 0; - if (!sci) + if (sb_rdonly(sb) || unlikely(!sci)) return -EROFS; nilfs_transaction_lock(sb, &ti, 0); @@ -2777,11 +2783,12 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) if (nilfs->ns_writer) { /* - * This happens if the filesystem was remounted - * read/write after nilfs_error degenerated it into a - * read-only mount. + * This happens if the filesystem is made read-only by + * __nilfs_error or nilfs_remount and then remounted + * read/write. In these cases, reuse the existing + * writer. */ - nilfs_detach_log_writer(sb); + return 0; } nilfs->ns_writer = nilfs_segctor_new(sb, root); @@ -2791,10 +2798,9 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); err = nilfs_segctor_start_thread(nilfs->ns_writer); - if (err) { - kfree(nilfs->ns_writer); - nilfs->ns_writer = NULL; - } + if (unlikely(err)) + nilfs_detach_log_writer(sb); + return err; } diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 63722475e17e1..51f4cb060231f 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -495,14 +495,22 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) { struct buffer_head *bh; + void *kaddr; + struct nilfs_segment_usage *su; int ret; + down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh); if (!ret) { mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(sufile); + kaddr = kmap_atomic(bh->b_page); + su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); + nilfs_segment_usage_set_dirty(su); + kunmap_atomic(kaddr); brelse(bh); } + up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index b9d30e8c43b06..7751848687635 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) if (newsize > devsize) goto out; + /* + * Prevent underflow in second superblock position calculation. + * The exact minimum size check is done in nilfs_sufile_resize(). 
+ */ + if (newsize < 4096) { + ret = -ENOSPC; + goto out; + } + /* * Write lock is required to protect some functions depending * on the number of segments, the number of reserved segments, @@ -1133,8 +1142,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) goto out; if (*flags & SB_RDONLY) { - /* Shutting down log writer */ - nilfs_detach_log_writer(sb); sb->s_flags |= SB_RDONLY; /* diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index c20ebecd7bc24..38a1206cf9481 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "nilfs.h" #include "segment.h" @@ -192,6 +193,34 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, return ret; } +/** + * nilfs_get_blocksize - get block size from raw superblock data + * @sb: super block instance + * @sbp: superblock raw data buffer + * @blocksize: place to store block size + * + * nilfs_get_blocksize() calculates the block size from the block size + * exponent information written in @sbp and stores it in @blocksize, + * or aborts with an error message if it's too large. + * + * Return Value: On success, 0 is returned. If the block size is too + * large, -EINVAL is returned. + */ +static int nilfs_get_blocksize(struct super_block *sb, + struct nilfs_super_block *sbp, int *blocksize) +{ + unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); + + if (unlikely(shift_bits > + ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)) { + nilfs_err(sb, "too large filesystem blocksize: 2 ^ %u KiB", + shift_bits); + return -EINVAL; + } + *blocksize = BLOCK_SIZE << shift_bits; + return 0; +} + /** * load_nilfs - load and recover the nilfs * @nilfs: the_nilfs structure to be released @@ -245,11 +274,15 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); /* verify consistency between two super blocks */ - blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); + err = nilfs_get_blocksize(sb, sbp[0], &blocksize); + if (err) + goto scan_error; + if (blocksize != nilfs->ns_blocksize) { nilfs_warn(sb, "blocksize differs between two super blocks (%d != %d)", blocksize, nilfs->ns_blocksize); + err = -EINVAL; goto scan_error; } @@ -443,11 +476,33 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) return crc == le32_to_cpu(sbp->s_sum); } -static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) +/** + * nilfs_sb2_bad_offset - check the location of the second superblock + * @sbp: superblock raw data buffer + * @offset: byte offset of second superblock calculated from device size + * + * nilfs_sb2_bad_offset() checks if the position on the second + * superblock is valid or not based on the filesystem parameters + * stored in @sbp. If @offset points to a location within the segment + * area, or if the parameters themselves are not normal, it is + * determined to be invalid. + * + * Return Value: true if invalid, false if valid. 
+ */ +static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) { - return offset < ((le64_to_cpu(sbp->s_nsegments) * - le32_to_cpu(sbp->s_blocks_per_segment)) << - (le32_to_cpu(sbp->s_log_block_size) + 10)); + unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); + u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); + u64 nsegments = le64_to_cpu(sbp->s_nsegments); + u64 index; + + if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS || + shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS) + return true; + + index = offset >> (shift_bits + BLOCK_SIZE_BITS); + do_div(index, blocks_per_segment); + return index < nsegments; } static void nilfs_release_super_block(struct the_nilfs *nilfs) @@ -489,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, { struct nilfs_super_block **sbp = nilfs->ns_sbp; struct buffer_head **sbh = nilfs->ns_sbh; - u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size); + u64 sb2off, devsize = nilfs->ns_bdev->bd_inode->i_size; int valid[2], swp = 0; + if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) { + nilfs_err(sb, "device size too small"); + return -EINVAL; + } + sb2off = NILFS_SB2_OFFSET_BYTES(devsize); + sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize, &sbh[0]); sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]); @@ -586,9 +647,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) if (err) goto failed_sbh; - blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); - if (blocksize < NILFS_MIN_BLOCK_SIZE || - blocksize > NILFS_MAX_BLOCK_SIZE) { + err = nilfs_get_blocksize(sb, sbp, &blocksize); + if (err) + goto failed_sbh; + + if (blocksize < NILFS_MIN_BLOCK_SIZE) { nilfs_err(sb, "couldn't mount because of unsupported filesystem blocksize %d", blocksize); @@ -690,9 +753,7 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) { unsigned long ncleansegs; - down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); - up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; return 0; } diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 914e991731300..c0881d39d36a9 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -594,17 +594,37 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name, for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) { u8 *mrec_end = (u8 *)ctx->mrec + le32_to_cpu(ctx->mrec->bytes_allocated); - u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) + - a->name_length * sizeof(ntfschar); - if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end || - name_end > mrec_end) + u8 *name_end; + + /* check whether the ATTR_RECORD wraps */ + if ((u8 *)a < (u8 *)ctx->mrec) + break; + + /* check whether the Attribute Record Header is within bounds */ + if ((u8 *)a > mrec_end || + (u8 *)a + sizeof(ATTR_RECORD) > mrec_end) + break; + + /* check whether the ATTR_RECORD's name is within bounds */ + name_end = (u8 *)a + le16_to_cpu(a->name_offset) + + a->name_length * sizeof(ntfschar); + if (name_end > mrec_end) break; + + ctx->attr = a; if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) || a->type == AT_END)) return -ENOENT; if (unlikely(!a->length)) break; + + /* check whether the ATTR_RECORD's length wraps */ + if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a) + break; + /* check whether the ATTR_RECORD's length is within bounds */ + if ((u8 *)a + le32_to_cpu(a->length) > mrec_end) + break; 
+ if (a->type != type) continue; /* diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index cf222c9225d6d..645c4b1b23de1 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *vi) goto err_out; } + /* Sanity check offset to the first attribute */ + if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) { + ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.", + le16_to_cpu(m->attrs_offset)); + goto err_out; + } + /* Need this to sanity check attribute list references to $MFT. */ vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number); diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 0d7e948cb29c9..7f69422d5191d 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -2092,7 +2092,8 @@ static bool load_system_files(ntfs_volume *vol) // TODO: Initialize security. /* Get the extended system files' directory inode. */ vol->extend_ino = ntfs_iget(sb, FILE_Extend); - if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) { + if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) || + !S_ISDIR(vol->extend_ino->i_mode)) { if (!IS_ERR(vol->extend_ino)) iput(vol->extend_ino); ntfs_error(sb, "Failed to load $Extend."); diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index db52e843002a0..0534800a472a1 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -159,7 +159,7 @@ static void ocfs2_queue_replay_slots(struct ocfs2_super *osb, replay_map->rm_state = REPLAY_DONE; } -static void ocfs2_free_replay_slots(struct ocfs2_super *osb) +void ocfs2_free_replay_slots(struct ocfs2_super *osb) { struct ocfs2_replay_map *replay_map = osb->replay_map; diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index bfe611ed1b1d7..eb7a21bac71ef 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -152,6 +152,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb); void ocfs2_recovery_exit(struct ocfs2_super *osb); int ocfs2_compute_replay_slots(struct ocfs2_super *osb); +void ocfs2_free_replay_slots(struct ocfs2_super *osb); /* * Journal Control: * Initialize, Load, Shutdown, Wipe a journal. 
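The journal.c/journal.h hunks above export ocfs2_free_replay_slots() so that the reworked mount error paths further down in this series (in fs/ocfs2/super.c) can free the replay map that ocfs2_compute_replay_slots() allocated before the failure point. The sketch below is a minimal, self-contained illustration of the goto-unwind idiom those error paths adopt; the grab()/drop() helpers and resource names are hypothetical stand-ins for illustration, not kernel APIs.

#include <stdio.h>

/* Hypothetical helpers standing in for DLM init, the super lock, etc. */
static int grab(const char *res) { printf("grab %s\n", res); return 0; }
static void drop(const char *res) { printf("drop %s\n", res); }

static int mount_volume(void)
{
	int status;

	status = grab("dlm");
	if (status < 0)
		goto out;
	status = grab("super_lock");
	if (status < 0)
		goto out_dlm;
	status = grab("system_inodes");
	if (status < 0)
		goto out_super_lock;
	status = grab("replay_slots");
	if (status < 0)
		goto out_system_inodes;

	/* Success: the lock was only needed during setup. */
	drop("super_lock");
	return 0;

	/* Each label undoes exactly what succeeded before the jump. */
out_system_inodes:
	drop("system_inodes");
out_super_lock:
	drop("super_lock");
out_dlm:
	drop("dlm");
out:
	return status;
}

int main(void)
{
	return mount_volume() ? 1 : 0;
}

Reading the ocfs2 super.c hunks below with this shape in mind makes it easier to check that each new label (out_check_volume, out_system_inodes, out_super_lock, out_dlm) releases exactly the state acquired above it, in reverse order.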
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index c46bf7f581a14..856474b0a1ae7 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -231,6 +231,7 @@ static int ocfs2_mknod(struct inode *dir, handle_t *handle = NULL; struct ocfs2_super *osb; struct ocfs2_dinode *dirfe; + struct ocfs2_dinode *fe = NULL; struct buffer_head *new_fe_bh = NULL; struct inode *inode = NULL; struct ocfs2_alloc_context *inode_ac = NULL; @@ -381,6 +382,7 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } + fe = (struct ocfs2_dinode *) new_fe_bh->b_data; if (S_ISDIR(mode)) { status = ocfs2_fill_new_dir(osb, handle, dir, inode, new_fe_bh, data_ac, meta_ac); @@ -453,8 +455,11 @@ static int ocfs2_mknod(struct inode *dir, leave: if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) @@ -631,18 +636,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, return status; } - status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, + return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, parent_fe_bh, handle, inode_ac, fe_blkno, suballoc_loc, suballoc_bit); - if (status < 0) { - u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit); - int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode, - inode_ac->ac_bh, suballoc_bit, bg_blkno, 1); - if (tmp) - mlog_errno(tmp); - } - - return status; } static int ocfs2_mkdir(struct inode *dir, @@ -2023,8 +2019,11 @@ static int ocfs2_symlink(struct inode *dir, ocfs2_clusters_to_bytes(osb->sb, 1)); if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 03eacb249f379..5e272e257b0bb 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -705,6 +705,8 @@ static struct ctl_table_header *ocfs2_table_header; static int __init ocfs2_stack_glue_init(void) { + int ret; + strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); ocfs2_table_header = register_sysctl_table(ocfs2_root_table); @@ -714,7 +716,11 @@ static int __init ocfs2_stack_glue_init(void) return -ENOMEM; /* or something. 
*/ } - return ocfs2_sysfs_init(); + ret = ocfs2_sysfs_init(); + if (ret) + unregister_sysctl_table(ocfs2_table_header); + + return ret; } static void __exit ocfs2_stack_glue_exit(void) diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index c0e5f1bad499f..3e0b2e3e00ad1 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -984,28 +984,27 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { status = -EINVAL; - goto read_super_error; + goto out; } /* probe for superblock */ status = ocfs2_sb_probe(sb, &bh, §or_size, &stats); if (status < 0) { mlog(ML_ERROR, "superblock probe failed!\n"); - goto read_super_error; + goto out; } status = ocfs2_initialize_super(sb, bh, sector_size, &stats); - osb = OCFS2_SB(sb); - if (status < 0) { - mlog_errno(status); - goto read_super_error; - } brelse(bh); bh = NULL; + if (status < 0) + goto out; + + osb = OCFS2_SB(sb); if (!ocfs2_check_set_options(sb, &parsed_options)) { status = -EINVAL; - goto read_super_error; + goto out_super; } osb->s_mount_opt = parsed_options.mount_opt; osb->s_atime_quantum = parsed_options.atime_quantum; @@ -1022,7 +1021,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) status = ocfs2_verify_userspace_stack(osb, &parsed_options); if (status) - goto read_super_error; + goto out_super; sb->s_magic = OCFS2_SUPER_MAGIC; @@ -1036,7 +1035,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) status = -EACCES; mlog(ML_ERROR, "Readonly device detected but readonly " "mount was not specified.\n"); - goto read_super_error; + goto out_super; } /* You should not be able to start a local heartbeat @@ -1045,7 +1044,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) status = -EROFS; mlog(ML_ERROR, "Local heartbeat specified on readonly " "device.\n"); - goto read_super_error; + goto out_super; } status = ocfs2_check_journals_nolocks(osb); @@ -1054,9 +1053,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) mlog(ML_ERROR, "Recovery required on readonly " "file system, but write access is " "unavailable.\n"); - else - mlog_errno(status); - goto read_super_error; + goto out_super; } ocfs2_set_ro_flag(osb, 1); @@ -1072,10 +1069,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) } status = ocfs2_verify_heartbeat(osb); - if (status < 0) { - mlog_errno(status); - goto read_super_error; - } + if (status < 0) + goto out_super; osb->osb_debug_root = debugfs_create_dir(osb->uuid_str, ocfs2_debugfs_root); @@ -1089,15 +1084,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) status = ocfs2_mount_volume(sb); if (status < 0) - goto read_super_error; + goto out_debugfs; if (osb->root_inode) inode = igrab(osb->root_inode); if (!inode) { status = -EIO; - mlog_errno(status); - goto read_super_error; + goto out_dismount; } osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL, @@ -1105,7 +1099,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) if (!osb->osb_dev_kset) { status = -ENOMEM; mlog(ML_ERROR, "Unable to create device kset %s.\n", sb->s_id); - goto read_super_error; + goto out_dismount; } /* Create filecheck sysfs related directories/files at @@ -1114,14 +1108,13 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) status = -ENOMEM; mlog(ML_ERROR, "Unable to create filecheck sysfs directory at " "/sys/fs/ocfs2/%s/filecheck.\n", sb->s_id); - goto 
read_super_error; + goto out_dismount; } root = d_make_root(inode); if (!root) { status = -ENOMEM; - mlog_errno(status); - goto read_super_error; + goto out_dismount; } sb->s_root = root; @@ -1168,17 +1161,22 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) return status; -read_super_error: - brelse(bh); - - if (status) - mlog_errno(status); +out_dismount: + atomic_set(&osb->vol_state, VOLUME_DISABLED); + wake_up(&osb->osb_mount_event); + ocfs2_free_replay_slots(osb); + ocfs2_dismount_volume(sb, 1); + goto out; - if (osb) { - atomic_set(&osb->vol_state, VOLUME_DISABLED); - wake_up(&osb->osb_mount_event); - ocfs2_dismount_volume(sb, 1); - } +out_debugfs: + debugfs_remove_recursive(osb->osb_debug_root); +out_super: + ocfs2_release_system_inodes(osb); + kfree(osb->recovery_map); + ocfs2_delete_osb(osb); + kfree(osb); +out: + mlog_errno(status); return status; } @@ -1787,11 +1785,10 @@ static int ocfs2_get_sector(struct super_block *sb, static int ocfs2_mount_volume(struct super_block *sb) { int status = 0; - int unlock_super = 0; struct ocfs2_super *osb = OCFS2_SB(sb); if (ocfs2_is_hard_readonly(osb)) - goto leave; + goto out; mutex_init(&osb->obs_trim_fs_mutex); @@ -1801,44 +1798,58 @@ static int ocfs2_mount_volume(struct super_block *sb) if (status == -EBADR && ocfs2_userspace_stack(osb)) mlog(ML_ERROR, "couldn't mount because cluster name on" " disk does not match the running cluster name.\n"); - goto leave; + goto out; } status = ocfs2_super_lock(osb, 1); if (status < 0) { mlog_errno(status); - goto leave; + goto out_dlm; } - unlock_super = 1; /* This will load up the node map and add ourselves to it. */ status = ocfs2_find_slot(osb); if (status < 0) { mlog_errno(status); - goto leave; + goto out_super_lock; } /* load all node-local system inodes */ status = ocfs2_init_local_system_inodes(osb); if (status < 0) { mlog_errno(status); - goto leave; + goto out_super_lock; } status = ocfs2_check_volume(osb); if (status < 0) { mlog_errno(status); - goto leave; + goto out_system_inodes; } status = ocfs2_truncate_log_init(osb); - if (status < 0) + if (status < 0) { mlog_errno(status); + goto out_check_volume; + } -leave: - if (unlock_super) - ocfs2_super_unlock(osb, 1); + ocfs2_super_unlock(osb, 1); + return 0; +out_check_volume: + ocfs2_free_replay_slots(osb); +out_system_inodes: + if (osb->local_alloc_state == OCFS2_LA_ENABLED) + ocfs2_shutdown_local_alloc(osb); + ocfs2_release_system_inodes(osb); + /* before journal shutdown, we should release slot_info */ + ocfs2_free_slot_info(osb); + ocfs2_journal_shutdown(osb); +out_super_lock: + ocfs2_super_unlock(osb, 1); +out_dlm: + ocfs2_dlm_shutdown(osb, 0); +out: return status; } diff --git a/fs/open.c b/fs/open.c index 3aaaad47d9cac..b3fbb4300fc96 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1099,6 +1099,12 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) lookup_flags |= LOOKUP_BENEATH; if (how->resolve & RESOLVE_IN_ROOT) lookup_flags |= LOOKUP_IN_ROOT; + if (how->resolve & RESOLVE_CACHED) { + /* Don't bother even trying for create/truncate/tmpfile open */ + if (flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + return -EAGAIN; + lookup_flags |= LOOKUP_CACHED; + } op->lookup_flags = lookup_flags; return 0; diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 29eaa45443727..1b508f5433846 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -194,15 +194,10 @@ void orangefs_debugfs_init(int debug_mask) */ static void 
orangefs_kernel_debug_init(void) { - int rc = -ENOMEM; - char *k_buffer = NULL; + static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); - k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); - if (!k_buffer) - goto out; - if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { strcpy(k_buffer, kernel_debug_string); strcat(k_buffer, "\n"); @@ -213,15 +208,14 @@ static void orangefs_kernel_debug_init(void) debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, &kernel_debug_fops); - -out: - gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); } void orangefs_debugfs_cleanup(void) { debugfs_remove_recursive(debug_dir); + kfree(debug_help_string); + debug_help_string = NULL; } /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ @@ -297,18 +291,13 @@ static int help_show(struct seq_file *m, void *v) /* * initialize the client-debug file. */ -static int orangefs_client_debug_init(void) +static void orangefs_client_debug_init(void) { - int rc = -ENOMEM; - char *c_buffer = NULL; + static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); - c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); - if (!c_buffer) - goto out; - if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { strcpy(c_buffer, client_debug_string); strcat(c_buffer, "\n"); @@ -322,13 +311,6 @@ static int orangefs_client_debug_init(void) debug_dir, c_buffer, &kernel_debug_fops); - - rc = 0; - -out: - - gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); - return rc; } /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ @@ -671,6 +653,7 @@ int orangefs_prepare_debugfs_help_string(int at_boot) memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE); strlcat(debug_help_string, new, string_size); mutex_unlock(&orangefs_help_file_lock); + kfree(new); } rc = 0; diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c index 74a3d6337ef43..ac9c91b83868f 100644 --- a/fs/orangefs/orangefs-mod.c +++ b/fs/orangefs/orangefs-mod.c @@ -141,7 +141,7 @@ static int __init orangefs_init(void) gossip_err("%s: could not initialize device subsystem %d!\n", __func__, ret); - goto cleanup_device; + goto cleanup_sysfs; } ret = register_filesystem(&orangefs_fs_type); @@ -152,11 +152,11 @@ static int __init orangefs_init(void) goto out; } - orangefs_sysfs_exit(); - -cleanup_device: orangefs_dev_cleanup(); +cleanup_sysfs: + orangefs_sysfs_exit(); + sysfs_init_failed: orangefs_debugfs_cleanup(); diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index d0e5cde277022..8ebd9f2b1c95b 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -586,28 +586,42 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, goto out_revert_creds; } - err = -ENOMEM; - override_cred = prepare_creds(); - if (override_cred) { + if (!attr->hardlink) { + err = -ENOMEM; + override_cred = prepare_creds(); + if (!override_cred) + goto out_revert_creds; + /* + * In the creation cases (create, mkdir, mknod, symlink), + * ovl should transfer current's fs{u,g}id to the underlying + * fs, because the underlying fs wants to initialize its new + * inode's owner using current's fs{u,g}id. In this + * case, the @inode is a new inode that is initialized + * in inode_init_owner() to current's fs{u,g}id. So use + * the inode's i_{u,g}id to override the cred's fs{u,g}id. 
+ * + * In the hardlink case, by contrast, ovl_link() does not + * create a new inode, so just use the ovl mounter's + * fs{u,g}id. + */ override_cred->fsuid = inode->i_uid; override_cred->fsgid = inode->i_gid; - if (!attr->hardlink) { - err = security_dentry_create_files_as(dentry, - attr->mode, &dentry->d_name, old_cred, - override_cred); - if (err) { - put_cred(override_cred); - goto out_revert_creds; - } + err = security_dentry_create_files_as(dentry, + attr->mode, &dentry->d_name, old_cred, + override_cred); + if (err) { + put_cred(override_cred); + goto out_revert_creds; } put_cred(override_creds(override_cred)); put_cred(override_cred); - - if (!ovl_dentry_is_whiteout(dentry)) - err = ovl_create_upper(dentry, inode, attr); - else - err = ovl_create_over_whiteout(dentry, inode, attr); } + + if (!ovl_dentry_is_whiteout(dentry)) + err = ovl_create_upper(dentry, inode, attr); + else + err = ovl_create_over_whiteout(dentry, inode, attr); + out_revert_creds: revert_creds(old_cred); return err; } diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index b019f27c13601..0e734c8b4dfa2 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -531,9 +531,16 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len const struct cred *old_cred; int ret; + inode_lock(inode); + /* Update mode */ + ovl_copyattr(ovl_inode_real(inode), inode); + ret = file_remove_privs(file); + if (ret) + goto out_unlock; + ret = ovl_real_fdget(file, &real); if (ret) - return ret; + goto out_unlock; old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fallocate(real.file, mode, offset, len); @@ -544,6 +551,9 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len fdput(real); +out_unlock: + inode_unlock(inode); + return ret; } @@ -687,14 +697,23 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, const struct cred *old_cred; loff_t ret; + inode_lock(inode_out); + if (op != OVL_DEDUPE) { + /* Update mode */ + ovl_copyattr(ovl_inode_real(inode_out), inode_out); + ret = file_remove_privs(file_out); + if (ret) + goto out_unlock; + } + ret = ovl_real_fdget(file_out, &real_out); if (ret) - return ret; + goto out_unlock; ret = ovl_real_fdget(file_in, &real_in); if (ret) { fdput(real_out); - return ret; + goto out_unlock; } old_cred = ovl_override_creds(file_inode(file_out)->i_sb); @@ -723,6 +742,9 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, fdput(real_in); fdput(real_out); +out_unlock: + inode_unlock(inode_out); + return ret; } diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 45c596dfe3a36..e3cd5a00f880d 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -138,11 +138,16 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry, unsigned int flags, bool weak) { struct ovl_entry *oe = dentry->d_fsdata; + struct inode *inode = d_inode_rcu(dentry); struct dentry *upper; unsigned int i; int ret = 1; - upper = ovl_dentry_upper(dentry); + /* Careful in RCU mode */ + if (!inode) + return -ECHILD; + + upper = ovl_i_dentry_upper(inode); if (upper) ret = ovl_revalidate_real(upper, flags, weak); diff --git a/fs/pnode.c b/fs/pnode.c index 1106137c747a3..468e4e65a615d 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -244,7 +244,7 @@ static int propagate_one(struct mount *m) } do { struct mount *parent = last_source->mnt_parent; - if (last_source == first_source) + if (peers(last_source, first_source)) break; done = parent->mnt_master == p; if (done && peers(n, parent)) diff --git a/fs/proc/proc_sysctl.c 
b/fs/proc/proc_sysctl.c index 070d2df8ab9cf..cd7c6c4af83ad 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "internal.h" static const struct dentry_operations proc_sys_dentry_operations; @@ -1380,6 +1381,38 @@ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab } EXPORT_SYMBOL(register_sysctl); +/** + * __register_sysctl_init() - register sysctl table to path + * @path: path name for sysctl base + * @table: This is the sysctl table that needs to be registered to the path + * @table_name: The name of the sysctl table, only used for log printing when + * registration fails + * + * The sysctl interface is used by userspace to query or modify at runtime + * a predefined value set on a variable. These variables however have default + * values pre-set. Code which depends on these variables will always work even + * if register_sysctl() fails. If register_sysctl() fails you'd just lose the + * ability to query or modify the sysctls dynamically at run time. Chances of + * register_sysctl() failing on init are extremely low, and so for both reasons + * this function does not return any error as it is used by initialization code. + * + * Context: Can only be called after your respective sysctl base path has been + * registered. So for instance, most base directories are registered early on + * init before init levels are processed through proc_sys_init() and + * sysctl_init(). + */ +void __init __register_sysctl_init(const char *path, struct ctl_table *table, + const char *table_name) +{ + struct ctl_table_header *hdr = register_sysctl(path, table); + + if (unlikely(!hdr)) { + pr_err("failed when register_sysctl %s to %s\n", table_name, path); + return; + } + kmemleak_not_leak(hdr); +} + static char *append_path(const char *path, char *pos, const char *name) { int namelen; diff --git a/fs/proc/self.c b/fs/proc/self.c index a4012154e1096..72cd69bcaf4ad 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -16,13 +16,6 @@ static const char *proc_self_get_link(struct dentry *dentry, pid_t tgid = task_tgid_nr_ns(current, ns); char *name; - /* - * Not currently supported. Once we can inherit all of struct pid, - * we can allow this. - */ - if (current->flags & PF_IO_WORKER) - return ERR_PTR(-EOPNOTSUPP); - if (!tgid) return ERR_PTR(-ENOENT); /* max length of unsigned int in decimal + NULL term */ diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index ef18f0d71b11b..39b1038076c3e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -714,9 +714,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, page = device_private_entry_to_page(swpent); } if (page) { - int mapcount = page_mapcount(page); - - if (mapcount >= 2) + if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte)) mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); else mss->private_hugetlb += huge_page_size(hstate_vma(vma)); @@ -951,7 +949,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v) vma = vma->vm_next; } - show_vma_header_prefix(m, priv->mm->mmap->vm_start, + show_vma_header_prefix(m, priv->mm->mmap ? 
priv->mm->mmap->vm_start : 0, last_vma_end, 0, 0, 0, 0); seq_pad(m, ' '); seq_puts(m, "[rollup]\n"); diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index d56681d86d28a..a553273fbd417 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c @@ -17,13 +17,6 @@ static const char *proc_thread_self_get_link(struct dentry *dentry, pid_t pid = task_pid_nr_ns(current, ns); char *name; - /* - * Not currently supported. Once we can inherit all of struct pid, - * we can allow this. - */ - if (current->flags & PF_IO_WORKER) - return ERR_PTR(-EOPNOTSUPP); - if (!pid) return ERR_PTR(-ENOENT); name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC); diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig index 8efe60487b486..71dbe9a2533f0 100644 --- a/fs/pstore/Kconfig +++ b/fs/pstore/Kconfig @@ -118,6 +118,7 @@ config PSTORE_CONSOLE config PSTORE_PMSG bool "Log user space messages" depends on PSTORE + select RT_MUTEXES help When the option is enabled, pstore will export a character interface /dev/pmsg0 to log user space messages. On reboot diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c index d8542ec2f38c6..18cf94b597e05 100644 --- a/fs/pstore/pmsg.c +++ b/fs/pstore/pmsg.c @@ -7,9 +7,10 @@ #include #include #include +#include #include "internal.h" -static DEFINE_MUTEX(pmsg_lock); +static DEFINE_RT_MUTEX(pmsg_lock); static ssize_t write_pmsg(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -28,9 +29,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf, if (!access_ok(buf, count)) return -EFAULT; - mutex_lock(&pmsg_lock); + rt_mutex_lock(&pmsg_lock); ret = psinfo->write_user(&record, buf); - mutex_unlock(&pmsg_lock); + rt_mutex_unlock(&pmsg_lock); return ret ? ret : count; } diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index ca6d8a8672856..98e579ce0d633 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -730,6 +730,7 @@ static int ramoops_probe(struct platform_device *pdev) /* Make sure we didn't get bogus platform data pointer. */ if (!pdata) { pr_err("NULL platform data\n"); + err = -EINVAL; goto fail_out; } @@ -737,6 +738,7 @@ static int ramoops_probe(struct platform_device *pdev) !pdata->ftrace_size && !pdata->pmsg_size)) { pr_err("The memory size and the record/console size must be " "non-zero\n"); + err = -EINVAL; goto fail_out; } diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index aa8e0b65ff1ae..184cb97c83bdd 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -425,7 +425,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size, phys_addr_t addr = page_start + i * PAGE_SIZE; pages[i] = pfn_to_page(addr >> PAGE_SHIFT); } - vaddr = vmap(pages, page_count, VM_MAP, prot); + /* + * VM_IOREMAP used here to bypass this region during vread() + * and kmap_atomic() (i.e. kcore) to avoid __va() failures. 
+ */ + vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot); kfree(pages); /* diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c index 3ce89216670c9..b50fc33f2ab29 100644 --- a/fs/pstore/zone.c +++ b/fs/pstore/zone.c @@ -761,7 +761,7 @@ static inline int notrace psz_kmsg_write_record(struct psz_context *cxt, /* avoid destroying old data, allocate a new one */ len = zone->buffer_size + sizeof(*zone->buffer); zone->oldbuf = zone->buffer; - zone->buffer = kzalloc(len, GFP_KERNEL); + zone->buffer = kzalloc(len, GFP_ATOMIC); if (!zone->buffer) { zone->buffer = zone->oldbuf; return -ENOMEM; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 65f123d5809bd..ad255f8ab5c55 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2319,6 +2319,8 @@ static int vfs_setup_quota_inode(struct inode *inode, int type) struct super_block *sb = inode->i_sb; struct quota_info *dqopt = sb_dqopt(sb); + if (is_bad_inode(inode)) + return -EUCLEAN; if (!S_ISREG(inode->i_mode)) return -EACCES; if (IS_RDONLY(inode)) diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 1a188fbdf34e5..07948f6ac84ef 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -80,6 +80,35 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) return ret; } +static inline int do_check_range(struct super_block *sb, const char *val_name, + uint val, uint min_val, uint max_val) +{ + if (val < min_val || val > max_val) { + quota_error(sb, "Getting %s %u out of range %u-%u", + val_name, val, min_val, max_val); + return -EUCLEAN; + } + + return 0; +} + +static int check_dquot_block_header(struct qtree_mem_dqinfo *info, + struct qt_disk_dqdbheader *dh) +{ + int err = 0; + + err = do_check_range(info->dqi_sb, "dqdh_next_free", + le32_to_cpu(dh->dqdh_next_free), 0, + info->dqi_blocks - 1); + if (err) + return err; + err = do_check_range(info->dqi_sb, "dqdh_prev_free", + le32_to_cpu(dh->dqdh_prev_free), 0, + info->dqi_blocks - 1); + + return err; +} + /* Remove empty block from list and return it */ static int get_free_dqblk(struct qtree_mem_dqinfo *info) { @@ -94,6 +123,9 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info) ret = read_blk(info, blk, buf); if (ret < 0) goto out_buf; + ret = check_dquot_block_header(info, dh); + if (ret) + goto out_buf; info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); } else { @@ -241,6 +273,9 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, *err = read_blk(info, blk, buf); if (*err < 0) goto out_buf; + *err = check_dquot_block_header(info, dh); + if (*err) + goto out_buf; } else { blk = get_free_dqblk(info); if ((int)blk < 0) { @@ -433,6 +468,9 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, goto out_buf; } dh = (struct qt_disk_dqdbheader *)buf; + ret = check_dquot_block_header(info, dh); + if (ret) + goto out_buf; le16_add_cpu(&dh->dqdh_entries, -1); if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? 
*/ ret = remove_free_dqentry(info, buf, blk); diff --git a/fs/read_write.c b/fs/read_write.c index 75f764b434184..0066acb6b380d 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1388,28 +1388,6 @@ ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, } EXPORT_SYMBOL(generic_copy_file_range); -static ssize_t do_copy_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - size_t len, unsigned int flags) -{ - /* - * Although we now allow filesystems to handle cross sb copy, passing - * a file of the wrong filesystem type to filesystem driver can result - * in an attempt to dereference the wrong type of ->private_data, so - * avoid doing that until we really have a good reason. NFS defines - * several different file_system_type structures, but they all end up - * using the same ->copy_file_range() function pointer. - */ - if (file_out->f_op->copy_file_range && - file_out->f_op->copy_file_range == file_in->f_op->copy_file_range) - return file_out->f_op->copy_file_range(file_in, pos_in, - file_out, pos_out, - len, flags); - - return generic_copy_file_range(file_in, pos_in, file_out, pos_out, len, - flags); -} - /* * Performs necessary checks before doing a file copy * @@ -1431,6 +1409,26 @@ static int generic_copy_file_checks(struct file *file_in, loff_t pos_in, if (ret) return ret; + /* + * We allow some filesystems to handle cross sb copy, but passing + * a file of the wrong filesystem type to filesystem driver can result + * in an attempt to dereference the wrong type of ->private_data, so + * avoid doing that until we really have a good reason. + * + * nfs and cifs define several different file_system_type structures + * and several different sets of file_operations, but they all end up + * using the same ->copy_file_range() function pointer. + */ + if (flags & COPY_FILE_SPLICE) { + /* cross sb splice is allowed */ + } else if (file_out->f_op->copy_file_range) { + if (file_in->f_op->copy_file_range != + file_out->f_op->copy_file_range) + return -EXDEV; + } else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) { + return -EXDEV; + } + /* Don't touch certain kinds of inodes */ if (IS_IMMUTABLE(inode_out)) return -EPERM; @@ -1473,8 +1471,9 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, size_t len, unsigned int flags) { ssize_t ret; + bool splice = flags & COPY_FILE_SPLICE; - if (flags != 0) + if (flags & ~COPY_FILE_SPLICE) return -EINVAL; ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len, @@ -1496,26 +1495,43 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, file_start_write(file_out); /* - * Try cloning first, this is supported by more file systems, and - * more efficient if both clone and copy are supported (e.g. NFS). + * Cloning is supported by more file systems, so we implement copy on + * same sb using clone, but for filesystems where both clone and copy + * are supported (e.g. nfs,cifs), we only call the copy method. 
*/ - if (file_in->f_op->remap_file_range && - file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) { - loff_t cloned; + if (!splice && file_out->f_op->copy_file_range) { + ret = file_out->f_op->copy_file_range(file_in, pos_in, + file_out, pos_out, + len, flags); + goto done; + } - cloned = file_in->f_op->remap_file_range(file_in, pos_in, + if (!splice && file_in->f_op->remap_file_range && + file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) { + ret = file_in->f_op->remap_file_range(file_in, pos_in, file_out, pos_out, min_t(loff_t, MAX_RW_COUNT, len), REMAP_FILE_CAN_SHORTEN); - if (cloned > 0) { - ret = cloned; + if (ret > 0) goto done; - } } - ret = do_copy_file_range(file_in, pos_in, file_out, pos_out, len, - flags); - WARN_ON_ONCE(ret == -EOPNOTSUPP); + /* + * We can get here for a same sb copy of filesystems that do not + * implement ->copy_file_range(), in case the filesystem does not + * support clone or in case the filesystem supports clone but rejected + * the clone request (e.g. because it was not block aligned). + * + * In both cases, fall back to kernel copy so we are able to maintain a + * consistent story about which filesystems support copy_file_range() + * and which filesystems do not, which will allow userspace tools to + * make consistent decisions w.r.t. using copy_file_range(). + * + * We also get here if the caller (e.g. nfsd) requested COPY_FILE_SPLICE. + */ + ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len, + flags); + done: if (ret > 0) { fsnotify_access(file_in); @@ -1566,6 +1582,10 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in, pos_out = f_out.file->f_pos; } + ret = -EINVAL; + if (flags != 0) + goto out; + ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len, flags); if (ret > 0) { diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 1594687582f0c..84cc136d41dd4 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -695,6 +695,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod out_failed: reiserfs_write_unlock(dir->i_sb); + reiserfs_security_free(&security); return retval; } @@ -778,6 +779,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode out_failed: reiserfs_write_unlock(dir->i_sb); + reiserfs_security_free(&security); return retval; } @@ -876,6 +878,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode retval = journal_end(&th); out_failed: reiserfs_write_unlock(dir->i_sb); + reiserfs_security_free(&security); return retval; } @@ -1191,6 +1194,7 @@ static int reiserfs_symlink(struct inode *parent_dir, retval = journal_end(&th); out_failed: reiserfs_write_unlock(parent_dir->i_sb); + reiserfs_security_free(&security); return retval; } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 913f5af9bf248..0ebb6e6849082 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -1437,7 +1437,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) unsigned long safe_mask = 0; unsigned int commit_max_age = (unsigned int)-1; struct reiserfs_journal *journal = SB_JOURNAL(s); - char *new_opts; int err; char *qf_names[REISERFS_MAXQUOTAS]; unsigned int qfmt = 0; @@ -1445,10 +1444,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) int i; #endif - new_opts = kstrdup(arg, GFP_KERNEL); - if (arg && !new_opts) - return -ENOMEM; - sync_filesystem(s); reiserfs_write_lock(s); @@ -1599,7 +1594,6 @@ static int reiserfs_remount(struct 
super_block *s, int *mount_flags, char *arg) out_err_unlock: reiserfs_write_unlock(s); out_err: - kfree(new_opts); return err; } diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index 20be9a0e5870e..59d87f9f72fb4 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -49,6 +49,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode, int error; sec->name = NULL; + sec->value = NULL; /* Don't add selinux attributes on xattrs - they'll never get used */ if (IS_PRIVATE(dir)) @@ -94,7 +95,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th, void reiserfs_security_free(struct reiserfs_security_handle *sec) { - kfree(sec->name); kfree(sec->value); sec->name = NULL; sec->value = NULL; } diff --git a/fs/splice.c b/fs/splice.c index 6610e55c0e2ab..866d5c2367b23 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -806,15 +806,17 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, { struct pipe_inode_info *pipe; long ret, bytes; + umode_t i_mode; size_t len; int i, flags, more; /* - * We require the input to be seekable, as we don't want to randomly - * drop data for eg socket -> socket splicing. Use the piped splicing - * for that! + * We require the input to be a regular file, as we don't want to + * randomly drop data for eg socket -> socket splicing. Use the + * piped splicing for that! */ - if (unlikely(!(in->f_mode & FMODE_LSEEK))) + i_mode = file_inode(in)->i_mode; + if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) return -EINVAL; /* diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index b3fdc8212c5f5..95f8e89017689 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -183,7 +183,7 @@ static inline int squashfs_block_size(__le32 raw) #define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\ sizeof(u64)) /* xattr id lookup table defines */ -#define SQUASHFS_XATTR_BYTES(A) ((A) * sizeof(struct squashfs_xattr_id)) +#define SQUASHFS_XATTR_BYTES(A) (((u64) (A)) * sizeof(struct squashfs_xattr_id)) #define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \ SQUASHFS_METADATA_SIZE) diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index 166e98806265b..8f9445e290e72 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h @@ -63,7 +63,7 @@ struct squashfs_sb_info { long long bytes_used; unsigned int inodes; unsigned int fragments; - int xattr_ids; + unsigned int xattr_ids; unsigned int ids; }; #endif diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h index d8a270d3ac4cb..f1a463d8bfa02 100644 --- a/fs/squashfs/xattr.h +++ b/fs/squashfs/xattr.h @@ -10,12 +10,12 @@ #ifdef CONFIG_SQUASHFS_XATTR extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64, - u64 *, int *); + u64 *, unsigned int *); extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, unsigned int *, unsigned long long *); #else static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, - u64 start, u64 *xattr_table_start, int *xattr_ids) + u64 start, u64 *xattr_table_start, unsigned int *xattr_ids) { struct squashfs_xattr_id_table *id_table; diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c index 087cab8c78f4e..c8469c656e0dc 100644 --- a/fs/squashfs/xattr_id.c +++ b/fs/squashfs/xattr_id.c @@ -56,7 +56,7 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, * Read uncompressed xattr id lookup table indexes from disk into memory */ __le64 *squashfs_read_xattr_id_table(struct 
super_block *sb, u64 table_start, - u64 *xattr_table_start, int *xattr_ids) + u64 *xattr_table_start, unsigned int *xattr_ids) { struct squashfs_sb_info *msblk = sb->s_fs_info; unsigned int len, indexes; diff --git a/fs/super.c b/fs/super.c index bae3fe80f852e..7629f9dd031cc 100644 --- a/fs/super.c +++ b/fs/super.c @@ -293,7 +293,7 @@ static void __put_super(struct super_block *s) WARN_ON(s->s_inode_lru.node); WARN_ON(!list_empty(&s->s_mounts)); security_sb_free(s); - fscrypt_sb_free(s); + fscrypt_destroy_keyring(s); put_user_ns(s->s_user_ns); kfree(s->s_subtype); call_rcu(&s->rcu, destroy_super_rcu); @@ -454,6 +454,7 @@ void generic_shutdown_super(struct super_block *sb) evict_inodes(sb); /* only nonzero refcount inodes can have marks */ fsnotify_sb_delete(sb); + fscrypt_destroy_keyring(sb); if (sb->s_dio_done_wq) { destroy_workqueue(sb->s_dio_done_wq); diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index bcb67b0cabe7e..31f66053e2393 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -438,7 +438,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size) res += blocks; direct = 1; } - return blocks; + return res; } int sysv_getattr(const struct path *path, struct kstat *stat, diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 8b7315c22f0d1..4b70571368526 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -139,6 +139,8 @@ struct tracefs_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; + /* Opt_* bitfield. */ + unsigned int opts; }; enum { @@ -239,6 +241,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) kgid_t gid; char *p; + opts->opts = 0; opts->mode = TRACEFS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { @@ -273,24 +276,36 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) * but traditionally tracefs has ignored all mount options */ } + + opts->opts |= BIT(token); } return 0; } -static int tracefs_apply_options(struct super_block *sb) +static int tracefs_apply_options(struct super_block *sb, bool remount) { struct tracefs_fs_info *fsi = sb->s_fs_info; struct inode *inode = sb->s_root->d_inode; struct tracefs_mount_opts *opts = &fsi->mount_opts; - inode->i_mode &= ~S_IALLUGO; - inode->i_mode |= opts->mode; + /* + * On remount, only reset mode/uid/gid if they were provided as mount + * options. + */ + + if (!remount || opts->opts & BIT(Opt_mode)) { + inode->i_mode &= ~S_IALLUGO; + inode->i_mode |= opts->mode; + } - inode->i_uid = opts->uid; + if (!remount || opts->opts & BIT(Opt_uid)) + inode->i_uid = opts->uid; - /* Set all the group ids to the mount option */ - set_gid(sb->s_root, opts->gid); + if (!remount || opts->opts & BIT(Opt_gid)) { + /* Set all the group ids to the mount option */ + set_gid(sb->s_root, opts->gid); + } return 0; } @@ -305,7 +320,7 @@ static int tracefs_remount(struct super_block *sb, int *flags, char *data) if (err) goto fail; - tracefs_apply_options(sb); + tracefs_apply_options(sb, true); fail: return err; @@ -357,7 +372,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent) sb->s_op = &tracefs_super_operations; - tracefs_apply_options(sb); + tracefs_apply_options(sb, false); return 0; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index d32b836f6ca74..2132bfab67f35 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -438,6 +438,12 @@ static int udf_get_block(struct inode *inode, sector_t block, iinfo->i_next_alloc_goal++; } + /* + * Block beyond EOF and prealloc extents? 
Just discard preallocation + * as it is not useful and complicates things. + */ + if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents) + udf_discard_prealloc(inode); udf_clear_extent_cache(inode); phys = inode_getblk(inode, block, &err, &new); if (!phys) @@ -487,8 +493,6 @@ static int udf_do_extend_file(struct inode *inode, uint32_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; - struct kernel_lb_addr prealloc_loc = {}; - uint32_t prealloc_len = 0; struct udf_inode_info *iinfo; int err; @@ -509,19 +513,6 @@ static int udf_do_extend_file(struct inode *inode, ~(sb->s_blocksize - 1); } - /* Last extent are just preallocated blocks? */ - if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == - EXT_NOT_RECORDED_ALLOCATED) { - /* Save the extent so that we can reattach it to the end */ - prealloc_loc = last_ext->extLocation; - prealloc_len = last_ext->extLength; - /* Mark the extent as a hole */ - last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); - last_ext->extLocation.logicalBlockNum = 0; - last_ext->extLocation.partitionReferenceNum = 0; - } - /* Can we merge with the previous extent? */ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { @@ -549,7 +540,7 @@ static int udf_do_extend_file(struct inode *inode, * more extents, we may need to enter possible following * empty indirect extent. */ - if (new_block_bytes || prealloc_len) + if (new_block_bytes) udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); } @@ -583,17 +574,6 @@ static int udf_do_extend_file(struct inode *inode, } out: - /* Do we have some preallocated blocks saved? */ - if (prealloc_len) { - err = udf_add_aext(inode, last_pos, &prealloc_loc, - prealloc_len, 1); - if (err) - return err; - last_ext->extLocation = prealloc_loc; - last_ext->extLength = prealloc_len; - count++; - } - /* last_pos should point to the last written extent... */ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); @@ -609,13 +589,17 @@ static int udf_do_extend_file(struct inode *inode, static void udf_do_extend_final_block(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, - uint32_t final_block_len) + uint32_t new_elen) { - struct super_block *sb = inode->i_sb; uint32_t added_bytes; - added_bytes = final_block_len - - (last_ext->extLength & (sb->s_blocksize - 1)); + /* + * Extent already large enough? It may already be rounded up to block + * size... + */ + if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) + return; + added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLength += added_bytes; UDF_I(inode)->i_lenExtents += added_bytes; @@ -632,12 +616,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize) int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; - unsigned long partial_final_block; + loff_t new_elen; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err = 0; - int within_final_block; + bool within_last_ext; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); @@ -646,8 +630,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize) else BUG(); + /* + * When creating a hole in a file, just don't bother with preserving + * preallocation. It likely won't be very useful anyway. 
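+ *
+ * (Illustration: truncating an 8 KiB file up to 1 MiB turns the range
+ * in between into a hole; blocks preallocated past the old EOF would
+ * now sit inside that hole, so they are freed up front instead.)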
+ */ + udf_discard_prealloc(inode); + etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); - within_final_block = (etype != -1); + within_last_ext = (etype != -1); + /* We don't expect extents past EOF... */ + WARN_ON_ONCE(within_last_ext && + elen > ((loff_t)offset + 1) << inode->i_blkbits); if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { @@ -663,19 +656,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize) extent.extLength |= etype << 30; } - partial_final_block = newsize & (sb->s_blocksize - 1); + new_elen = ((loff_t)offset << inode->i_blkbits) | + (newsize & (sb->s_blocksize - 1)); /* File has extent covering the new size (could happen when extending * inside a block)? */ - if (within_final_block) { + if (within_last_ext) { /* Extending file within the last file block */ - udf_do_extend_final_block(inode, &epos, &extent, - partial_final_block); + udf_do_extend_final_block(inode, &epos, &extent, new_elen); } else { - loff_t add = ((loff_t)offset << sb->s_blocksize_bits) | - partial_final_block; - err = udf_do_extend_file(inode, &epos, &extent, add); + err = udf_do_extend_file(inode, &epos, &extent, new_elen); } if (err < 0) @@ -776,10 +767,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, goto out_free; } - /* Are we beyond EOF? */ + /* Are we beyond EOF and preallocated extent? */ if (etype == -1) { int ret; loff_t hole_len; + isBeyondEOF = true; if (count) { if (c) diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 9f3aced46c68f..58120d2f265fd 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -241,7 +241,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, poffset - lfi); else { if (!copy_name) { - copy_name = kmalloc(UDF_NAME_LEN, + copy_name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS); if (!copy_name) { fi = ERR_PTR(-ENOMEM); @@ -1090,8 +1090,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, return -EINVAL; ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); - if (IS_ERR(ofi)) { - retval = PTR_ERR(ofi); + if (!ofi || IS_ERR(ofi)) { + if (IS_ERR(ofi)) + retval = PTR_ERR(ofi); goto end_rename; } @@ -1100,8 +1101,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, brelse(ofibh.sbh); tloc = lelb_to_cpu(ocfi.icb.extLocation); - if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) - != old_inode->i_ino) + if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino) goto end_rename; nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi); diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index 532cda99644ee..036ebd892b852 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c @@ -120,60 +120,42 @@ void udf_truncate_tail_extent(struct inode *inode) void udf_discard_prealloc(struct inode *inode) { - struct extent_position epos = { NULL, 0, {0, 0} }; + struct extent_position epos = {}; + struct extent_position prev_epos = {}; struct kernel_lb_addr eloc; uint32_t elen; uint64_t lbcount = 0; int8_t etype = -1, netype; - int adsize; struct udf_inode_info *iinfo = UDF_I(inode); + int bsize = 1 << inode->i_blkbits; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || - inode->i_size == iinfo->i_lenExtents) + ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize)) return; - if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) - adsize = sizeof(struct short_ad); - else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) - adsize = sizeof(struct long_ad); - else - 
adsize = 0; - epos.block = iinfo->i_location; /* Find the last extent in the file */ - while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { - etype = netype; + while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) { + brelse(prev_epos.bh); + prev_epos = epos; + if (prev_epos.bh) + get_bh(prev_epos.bh); + + etype = udf_next_aext(inode, &epos, &eloc, &elen, 1); lbcount += elen; } if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { - epos.offset -= adsize; lbcount -= elen; - extent_trunc(inode, &epos, &eloc, etype, elen, 0); - if (!epos.bh) { - iinfo->i_lenAlloc = - epos.offset - - udf_file_entry_alloc_offset(inode); - mark_inode_dirty(inode); - } else { - struct allocExtDesc *aed = - (struct allocExtDesc *)(epos.bh->b_data); - aed->lengthAllocDescs = - cpu_to_le32(epos.offset - - sizeof(struct allocExtDesc)); - if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || - UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) - udf_update_tag(epos.bh->b_data, epos.offset); - else - udf_update_tag(epos.bh->b_data, - sizeof(struct allocExtDesc)); - mark_buffer_dirty_inode(epos.bh, inode); - } + udf_delete_aext(inode, prev_epos); + udf_free_blocks(inode->i_sb, inode, &eloc, 0, + DIV_ROUND_UP(elen, 1 << inode->i_blkbits)); } /* This inode entry is in-memory only and thus we don't have to mark * the inode dirty */ iinfo->i_lenExtents = lbcount; brelse(epos.bh); + brelse(prev_epos.bh); } static void udf_update_alloc_ext_desc(struct inode *inode, diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index aef0da5d6f636..a3074a9d71a6a 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -974,7 +974,7 @@ static int resolve_userfault_fork(struct userfaultfd_ctx *ctx, int fd; fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new, - O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS)); + O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS)); if (fd < 0) return fd; @@ -1987,7 +1987,7 @@ SYSCALL_DEFINE1(userfaultfd, int, flags) mmgrab(ctx->mm); fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx, - O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); + O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS)); if (fd < 0) { mmdrop(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); diff --git a/fs/xattr.c b/fs/xattr.c index cd7a563e8bcd4..5a03eaadf029f 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -1049,7 +1049,7 @@ static int xattr_list_one(char **buffer, ssize_t *remaining_size, ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size) { - bool trusted = capable(CAP_SYS_ADMIN); + bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN); struct simple_xattr *xattr; ssize_t remaining_size = size; int err = 0; diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index c667c63f2cb00..fa8aefe6b7ec3 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -358,19 +358,36 @@ xfs_dinode_verify_fork( int whichfork) { uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork); + mode_t mode = be16_to_cpu(dip->di_mode); + uint32_t fork_size = XFS_DFORK_SIZE(dip, mp, whichfork); + uint32_t fork_format = XFS_DFORK_FORMAT(dip, whichfork); - switch (XFS_DFORK_FORMAT(dip, whichfork)) { + /* + * For fork types that can contain local data, check that the fork + * format matches the size of local data contained within the fork. + * + * For all types, check that when the size says the fork should be in extent + * or btree format, the inode isn't claiming it is in local format. 
+ */ + if (whichfork == XFS_DATA_FORK) { + if (S_ISDIR(mode) || S_ISLNK(mode)) { + if (be64_to_cpu(dip->di_size) <= fork_size && + fork_format != XFS_DINODE_FMT_LOCAL) + return __this_address; + } + + if (be64_to_cpu(dip->di_size) > fork_size && + fork_format == XFS_DINODE_FMT_LOCAL) + return __this_address; + } + + switch (fork_format) { case XFS_DINODE_FMT_LOCAL: /* - * no local regular files yet + * No local regular files yet. */ - if (whichfork == XFS_DATA_FORK) { - if (S_ISREG(be16_to_cpu(dip->di_mode))) - return __this_address; - if (be64_to_cpu(dip->di_size) > - XFS_DFORK_SIZE(dip, mp, whichfork)) - return __this_address; - } + if (S_ISREG(mode) && whichfork == XFS_DATA_FORK) + return __this_address; if (di_nextents) return __this_address; break; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1f61e085676b3..19008838df769 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -802,6 +802,7 @@ xfs_ialloc( xfs_buf_t **ialloc_context, xfs_inode_t **ipp) { + struct inode *dir = pip ? VFS_I(pip) : NULL; struct xfs_mount *mp = tp->t_mountp; xfs_ino_t ino; xfs_inode_t *ip; @@ -847,18 +848,17 @@ xfs_ialloc( return error; ASSERT(ip != NULL); inode = VFS_I(ip); - inode->i_mode = mode; set_nlink(inode, nlink); - inode->i_uid = current_fsuid(); inode->i_rdev = rdev; ip->i_d.di_projid = prid; - if (pip && XFS_INHERIT_GID(pip)) { - inode->i_gid = VFS_I(pip)->i_gid; - if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode)) - inode->i_mode |= S_ISGID; + if (dir && !(dir->i_mode & S_ISGID) && + (mp->m_flags & XFS_MOUNT_GRPID)) { + inode->i_uid = current_fsuid(); + inode->i_gid = dir->i_gid; + inode->i_mode = mode; } else { - inode->i_gid = current_fsgid(); + inode_init_owner(inode, dir, mode); } /* @@ -2669,14 +2669,13 @@ xfs_ifree_cluster( } /* - * This is called to return an inode to the inode free list. - * The inode should already be truncated to 0 length and have - * no pages associated with it. This routine also assumes that - * the inode is already a part of the transaction. + * This is called to return an inode to the inode free list. The inode should + * already be truncated to 0 length and have no pages associated with it. This + * routine also assumes that the inode is already a part of the transaction. * - * The on-disk copy of the inode will have been added to the list - * of unlinked inodes in the AGI. We need to remove the inode from - * that list atomically with respect to freeing it here. + * The on-disk copy of the inode will have been added to the list of unlinked + * inodes in the AGI. We need to remove the inode from that list atomically with + * respect to freeing it here. */ int xfs_ifree( @@ -2694,13 +2693,16 @@ xfs_ifree( ASSERT(ip->i_d.di_nblocks == 0); /* - * Pull the on-disk inode from the AGI unlinked list. + * Free the inode first so that we guarantee that the AGI lock is going + * to be taken before we remove the inode from the unlinked list. This + * makes the AGI lock -> unlinked list modification order the same as + * used in O_TMPFILE creation. 
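+ *
+ * (Rationale: two lock classes taken in opposite orders by two
+ * transactions is the classic ABBA deadlock; making every unlinked-list
+ * update take the AGI buffer lock first, as inode allocation already
+ * does, rules that ordering out.)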
*/ - error = xfs_iunlink_remove(tp, ip); + error = xfs_difree(tp, ip->i_ino, &xic); if (error) return error; - error = xfs_difree(tp, ip->i_ino, &xic); + error = xfs_iunlink_remove(tp, ip); if (error) return error; diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index bf5cb6efb8c09..66a089a62c39f 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -394,6 +394,10 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx, data_size = zonefs_check_zone_condition(inode, zone, false, false); } + } else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO && + data_size > isize) { + /* Do not expose garbage data */ + data_size = isize; } /* @@ -440,14 +444,22 @@ static void __zonefs_io_error(struct inode *inode, bool write) struct super_block *sb = inode->i_sb; struct zonefs_sb_info *sbi = ZONEFS_SB(sb); unsigned int noio_flag; - unsigned int nr_zones = - zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + unsigned int nr_zones = 1; struct zonefs_ioerr_data err = { .inode = inode, .write = write, }; int ret; + /* + * The only files that have more than one zone are conventional zone + * files with aggregated conventional zones, for which the inode zone + * size is always larger than the device zone size. + */ + if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev)) + nr_zones = zi->i_zone_size >> + (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + /* * Memory allocations in blkdev_report_zones() can trigger a memory * reclaim which may in turn cause a recursion into zonefs as well as @@ -764,6 +776,24 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) ret = submit_bio_wait(bio); + /* + * If the file zone was written underneath the file system, the zone + * write pointer may not be where we expect it to be, but the zone + * append write can still succeed. So check manually that we wrote where + * we intended to, that is, at zi->i_wpoffset. 
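+ *
+ * (Worked example with illustrative numbers: SECTOR_SHIFT is 9 for
+ * 512-byte sectors, so a zone starting at sector 8192 whose i_wpoffset
+ * is 4096 bytes must complete the append at sector
+ * 8192 + (4096 >> 9) == 8200; any other bi_sector means the write
+ * pointer moved behind our back.)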
+ */ + if (!ret) { + sector_t wpsector = + zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT); + + if (bio->bi_iter.bi_sector != wpsector) { + zonefs_warn(inode->i_sb, + "Corrupted write pointer %llu for zone at %llu\n", + wpsector, zi->i_zsector); + ret = -EIO; + } + } + zonefs_file_write_dio_end_io(iocb, size, ret, 0); out_release: @@ -1364,6 +1394,14 @@ static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone, zi->i_ztype = type; zi->i_zsector = zone->start; zi->i_zone_size = zone->len << SECTOR_SHIFT; + if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT && + !(sbi->s_features & ZONEFS_F_AGGRCNV)) { + zonefs_err(sb, + "zone size %llu doesn't match device's zone sectors %llu\n", + zi->i_zone_size, + bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT); + return -EINVAL; + } zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE, zone->capacity << SECTOR_SHIFT); @@ -1406,11 +1444,11 @@ static struct dentry *zonefs_create_inode(struct dentry *parent, struct inode *dir = d_inode(parent); struct dentry *dentry; struct inode *inode; - int ret; + int ret = -ENOMEM; dentry = d_alloc_name(parent, name); if (!dentry) - return NULL; + return ERR_PTR(ret); inode = new_inode(parent->d_sb); if (!inode) @@ -1435,7 +1473,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent, dput: dput(dentry); - return NULL; + return ERR_PTR(ret); } struct zonefs_zone_data { @@ -1455,7 +1493,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, struct blk_zone *zone, *next, *end; const char *zgroup_name; char *file_name; - struct dentry *dir; + struct dentry *dir, *dent; unsigned int n = 0; int ret; @@ -1473,8 +1511,8 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, zgroup_name = "seq"; dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type); - if (!dir) { - ret = -ENOMEM; + if (IS_ERR(dir)) { + ret = PTR_ERR(dir); goto free; } @@ -1520,8 +1558,9 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, * Use the file number within its group as file name. 
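 * (For instance, with zero-based numbering the third file of the "seq"
 * group is exposed as "seq/2".)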
*/ snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n); - if (!zonefs_create_inode(dir, file_name, zone, type)) { - ret = -ENOMEM; + dent = zonefs_create_inode(dir, file_name, zone, type); + if (IS_ERR(dent)) { + ret = PTR_ERR(dent); goto free; } diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 34fb3431a8f36..292a5c40bd0c6 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -71,7 +71,7 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb); void ghes_unregister_vendor_record_notifier(struct notifier_block *nb); #endif -int ghes_estatus_pool_init(int num_ghes); +int ghes_estatus_pool_init(unsigned int num_ghes); /* From drivers/edac/ghes_edac.c */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index a0c4b99d28994..f40c9534f20be 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -205,12 +205,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); #define tlb_needs_table_invalidate() (true) #endif +void tlb_remove_table_sync_one(void); + #else #ifdef tlb_needs_table_invalidate #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE #endif +static inline void tlb_remove_table_sync_one(void) { } + #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a774361f28d40..d233f9e4b9c60 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -328,6 +328,7 @@ #define DATA_DATA \ *(.xiptext) \ *(DATA_MAIN) \ + *(.data..decrypted) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data*) \ @@ -972,7 +973,6 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT #define PERCPU_DECRYPTED_SECTION \ . = ALIGN(PAGE_SIZE); \ - *(.data..decrypted) \ *(.data..percpu..decrypted) \ . 
= ALIGN(PAGE_SIZE); #else diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index e8d68fbb6e3f6..d7e513243dd29 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -321,8 +321,16 @@ #define IMX8MP_CLK_AUDIO_AXI 310 #define IMX8MP_CLK_HSIO_AXI 311 #define IMX8MP_CLK_MEDIA_ISP 312 +#define IMX8MP_CLK_MEDIA_DISP2_PIX 313 +#define IMX8MP_CLK_CLKOUT1_SEL 314 +#define IMX8MP_CLK_CLKOUT1_DIV 315 +#define IMX8MP_CLK_CLKOUT1 316 +#define IMX8MP_CLK_CLKOUT2_SEL 317 +#define IMX8MP_CLK_CLKOUT2_DIV 318 +#define IMX8MP_CLK_CLKOUT2 319 +#define IMX8MP_CLK_USB_SUSP 320 -#define IMX8MP_CLK_END 313 +#define IMX8MP_CLK_END 321 #define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1 diff --git a/include/linux/ata.h b/include/linux/ata.h index 6e67aded28f8c..6d2d31b03b4de 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -565,6 +565,18 @@ struct ata_bmdma_prd { ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 2))) +#define ata_id_has_devslp(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))) +#define ata_id_has_ncq_autosense(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))) +#define ata_id_has_dipm(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3))) #define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) #define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) #define ata_id_u32(id,n) \ @@ -577,9 +589,6 @@ struct ata_bmdma_prd { #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) #define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) -#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) -#define ata_id_has_ncq_autosense(id) \ - ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)) static inline bool ata_id_has_hipm(const u16 *id) { @@ -591,17 +600,6 @@ static inline bool ata_id_has_hipm(const u16 *id) return val & (1 << 9); } -static inline bool ata_id_has_dipm(const u16 *id) -{ - u16 val = id[ATA_ID_FEATURE_SUPP]; - - if (val == 0 || val == 0xffff) - return false; - - return val & (1 << 3); -} - - static inline bool ata_id_has_fua(const u16 *id) { if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) @@ -770,16 +768,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id) static inline bool ata_id_has_sense_reporting(const u16 *id) { - if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15))) + return false; + if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14)) return false; - return id[ATA_ID_COMMAND_SET_3] & (1 << 6); + return id[ATA_ID_COMMAND_SET_3] & BIT(6); } static inline bool ata_id_sense_reporting_enabled(const u16 *id) { - if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + if (!ata_id_has_sense_reporting(id)) + return false; + /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */ + if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14)) return false; - return id[ATA_ID_COMMAND_SET_4] & (1 << 6); + return id[ATA_ID_COMMAND_SET_4] & BIT(6); } /** diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 391bc1480dfb1..4d37c69e76b17 100644 --- 
a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -45,7 +45,7 @@ struct bpf_reg_state { enum bpf_reg_type type; union { /* valid when type == PTR_TO_PACKET */ - u16 range; + int range; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL @@ -290,6 +290,27 @@ struct bpf_verifier_state { iter < frame->allocated_stack / BPF_REG_SIZE; \ iter++, reg = bpf_get_spilled_reg(iter, frame)) +/* Invoke __expr over registers in __vst, setting __state and __reg */ +#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ + ({ \ + struct bpf_verifier_state *___vstate = __vst; \ + int ___i, ___j; \ + for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \ + struct bpf_reg_state *___regs; \ + __state = ___vstate->frame[___i]; \ + ___regs = __state->regs; \ + for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \ + __reg = &___regs[___j]; \ + (void)(__expr); \ + } \ + bpf_for_each_spilled_reg(___j, __state, __reg) { \ + if (!__reg) \ + continue; \ + (void)(__expr); \ + } \ + } \ + }) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 20a2ff1c07a1b..e93e3faa82296 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -136,6 +136,17 @@ BUFFER_FNS(Defer_Completion, defer_completion) static __always_inline void set_buffer_uptodate(struct buffer_head *bh) { + /* + * If somebody else already set this uptodate, they will + * have done the memory barrier, and a reader will thus + * see *some* valid buffer state. + * + * Any other serialization (with IO errors or whatever that + * might clear the bit) has to come from other state (eg BH_Lock). + */ + if (test_bit(BH_Uptodate, &bh->b_state)) + return; + /* * make it consistent with folio_mark_uptodate * pairs with smp_load_acquire in buffer_uptodate diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h index 5755ae5a47122..6a869682c1207 100644 --- a/include/linux/can/platform/sja1000.h +++ b/include/linux/can/platform/sja1000.h @@ -14,7 +14,7 @@ #define OCR_MODE_TEST 0x01 #define OCR_MODE_NORMAL 0x02 #define OCR_MODE_CLOCK 0x03 -#define OCR_MODE_MASK 0x07 +#define OCR_MODE_MASK 0x03 #define OCR_TX0_INVERT 0x04 #define OCR_TX0_PULLDOWN 0x08 #define OCR_TX0_PULLUP 0x10 diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 455e9b9e2adf5..8287382d3d1db 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -288,6 +288,7 @@ enum { CEPH_SESSION_FLUSHMSG_ACK, CEPH_SESSION_FORCE_RO, CEPH_SESSION_REJECT, + CEPH_SESSION_REQUEST_FLUSH_MDLOG, }; extern const char *ceph_session_op_name(int op); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 618838c48313c..959b370733f09 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -68,6 +68,7 @@ struct css_task_iter { struct list_head iters_node; /* css_set->task_iters */ }; +extern struct file_system_type cgroup_fs_type; extern struct cgroup_root cgrp_dfl_root; extern struct css_set init_css_set; diff --git a/include/linux/clk.h b/include/linux/clk.h index 7fd6a1febcf4f..1814eabb7c204 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -418,6 +418,47 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, */ struct clk *devm_clk_get(struct device *dev, const char *id); +/** + * devm_clk_get_prepared - devm_clk_get() + clk_prepare() + * @dev: device for clock "consumer" + * @id: 
clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * The returned clk (if valid) is prepared. Drivers must however assume + * that the clock is not enabled. + * + * The clock will automatically be unprepared and freed when the device + * is unbound from the bus. + */ +struct clk *devm_clk_get_prepared(struct device *dev, const char *id); + +/** + * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * The returned clk (if valid) is prepared and enabled. + * + * The clock will automatically be disabled, unprepared and freed + * when the device is unbound from the bus. + */ +struct clk *devm_clk_get_enabled(struct device *dev, const char *id); + /** * devm_clk_get_optional - lookup and obtain a managed reference to an optional * clock producer. @@ -429,6 +470,50 @@ struct clk *devm_clk_get(struct device *dev, const char *id); */ struct clk *devm_clk_get_optional(struct device *dev, const char *id); +/** + * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get_prepared(). + * + * The returned clk (if valid) is prepared. Drivers must however + * assume that the clock is not enabled. + * + * The clock will automatically be unprepared and freed when the + * device is unbound from the bus. + */ +struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); + +/** + * devm_clk_get_optional_enabled - devm_clk_get_optional() + + * clk_prepare_enable() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get_enabled(). + * + * The returned clk (if valid) is prepared and enabled. + * + * The clock will automatically be disabled, unprepared and freed + * when the device is unbound from the bus. + */ +struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); + /** * devm_get_clk_from_child - lookup and obtain a managed reference to a * clock producer from child node. 
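Taken together, the kernel-doc above describes devres helpers that fold clk_get() plus clk_prepare()/clk_prepare_enable() and the matching unwind into one call. A minimal sketch of the intended use in a driver probe path (the foo_probe() function and the "bus" clock id are hypothetical, not part of this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/* Looked up, prepared and enabled in one step; devres
		 * disables, unprepares and releases the clk when probe
		 * fails later or when the device is unbound. */
		clk = devm_clk_get_enabled(&pdev->dev, "bus");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return 0;
	}

The _optional variants behave the same, except that a missing clock yields a NULL dummy clk instead of an error.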
@@ -773,12 +858,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } +static inline struct clk *devm_clk_get_prepared(struct device *dev, + const char *id) +{ + return NULL; +} + +static inline struct clk *devm_clk_get_enabled(struct device *dev, + const char *id) +{ + return NULL; +} + static inline struct clk *devm_clk_get_optional(struct device *dev, const char *id) { return NULL; } +static inline struct clk *devm_clk_get_optional_prepared(struct device *dev, + const char *id) +{ + return NULL; +} + +static inline struct clk *devm_clk_get_optional_enabled(struct device *dev, + const char *id) +{ + return NULL; +} + static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks) { diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 4cf524ccab430..ae2de4e1cd6fa 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -54,9 +54,6 @@ #define __compiletime_object_size(obj) __builtin_object_size(obj, 0) -#define __compiletime_warning(message) __attribute__((__warning__(message))) -#define __compiletime_error(message) __attribute__((__error__(message))) - #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) #define __latent_entropy __attribute__((latent_entropy)) #endif diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index b2a3f4f641a70..08eb06301791d 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -30,6 +30,7 @@ # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 +# define __GCC4_has_attribute___error__ 1 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 @@ -37,6 +38,7 @@ # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) # define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) # define __GCC4_has_attribute___fallthrough__ 0 +# define __GCC4_has_attribute___warning__ 1 #endif /* @@ -136,6 +138,17 @@ # define __designated_init #endif +/* + * Optional: only supported since clang >= 14.0 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute + */ +#if __has_attribute(__error__) +# define __compiletime_error(msg) __attribute__((__error__(msg))) +#else +# define __compiletime_error(msg) +#endif + /* * Optional: not supported by clang * @@ -272,6 +285,17 @@ */ #define __used __attribute__((__used__)) +/* + * Optional: only supported since clang >= 14.0 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute + */ +#if __has_attribute(__warning__) +# define __compiletime_warning(msg) __attribute__((__warning__(msg))) +#else +# define __compiletime_warning(msg) +#endif + /* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 2a1c202baa1fe..eb2bda017ccb7 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -281,12 +281,6 @@ struct ftrace_likely_data { #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 #endif 
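For context on what these fallbacks (removed below from compiler_types.h) guarded: __compiletime_error() attaches GCC's error attribute to a function that must be optimized away. A freestanding sketch of the mechanism, using the attribute directly (bad_size() and copy_checked() are illustrative names, not kernel code):

	/* Build with gcc -O2: if the compiler cannot prove the call to
	 * bad_size() dead and eliminate it, compilation fails with the
	 * attached message. */
	__attribute__((__error__("copy size is not a compile-time constant")))
	void bad_size(void);

	static inline void copy_checked(char *dst, const char *src,
					unsigned long n)
	{
		if (!__builtin_constant_p(n))
			bad_size();
		__builtin_memcpy(dst, src, n);
	}

On a compiler without the __error__ attribute, the macro now expands to nothing, so such checks still compile but simply stop firing.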
-#ifndef __compiletime_warning -# define __compiletime_warning(message) -#endif -#ifndef __compiletime_error -# define __compiletime_error(message) -#endif #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index d6c4cc9ecc77c..9a87215b5526c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -45,7 +45,7 @@ struct debugfs_u32_array { extern struct dentry *arch_debugfs_dir; -#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \ static int __fops ## _open(struct inode *inode, struct file *file) \ { \ __simple_attr_check_format(__fmt, 0ull); \ @@ -56,10 +56,16 @@ static const struct file_operations __fops = { \ .open = __fops ## _open, \ .release = simple_attr_release, \ .read = debugfs_attr_read, \ - .write = debugfs_attr_write, \ + .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \ .llseek = no_llseek, \ } +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ + DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false) + +#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \ + DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true) + typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); #if defined(CONFIG_DEBUG_FS) @@ -91,6 +97,8 @@ struct dentry *debugfs_create_automount(const char *name, void debugfs_remove(struct dentry *dentry); #define debugfs_remove_recursive debugfs_remove +void debugfs_lookup_and_remove(const char *name, struct dentry *parent); + const struct file_operations *debugfs_real_fops(const struct file *filp); int debugfs_file_get(struct dentry *dentry); @@ -100,6 +108,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); ssize_t debugfs_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); +ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -220,6 +230,10 @@ static inline void debugfs_remove(struct dentry *dentry) static inline void debugfs_remove_recursive(struct dentry *dentry) { } +static inline void debugfs_lookup_and_remove(const char *name, + struct dentry *parent) +{ } + const struct file_operations *debugfs_real_fops(const struct file *filp); static inline int debugfs_file_get(struct dentry *dentry) @@ -243,6 +257,13 @@ static inline ssize_t debugfs_attr_write(struct file *file, return -ENODEV; } +static inline ssize_t debugfs_attr_write_signed(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + return -ENODEV; +} + static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) { diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 121a2430d7f7e..fc4c7ad49cee3 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -146,8 +146,8 @@ struct devfreq_stats { * @work: delayed work for load monitoring. * @previous_freq: previously configured frequency value. * @last_status: devfreq user device info, performance statistics - * @data: Private data of the governor. The devfreq framework does not - * touch this. + * @data: data passed from the devfreq driver to governors; governors + * should not change it. 
+ * @governor_data: private data for governors; the devfreq core doesn't touch it. * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs) * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs) * @scaling_min_freq: Limit minimum frequency requested by OPP interface @@ -183,7 +183,8 @@ struct devfreq { unsigned long previous_freq; struct devfreq_dev_status last_status; - void *data; /* private data for governors */ + void *data; + void *governor_data; struct dev_pm_qos_request user_min_freq_req; struct dev_pm_qos_request user_max_freq_req; diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index a57ee75342cf8..c0c6ea9ea7e36 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -50,9 +50,6 @@ struct _ddebug { #if defined(CONFIG_DYNAMIC_DEBUG_CORE) -/* exported for module authors to exercise >control */ -int dynamic_debug_exec_queries(const char *query, const char *modname); - int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); extern int ddebug_remove_module(const char *mod_name); @@ -196,7 +193,7 @@ static inline int ddebug_remove_module(const char *mod) static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *modname) { - if (strstr(param, "dyndbg")) { + if (!strcmp(param, "dyndbg")) { /* avoid pr_warn(), which wants pr_fmt() fully defined */ printk(KERN_WARNING "dyndbg param is supported only in " "CONFIG_DYNAMIC_DEBUG builds\n"); @@ -216,12 +213,6 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, rowsize, groupsize, buf, len, ascii); \ } while (0) -static inline int dynamic_debug_exec_queries(const char *query, const char *modname) -{ - pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n"); - return 0; -} - #endif /* !CONFIG_DYNAMIC_DEBUG_CORE */ #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 3bac68fb7ff1c..0849903904209 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1108,8 +1108,6 @@ void efi_check_for_embedded_firmwares(void); static inline void efi_check_for_embedded_firmwares(void) { } #endif -efi_status_t efi_random_get_seed(void); - void efi_retrieve_tpm2_eventlog(void); /* @@ -1161,7 +1159,7 @@ void efi_retrieve_tpm2_eventlog(void); arch_efi_call_virt_teardown(); \ }) -#define EFI_RANDOM_SEED_SIZE 64U +#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE struct linux_efi_random_seed { u32 size; diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 7dff07713a073..46c42479f9501 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -69,7 +69,7 @@ #define EXIT_TO_USER_MODE_WORK \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \ + _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ ARCH_EXIT_TO_USER_MODE_WORK) /** @@ -259,12 +259,13 @@ static __always_inline void arch_exit_to_user_mode(void) { } #endif /** - * arch_do_signal - Architecture specific signal delivery function + * arch_do_signal_or_restart - Architecture specific signal delivery function * @regs: Pointer to current's pt_regs + * @has_signal: actual signal to handle * * Invoked from exit_to_user_mode_loop(). 
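+ *
+ * (@has_signal is expected to mirror TIF_SIGPENDING: when it is false,
+ * only syscall-restart handling is needed, not actual signal delivery.)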
*/ -void arch_do_signal(struct pt_regs *regs); +void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal); /** * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit() diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 0cef17afb41a6..9b93f8584ff7d 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -11,8 +11,8 @@ # define ARCH_XFER_TO_GUEST_MODE_WORK (0) #endif -#define XFER_TO_GUEST_MODE_WORK \ - (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +#define XFER_TO_GUEST_MODE_WORK \ + (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \ _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK) struct kvm_vcpu; diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index dc4fd8a6644dd..6cd2a92daf205 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -39,6 +39,7 @@ struct file *eventfd_fget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); +__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask); int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt); @@ -61,7 +62,13 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) return ERR_PTR(-ENOSYS); } -static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) +static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n) +{ + return -ENOSYS; +} + +static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, + unsigned mask) { return -ENOSYS; } diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 921e750843e66..766fcd973beba 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -19,7 +19,7 @@ /* List of all valid flags for the how->resolve argument: */ #define VALID_RESOLVE_FLAGS \ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \ - RESOLVE_BENEATH | RESOLVE_IN_ROOT) + RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED) /* List of all open_how "versions". 
*/ #define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index a32bf47c593e7..f1a99d3e55707 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -123,7 +123,7 @@ extern void __fd_install(struct files_struct *files, extern int __close_fd(struct files_struct *files, unsigned int fd); extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); -extern int __close_fd_get_file(unsigned int fd, struct file **res); +extern int close_fd_get_file(unsigned int fd, struct file **res); extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, struct files_struct **new_fdp); diff --git a/include/linux/fs.h b/include/linux/fs.h index c8f887641878f..74e19bccbf738 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1145,6 +1145,7 @@ extern int locks_delete_block(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); +bool vfs_inode_has_locks(struct inode *inode); extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); extern void lease_get_mtime(struct inode *, struct timespec64 *time); @@ -1257,6 +1258,11 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) return 0; } +static inline bool vfs_inode_has_locks(struct inode *inode) +{ + return false; +} + static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { return -ENOLCK; @@ -1437,7 +1443,7 @@ struct super_block { const struct xattr_handler **s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; - struct key *s_master_keys; /* master crypto keys in use */ + struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */ #endif #ifdef CONFIG_FS_VERITY const struct fsverity_operations *s_vop; @@ -1817,6 +1823,14 @@ struct dir_context { */ #define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) +/* + * These flags control the behavior of vfs_copy_file_range(). + * They are not available to the user via syscall. + * + * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops + */ +#define COPY_FILE_SPLICE (1 << 0) + struct iov_iter; struct file_operations { @@ -3337,7 +3351,7 @@ void simple_transaction_set(struct file *file, size_t n); * All attributes contain a text representation of a numeric value * that are accessed with the get() and set() functions. */ -#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ +#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \ static int __fops ## _open(struct inode *inode, struct file *file) \ { \ __simple_attr_check_format(__fmt, 0ull); \ @@ -3348,10 +3362,16 @@ static const struct file_operations __fops = { \ .open = __fops ## _open, \ .release = simple_attr_release, \ .read = simple_attr_read, \ - .write = simple_attr_write, \ + .write = (__is_signed) ? 
simple_attr_write_signed : simple_attr_write, \ .llseek = generic_file_llseek, \ } +#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ + DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false) + +#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \ + DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true) + static inline __printf(1, 2) void __simple_attr_check_format(const char *fmt, ...) { @@ -3366,6 +3386,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); +ssize_t simple_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index d0a1b8edfd9db..d0bc66fae7e01 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -193,7 +193,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy) } /* keyring.c */ -void fscrypt_sb_free(struct super_block *sb); +void fscrypt_destroy_keyring(struct super_block *sb); int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); @@ -380,7 +380,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy) } /* keyring.c */ -static inline void fscrypt_sb_free(struct super_block *sb) +static inline void fscrypt_destroy_keyring(struct super_block *sb) { } diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 14e6202ce47f1..b25df1f8d48d3 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -345,4 +345,22 @@ static inline void copy_highpage(struct page *to, struct page *from) #endif +static inline void memcpy_from_page(char *to, struct page *page, + size_t offset, size_t len) +{ + char *from = kmap_atomic(page); + + memcpy(to, from + offset, len); + kunmap_atomic(from); +} + +static inline void memcpy_to_page(struct page *page, size_t offset, + const char *from, size_t len) +{ + char *to = kmap_atomic(page); + + memcpy(to + offset, from, len); + kunmap_atomic(to); +} + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index b9fbb6d4150e2..99b73fc4a8246 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -144,7 +145,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); -bool isolate_huge_page(struct page *page, struct list_head *list); +int isolate_hugetlb(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); @@ -174,8 +175,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, struct page *follow_huge_pd(struct vm_area_struct *vma, unsigned long address, hugepd_t hpd, int flags, int pdshift); -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmd, int flags); +struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, + int flags); struct page *follow_huge_pud(struct mm_struct *mm, unsigned 
long address, pud_t *pud, int flags); struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address, @@ -261,8 +262,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma, return NULL; } -static inline struct page *follow_huge_pmd(struct mm_struct *mm, - unsigned long address, pmd_t *pmd, int flags) +static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, + unsigned long address, int flags) { return NULL; } @@ -325,9 +326,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, return NULL; } -static inline bool isolate_huge_page(struct page *page, struct list_head *list) +static inline int isolate_hugetlb(struct page *page, struct list_head *list) { - return false; + return -EBUSY; } static inline void putback_active_hugepage(struct page *page) @@ -541,7 +542,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log) if (!page_size_log) return &default_hstate; - return size_to_hstate(1UL << page_size_log); + if (page_size_log < BITS_PER_LONG) + return size_to_hstate(1UL << page_size_log); + + return NULL; } static inline struct hstate *hstate_vma(struct vm_area_struct *vma) @@ -942,4 +946,16 @@ static inline __init void hugetlb_cma_check(void) } #endif +#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE +static inline bool hugetlb_pmd_shared(pte_t *pte) +{ + return page_count(virt_to_page(pte)) > 1; +} +#else +static inline bool hugetlb_pmd_shared(pte_t *pte) +{ + return false; +} +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 1ce131f29f3b4..eada4d8d65879 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1269,6 +1269,8 @@ struct hv_ring_buffer_debug_info { int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, struct hv_ring_buffer_debug_info *debug_info); +bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel); + /* Vmbus interface */ #define vmbus_driver_register(driver) \ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 04e96d688ba9e..5f45b785e794c 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -32,6 +32,7 @@ struct adis_timeout { u16 sw_reset_ms; u16 self_test_ms; }; + /** * struct adis_data - ADIS chip variant specific data * @read_delay: SPI delay for read operations in us @@ -45,10 +46,11 @@ struct adis_timeout { * @self_test_mask: Bitmask of supported self-test operations * @self_test_reg: Register address to request self test command * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg - * @status_error_msgs: Array of error messgaes + * @status_error_msgs: Array of error messages * @status_error_mask: Bitmask of errors supported by the device * @timeouts: Chip specific delays * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable + * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin * @has_paging: True if ADIS device has paged registers * @burst_reg_cmd: Register command that triggers burst * @burst_len: Burst size in the SPI RX buffer. 
If @burst_max_len is defined, @@ -77,6 +79,7 @@ struct adis_data { unsigned int status_error_mask; int (*enable_irq)(struct adis *adis, bool enable); + bool unmasked_drdy; bool has_paging; @@ -126,12 +129,12 @@ struct adis { unsigned long irq_flag; void *buffer; - uint8_t tx[10] ____cacheline_aligned; - uint8_t rx[4]; + u8 tx[10] ____cacheline_aligned; + u8 rx[4]; }; int adis_init(struct adis *adis, struct iio_dev *indio_dev, - struct spi_device *spi, const struct adis_data *data); + struct spi_device *spi, const struct adis_data *data); int __adis_reset(struct adis *adis); /** @@ -152,9 +155,9 @@ static inline int adis_reset(struct adis *adis) } int __adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int val, unsigned int size); + unsigned int val, unsigned int size); int __adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size); + unsigned int *val, unsigned int size); /** * __adis_write_reg_8() - Write single byte to a register (unlocked) @@ -163,7 +166,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, * @value: The value to write */ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, - uint8_t val) + u8 val) { return __adis_write_reg(adis, reg, val, 1); } @@ -175,7 +178,7 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, - uint16_t val) + u16 val) { return __adis_write_reg(adis, reg, val, 2); } @@ -187,7 +190,7 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, - uint32_t val) + u32 val) { return __adis_write_reg(adis, reg, val, 4); } @@ -199,7 +202,7 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, - uint16_t *val) + u16 *val) { unsigned int tmp; int ret; @@ -218,7 +221,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, - uint32_t *val) + u32 *val) { unsigned int tmp; int ret; @@ -238,7 +241,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, * @size: The size of the @value (in bytes) */ static inline int adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int val, unsigned int size) + unsigned int val, unsigned int size) { int ret; @@ -257,7 +260,7 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg, * @size: The size of the @val buffer */ static int adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size) + unsigned int *val, unsigned int size) { int ret; @@ -275,7 +278,7 @@ static int adis_read_reg(struct adis *adis, unsigned int reg, * @value: The value to write */ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, - uint8_t val) + u8 val) { return adis_write_reg(adis, reg, val, 1); } @@ -287,7 +290,7 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, - uint16_t val) + u16 val) { return adis_write_reg(adis, reg, val, 2); } @@ -299,7 +302,7 @@ static inline int adis_write_reg_16(struct adis *adis, 
unsigned int reg, * @value: Value to be written */ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, - uint32_t val) + u32 val) { return adis_write_reg(adis, reg, val, 4); } @@ -311,7 +314,7 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, - uint16_t *val) + u16 *val) { unsigned int tmp; int ret; @@ -330,7 +333,7 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, - uint32_t *val) + u32 *val) { unsigned int tmp; int ret; @@ -401,9 +404,20 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, __adis_update_bits_base(adis, reg, mask, val, 2)); \ }) -int adis_enable_irq(struct adis *adis, bool enable); int __adis_check_status(struct adis *adis); int __adis_initial_startup(struct adis *adis); +int __adis_enable_irq(struct adis *adis, bool enable); + +static inline int adis_enable_irq(struct adis *adis, bool enable) +{ + int ret; + + mutex_lock(&adis->state_lock); + ret = __adis_enable_irq(adis, enable); + mutex_unlock(&adis->state_lock); + + return ret; +} static inline int adis_check_status(struct adis *adis) { @@ -429,8 +443,8 @@ static inline int adis_initial_startup(struct adis *adis) } int adis_single_conversion(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, unsigned int error_mask, - int *val); + const struct iio_chan_spec *chan, + unsigned int error_mask, int *val); #define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \ .type = IIO_VOLTAGE, \ @@ -479,7 +493,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, .modified = 1, \ .channel2 = IIO_MOD_ ## mod, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - info_sep, \ + (info_sep), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .info_mask_shared_by_all = info_all, \ .address = (addr), \ @@ -513,7 +527,7 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); int adis_update_scan_mode(struct iio_dev *indio_dev, - const unsigned long *scan_mask); + const unsigned long *scan_mask); #else /* CONFIG_IIO_BUFFER */ @@ -537,7 +551,8 @@ static inline int devm_adis_probe_trigger(struct adis *adis, #ifdef CONFIG_DEBUG_FS int adis_debugfs_reg_access(struct iio_dev *indio_dev, - unsigned int reg, unsigned int writeval, unsigned int *readval); + unsigned int reg, unsigned int writeval, + unsigned int *readval); #else diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index b68fca08be27c..3088d94684c1c 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); +#ifdef CONFIG_INET +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); +#else +static inline int inet_gifconf(struct net_device *dev, char __user *buf, + int len, int size) +{ + return 0; +} +#endif void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 
ee8299eb1f524..0652b4858ba62 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -61,6 +61,9 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. + * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. + * Users will enable it explicitly by enable_irq() or enable_nmi() + * later. */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -74,6 +77,7 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 +#define IRQF_NO_AUTOEN 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 35b2d845704d9..649a4d7c241bc 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -5,50 +5,20 @@ #include #include -struct io_identity { - struct files_struct *files; - struct mm_struct *mm; -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *blkcg_css; -#endif - const struct cred *creds; - struct nsproxy *nsproxy; - struct fs_struct *fs; - unsigned long fsize; -#ifdef CONFIG_AUDIT - kuid_t loginuid; - unsigned int sessionid; -#endif - refcount_t count; -}; - -struct io_uring_task { - /* submission side */ - struct xarray xa; - struct wait_queue_head wait; - struct file *last; - struct percpu_counter inflight; - struct io_identity __identity; - struct io_identity *identity; - atomic_t in_idle; - bool sqpoll; -}; - #if defined(CONFIG_IO_URING) struct sock *io_uring_get_socket(struct file *file); -void __io_uring_task_cancel(void); -void __io_uring_files_cancel(struct files_struct *files); +void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); -static inline void io_uring_task_cancel(void) +static inline void io_uring_files_cancel(void) { - if (current->io_uring && !xa_empty(¤t->io_uring->xa)) - __io_uring_task_cancel(); + if (current->io_uring) + __io_uring_cancel(false); } -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_task_cancel(void) { - if (current->io_uring && !xa_empty(¤t->io_uring->xa)) - __io_uring_files_cancel(files); + if (current->io_uring) + __io_uring_cancel(true); } static inline void io_uring_free(struct task_struct *tsk) { @@ -63,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file *file) static inline void io_uring_task_cancel(void) { } -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { } static inline void io_uring_free(struct task_struct *tsk) diff --git a/include/linux/iova.h b/include/linux/iova.h index a0637abffee88..6c19b09e96634 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -132,7 +132,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -#if IS_ENABLED(CONFIG_IOMMU_IOVA) +#if IS_REACHABLE(CONFIG_IOMMU_IOVA) int iova_cache_get(void); void iova_cache_put(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f5392d96d6886..394f10fc29aad 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -320,6 +320,7 @@ extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) 
__noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); +void check_panic_on_warn(const char *origin); extern void oops_enter(void); extern void oops_exit(void); extern bool oops_may_print(void); @@ -520,12 +521,6 @@ static inline u32 int_sqrt64(u64 x) } #endif -#ifdef CONFIG_SMP -extern unsigned int sysctl_oops_all_cpu_backtrace; -#else -#define sysctl_oops_all_cpu_backtrace 0 -#endif /* CONFIG_SMP */ - extern void bust_spinlocks(int yes); extern int panic_timeout; extern unsigned long panic_print; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 94871f12e5362..9cb0a3d7874f2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -911,6 +911,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); +long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg); int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); @@ -1489,6 +1491,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); #else diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index 20f1e3ff60130..591bc4cefe1d6 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h @@ -10,16 +10,29 @@ struct mb_cache; +/* Cache entry flags */ +enum { + MBE_REFERENCED_B = 0, + MBE_REUSABLE_B +}; + struct mb_cache_entry { /* List of entries in cache - protected by cache->c_list_lock */ struct list_head e_list; - /* Hash table list - protected by hash chain bitlock */ + /* + * Hash table list - protected by hash chain bitlock. The entry is + * guaranteed to be hashed while e_refcnt > 0. + */ struct hlist_bl_node e_hash_list; + /* + * Entry refcount. Once it reaches zero, entry is unhashed and freed. + * While refcount > 0, the entry is guaranteed to stay in the hash and + * e.g. mb_cache_entry_try_delete() will fail. 
+ */ atomic_t e_refcnt; /* Key in hash - stable during lifetime of the entry */ u32 e_key; - u32 e_referenced:1; - u32 e_reusable:1; + unsigned long e_flags; /* User provided value - stable during lifetime of the entry */ u64 e_value; }; @@ -29,16 +42,24 @@ void mb_cache_destroy(struct mb_cache *cache); int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, u64 value, bool reusable); -void __mb_cache_entry_free(struct mb_cache_entry *entry); -static inline int mb_cache_entry_put(struct mb_cache *cache, - struct mb_cache_entry *entry) +void __mb_cache_entry_free(struct mb_cache *cache, + struct mb_cache_entry *entry); +void mb_cache_entry_wait_unused(struct mb_cache_entry *entry); +static inline void mb_cache_entry_put(struct mb_cache *cache, + struct mb_cache_entry *entry) { - if (!atomic_dec_and_test(&entry->e_refcnt)) - return 0; - __mb_cache_entry_free(entry); - return 1; + unsigned int cnt = atomic_dec_return(&entry->e_refcnt); + + if (cnt > 0) { + if (cnt <= 2) + wake_up_var(&entry->e_refcnt); + return; + } + __mb_cache_entry_free(cache, entry); } +struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, + u32 key, u64 value); void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value); struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, u64 value); diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index 1e02058113944..b0da04fe087bb 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -125,7 +125,11 @@ struct cmos_rtc_board_info { #define RTC_IO_EXTENT_USED RTC_IO_EXTENT #endif /* ARCH_RTC_LOCATION */ -unsigned int mc146818_get_time(struct rtc_time *time); +bool mc146818_does_rtc_work(void); +int mc146818_get_time(struct rtc_time *time); int mc146818_set_time(struct rtc_time *time); +bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param), + void *param); + #endif /* _MC146818RTC_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 41fbb4793394d..ae88362216a4e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -899,7 +899,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); struct mlx5_async_ctx { struct mlx5_core_dev *dev; atomic_t num_inflight; - struct wait_queue_head wait; + struct completion inflight_done; }; struct mlx5_async_work; diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 42df06c6b19ce..ef870d1f4f5f7 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -270,6 +270,7 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ +#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */ bool reenable_cmdq; /* Re-enable Command Queue */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index d9a65c6a8816f..545578fb814b0 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -445,7 +445,7 @@ static inline bool mmc_ready_for_data(u32 status) #define MMC_SECURE_TRIM1_ARG 0x80000001 #define MMC_SECURE_TRIM2_ARG 0x80008000 #define MMC_SECURE_ARGS 0x80000000 -#define MMC_TRIM_ARGS 0x00008001 +#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003 #define mmc_driver_type_mask(n) (1 << (n)) diff --git a/include/linux/namei.h b/include/linux/namei.h index a4bb992623c41..b9605b2b46e71 100644 --- 
a/include/linux/namei.h +++ b/include/linux/namei.h @@ -46,6 +46,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; #define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */ #define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */ #define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */ +#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */ /* LOOKUP_* flags which do scope-related checks based on the dirfd. */ #define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT) diff --git a/include/linux/net.h b/include/linux/net.h index 0dcd51feef02d..ae713c8513425 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -42,8 +42,6 @@ struct net; #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 -#define PROTO_CMSG_DATA_ONLY 0x0001 - #ifndef ARCH_HAS_SOCKET_TYPES /** * enum sock_type - Socket types @@ -138,7 +136,6 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, struct proto_ops { int family; - unsigned int flags; struct module *owner; int (*release) (struct socket *sock); int (*bind) (struct socket *sock, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6564fb4ac49e1..b478a16ef284d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -166,31 +166,38 @@ static inline bool dev_xmit_complete(int rc) * (unsigned long) so they can be read and written atomically. */ +#define NET_DEV_STAT(FIELD) \ + union { \ + unsigned long FIELD; \ + atomic_long_t __##FIELD; \ + } + struct net_device_stats { - unsigned long rx_packets; - unsigned long tx_packets; - unsigned long rx_bytes; - unsigned long tx_bytes; - unsigned long rx_errors; - unsigned long tx_errors; - unsigned long rx_dropped; - unsigned long tx_dropped; - unsigned long multicast; - unsigned long collisions; - unsigned long rx_length_errors; - unsigned long rx_over_errors; - unsigned long rx_crc_errors; - unsigned long rx_frame_errors; - unsigned long rx_fifo_errors; - unsigned long rx_missed_errors; - unsigned long tx_aborted_errors; - unsigned long tx_carrier_errors; - unsigned long tx_fifo_errors; - unsigned long tx_heartbeat_errors; - unsigned long tx_window_errors; - unsigned long rx_compressed; - unsigned long tx_compressed; + NET_DEV_STAT(rx_packets); + NET_DEV_STAT(tx_packets); + NET_DEV_STAT(rx_bytes); + NET_DEV_STAT(tx_bytes); + NET_DEV_STAT(rx_errors); + NET_DEV_STAT(tx_errors); + NET_DEV_STAT(rx_dropped); + NET_DEV_STAT(tx_dropped); + NET_DEV_STAT(multicast); + NET_DEV_STAT(collisions); + NET_DEV_STAT(rx_length_errors); + NET_DEV_STAT(rx_over_errors); + NET_DEV_STAT(rx_crc_errors); + NET_DEV_STAT(rx_frame_errors); + NET_DEV_STAT(rx_fifo_errors); + NET_DEV_STAT(rx_missed_errors); + NET_DEV_STAT(tx_aborted_errors); + NET_DEV_STAT(tx_carrier_errors); + NET_DEV_STAT(tx_fifo_errors); + NET_DEV_STAT(tx_heartbeat_errors); + NET_DEV_STAT(tx_window_errors); + NET_DEV_STAT(rx_compressed); + NET_DEV_STAT(tx_compressed); }; +#undef NET_DEV_STAT #include @@ -3201,14 +3208,6 @@ static inline bool dev_has_header(const struct net_device *dev) return dev->header_ops && dev->header_ops->create; } -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, - int len, int size); -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); -static inline int unregister_gifconf(unsigned int family) -{ - return register_gifconf(family, NULL); -} - #ifdef CONFIG_NET_FLOW_LIMIT #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { @@ -5264,4 +5263,9 @@ do { \ extern struct net_device 
*blackhole_netdev; +/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */ +#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) +#define DEV_STATS_ADD(DEV, FIELD, VAL) \ + atomic_long_add((VAL), &(DEV)->stats.__##FIELD) + #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index ab192720e2d66..62f7e7e257c10 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -198,6 +198,9 @@ struct ip_set_region { u32 elements; /* Number of elements vs timeout */ }; +/* Max range where every element is added/deleted in one step */ +#define IPSET_MAX_RANGE (1<<14) + /* The core set type structure */ struct ip_set_type { struct list_head list; diff --git a/include/linux/nospec.h b/include/linux/nospec.h index c1e79f72cd892..9f0af4f116d98 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h @@ -11,6 +11,10 @@ struct task_struct; +#ifndef barrier_nospec +# define barrier_nospec() do { } while (0) +#endif + /** * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise * @index: array element index diff --git a/include/linux/nvme.h b/include/linux/nvme.h index bfed36e342ccb..fe39ed9e9303e 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -7,6 +7,7 @@ #ifndef _LINUX_NVME_H #define _LINUX_NVME_H +#include <linux/bits.h> #include <linux/types.h> #include <linux/uuid.h> @@ -528,7 +529,7 @@ enum { NVME_CMD_EFFECTS_NCC = 1 << 2, NVME_CMD_EFFECTS_NIC = 1 << 3, NVME_CMD_EFFECTS_CCC = 1 << 4, - NVME_CMD_EFFECTS_CSE_MASK = 3 << 16, + NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16), NVME_CMD_EFFECTS_UUID_SEL = 1 << 19, }; diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 06409a6c40bcb..5e07f3cfad301 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -49,7 +49,7 @@ enum nvmem_type { * @word_size: Minimum read/write access granularity. * @stride: Minimum read/write access stride. * @priv: User context passed to read/write callbacks. - * @wp-gpio: Write protect pin + * @ignore_wp: Write Protect pin is managed by the provider. * * Note: A default "nvmem" name will be assigned to the device if * no name is specified in its configuration. In such case "" is @@ -63,12 +63,12 @@ struct nvmem_config { const char *name; int id; struct module *owner; - struct gpio_desc *wp_gpio; const struct nvmem_cell_info *cells; int ncells; enum nvmem_type type; bool read_only; bool root_only; + bool ignore_wp; bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 07ca187fc5e44..fe339106e02c4 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -113,8 +113,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) } static inline int of_dma_configure_id(struct device *dev, - struct device_node *np, - bool force_dma) + struct device_node *np, + bool force_dma, + const u32 *id) { return 0; } diff --git a/include/linux/once.h b/include/linux/once.h index ae6f4eb41cbe7..bb58e1c3aa034 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -5,10 +5,18 @@ #include <linux/types.h> #include <linux/jump_label.h> +/* Helpers used from arbitrary contexts. * Hard irqs are blocked, be cautious. */ bool __do_once_start(bool *done, unsigned long *flags); void __do_once_done(bool *done, struct static_key_true *once_key, unsigned long *flags, struct module *mod); +/* Variant for process contexts only.
*/ +bool __do_once_slow_start(bool *done); +void __do_once_slow_done(bool *done, struct static_key_true *once_key, + struct module *mod); + /* Call a function exactly once. The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only * once, where DO_ONCE() can live in the fast-path. After @func has @@ -52,9 +60,29 @@ void __do_once_done(bool *done, struct static_key_true *once_key, ___ret; \ }) +/* Variant of DO_ONCE() for process/sleepable contexts. */ +#define DO_ONCE_SLOW(func, ...) \ + ({ \ + bool ___ret = false; \ + static bool __section(".data.once") ___done = false; \ + static DEFINE_STATIC_KEY_TRUE(___once_key); \ + if (static_branch_unlikely(&___once_key)) { \ + ___ret = __do_once_slow_start(&___done); \ + if (unlikely(___ret)) { \ + func(__VA_ARGS__); \ + __do_once_slow_done(&___done, &___once_key, \ + THIS_MODULE); \ + } \ + } \ + ___ret; \ + }) + #define get_random_once(buf, nbytes) \ DO_ONCE(get_random_bytes, (buf), (nbytes)) #define get_random_once_wait(buf, nbytes) \ DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \ +#define get_random_slow_once(buf, nbytes) \ + DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes)) + #endif /* _LINUX_ONCE_H */ diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h index 7f53a5c6f35e8..7dda3f6904654 100644 --- a/include/linux/platform_data/intel-spi.h +++ b/include/linux/platform_data/intel-spi.h @@ -19,11 +19,13 @@ enum intel_spi_type { /** * struct intel_spi_boardinfo - Board specific data for Intel SPI driver * @type: Type which this controller is compatible with - * @writeable: The chip is writeable + * @set_writeable: Try to make the chip writeable (optional) + * @data: Data to be passed to @set_writeable can be %NULL */ struct intel_spi_boardinfo { enum intel_spi_type type; - bool writeable; + bool (*set_writeable)(void __iomem *base, void *data); + void *data; }; #endif /* INTEL_SPI_PDATA_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 000cc0533c336..8c892730a1f15 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -190,8 +190,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {} static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; } #define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;}) +#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;}) #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) #define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) +#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;}) static inline struct pid *tgid_pidfd_to_pid(const struct file *file) { diff --git a/include/linux/random.h b/include/linux/random.h index 917470c4490ac..ed2bac6c7a8ac 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code, void add_interrupt_randomness(int irq) __latent_entropy; void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); -#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) { +#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); -} #else -static inline void add_latent_entropy(void) { } + add_device_randomness(NULL, 0); #endif +} void get_random_bytes(void *buf, size_t 
len); size_t __must_check get_random_bytes_arch(void *buf, size_t len); diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 136ea0997e6df..7d5a78f49d43d 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -99,8 +99,8 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full); __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, - struct file *filp, poll_table *poll_table); - + struct file *filp, poll_table *poll_table, int full); +void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu); #define RING_BUFFER_ALL_CPUS -1 diff --git a/include/linux/sched.h b/include/linux/sched.h index b055c217eb0be..5da4b3c89f636 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -885,6 +885,9 @@ struct task_struct { /* CLONE_CHILD_CLEARTID: */ int __user *clear_child_tid; + /* PF_IO_WORKER */ + void *pf_io_worker; + u64 utime; u64 stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h index d2b4204ba4d34..fa067de9f1a94 100644 --- a/include/linux/sched/jobctl.h +++ b/include/linux/sched/jobctl.h @@ -19,7 +19,6 @@ struct task_struct; #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ -#define JOBCTL_TASK_WORK_BIT 24 /* set by TWA_SIGNAL */ #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) @@ -29,10 +28,9 @@ struct task_struct; #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) -#define JOBCTL_TASK_WORK (1UL << JOBCTL_TASK_WORK_BIT) #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); extern void task_clear_jobctl_trapping(struct task_struct *task); diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 657640015b335..ae60f838ebb92 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -354,11 +354,23 @@ static inline int restart_syscall(void) return -ERESTARTNOINTR; } -static inline int signal_pending(struct task_struct *p) +static inline int task_sigpending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } +static inline int signal_pending(struct task_struct *p) +{ + /* + * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same + * behavior in terms of ensuring that we break out of wait loops + * so that notify signal callbacks can be processed. 
+ */ + if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL))) + return 1; + return task_sigpending(p); +} + static inline int __fatal_signal_pending(struct task_struct *p) { return unlikely(sigismember(&p->pending.signal, SIGKILL)); @@ -366,7 +378,7 @@ static inline int __fatal_signal_pending(struct task_struct *p) static inline int fatal_signal_pending(struct task_struct *p) { - return signal_pending(p) && __fatal_signal_pending(p); + return task_sigpending(p) && __fatal_signal_pending(p); } static inline int signal_pending_state(long state, struct task_struct *p) @@ -503,7 +515,7 @@ extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); static inline void restore_saved_sigmask_unless(bool interrupted) { if (interrupted) - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); + WARN_ON(!signal_pending(current)); else restore_saved_sigmask(); } diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index eeacb4a16fe3f..2832cc6be062b 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -31,6 +31,7 @@ struct kernel_clone_args { /* Number of elements in *set_tid */ size_t set_tid_size; int cgroup; + int io_thread; struct cgroup *cgrp; struct css_set *cset; }; @@ -60,6 +61,7 @@ extern void sched_post_fork(struct task_struct *p, extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); +void __noreturn make_task_dead(int signr); extern void proc_caches_init(void); @@ -85,6 +87,7 @@ extern void exit_files(struct task_struct *); extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 2b70f736b091d..92f3b778d8c20 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -74,6 +74,7 @@ struct uart_8250_port; struct uart_8250_ops { int (*setup_irq)(struct uart_8250_port *); void (*release_irq)(struct uart_8250_port *); + void (*setup_timer)(struct uart_8250_port *); }; struct uart_8250_em485 { diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 9c1292ea47fdc..6df4c3356ae61 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -100,7 +100,7 @@ struct uart_icount { __u32 buf_overrun; }; -typedef unsigned int __bitwise upf_t; +typedef u64 __bitwise upf_t; typedef unsigned int __bitwise upstat_t; struct uart_port { @@ -207,6 +207,7 @@ struct uart_port { #define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) #define UPF_DEAD ((__force upf_t) (1 << 30)) #define UPF_IOREMAP ((__force upf_t) (1 << 31)) +#define UPF_FULL_PROBE ((__force upf_t) (1ULL << 32)) #define __UPF_CHANGE_MASK 0x17fff #define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK) @@ -300,6 +301,23 @@ struct uart_state { /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 +/** + * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars + * @up: uart_port structure describing the port + * @chars: number of characters sent + * + * This function advances the tail of circular xmit buffer by the number of + * @chars transmitted and handles accounting of transmitted bytes (into + * @up's icount.tx). 
+ */ +static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) +{ + struct circ_buf *xmit = &up->state->xmit; + + xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1); + up->icount.tx += chars; +} + struct module; struct tty_driver; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 61fc053a4a4ef..39636fe7e8f0a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -681,6 +681,7 @@ typedef unsigned char *sk_buff_data_t; * @csum_level: indicates the number of consecutive checksums found in * the packet minus one that have been verified as * CHECKSUM_UNNECESSARY (max 3) + * @scm_io_uring: SKB holds io_uring registered files * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @napi_id: id of the NAPI struct this skb came from @@ -701,6 +702,7 @@ typedef unsigned char *sk_buff_data_t; * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header + * @kcov_handle: KCOV remote handle for remote coverage collection * @tail: Tail pointer * @end: End pointer * @head: Head of buffer @@ -858,6 +860,7 @@ struct sk_buff { #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; #endif + __u8 scm_io_uring:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -904,6 +907,10 @@ struct sk_buff { __u16 network_header; __u16 mac_header; +#ifdef CONFIG_KCOV + u64 kcov_handle; +#endif + /* private: */ __u32 headers_end[0]; /* public: */ @@ -4158,9 +4165,6 @@ enum skb_ext_id { #endif #if IS_ENABLED(CONFIG_MPTCP) SKB_EXT_MPTCP, -#endif -#if IS_ENABLED(CONFIG_KCOV) - SKB_EXT_KCOV_HANDLE, #endif SKB_EXT_NUM, /* must be last */ }; @@ -4616,35 +4620,27 @@ static inline void skb_reset_redirect(struct sk_buff *skb) #endif } -#if IS_ENABLED(CONFIG_KCOV) && IS_ENABLED(CONFIG_SKB_EXTENSIONS) +static inline bool skb_csum_is_sctp(struct sk_buff *skb) +{ + return skb->csum_not_inet; +} + static inline void skb_set_kcov_handle(struct sk_buff *skb, const u64 kcov_handle) { - /* Do not allocate skb extensions only to set kcov_handle to zero - * (as it is zero by default). However, if the extensions are - * already allocated, update kcov_handle anyway since - * skb_set_kcov_handle can be called to zero a previously set - * value. - */ - if (skb_has_extensions(skb) || kcov_handle) { - u64 *kcov_handle_ptr = skb_ext_add(skb, SKB_EXT_KCOV_HANDLE); - - if (kcov_handle_ptr) - *kcov_handle_ptr = kcov_handle; - } +#ifdef CONFIG_KCOV + skb->kcov_handle = kcov_handle; +#endif } static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { - u64 *kcov_handle = skb_ext_find(skb, SKB_EXT_KCOV_HANDLE); - - return kcov_handle ? 
*kcov_handle : 0; -} +#ifdef CONFIG_KCOV + return skb->kcov_handle; #else -static inline void skb_set_kcov_handle(struct sk_buff *skb, - const u64 kcov_handle) { } -static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; } -#endif /* CONFIG_KCOV && CONFIG_SKB_EXTENSIONS */ + return 0; +#endif +} #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index 7f0bc3cf4d610..6374763186c80 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -79,6 +79,15 @@ struct apr_resp_pkt { #define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF) #define APR_SVC_MINOR_VERSION(v) (v & 0xFF) +struct packet_router; +struct pkt_router_svc { + struct device *dev; + struct packet_router *pr; + spinlock_t lock; + int id; + void *priv; +}; + struct apr_device { struct device dev; uint16_t svc_id; @@ -86,11 +95,12 @@ struct apr_device { uint32_t version; char name[APR_NAME_SIZE]; const char *service_path; - spinlock_t lock; + struct pkt_router_svc svc; struct list_head node; }; #define to_apr_device(d) container_of(d, struct apr_device, dev) +#define svc_to_apr_device(d) container_of(d, struct apr_device, svc) struct apr_driver { int (*probe)(struct apr_device *sl); diff --git a/include/linux/socket.h b/include/linux/socket.h index 9aa530d497da8..c3b35d18bcd30 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -421,6 +421,9 @@ extern int __sys_accept4_file(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags, unsigned long nofile); +extern struct file *do_accept(struct file *file, unsigned file_flags, + struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); @@ -436,5 +439,6 @@ extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len); extern int __sys_socketpair(int family, int type, int protocol, int __user *usockvec); +extern int __sys_shutdown_sock(struct socket *sock, int how); extern int __sys_shutdown(int fd, int how); #endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 998a4ba28eba4..938216f8ab7e7 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -36,4 +36,52 @@ enum { #define offsetofend(TYPE, MEMBER) \ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) +/** + * struct_group() - Wrap a set of declarations in a mirrored struct + * + * @NAME: The identifier name of the mirrored sub-struct + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. + */ +#define struct_group(NAME, MEMBERS...) \ + __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS) + +/** + * struct_group_attr() - Create a struct_group() with trailing attributes + * + * @NAME: The identifier name of the mirrored sub-struct + * @ATTRS: Any struct attributes to apply + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. 
The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. Includes structure attributes argument. + */ +#define struct_group_attr(NAME, ATTRS, MEMBERS...) \ + __struct_group(/* no tag */, NAME, ATTRS, MEMBERS) + +/** + * struct_group_tagged() - Create a struct_group with a reusable tag + * + * @TAG: The tag name for the named sub-struct + * @NAME: The identifier name of the mirrored sub-struct + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. Includes struct tag argument for the named copy, + * so the specified layout can be reused later. + */ +#define struct_group_tagged(TAG, NAME, MEMBERS...) \ + __struct_group(TAG, NAME, /* no attrs */, MEMBERS) + #endif diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 40df88728a6f4..abf7b8ec1fb64 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -199,6 +199,7 @@ struct plat_stmmacenet_data { int rss_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; + bool rx_clk_runs_in_lpi; int has_xgmac; bool vlan_fail_q_en; u8 vlan_fail_q; diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index cd188a527d169..3b35b6f6533aa 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -92,6 +92,11 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, char __user *, size_t); extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *); +/* returns true if the msg is in-flight, i.e., already eaten by the peer */ +static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) { + return (msg->copied != 0 && list_empty(&msg->list)); +} + struct rpc_clnt; extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *); extern int rpc_remove_client_dir(struct rpc_clnt *); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index aea0ce9f3b745..a058c96cf2138 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -341,7 +341,7 @@ asmlinkage long sys_io_uring_setup(u32 entries, struct io_uring_params __user *p); asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit, u32 min_complete, u32 flags, - const sigset_t __user *sig, size_t sigsz); + const void __user *argp, size_t argsz); asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op, void __user *arg, unsigned int nr_args); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 51298a4f46235..161eba9fd9122 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -195,6 +195,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init(void); +extern void __register_sysctl_init(const char *path, struct ctl_table *table, + const char *table_name); +#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table) void do_sysctl_args(void); extern int pwrsw_enabled; diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 0d848a1e9e62f..5b8a93f288bb4 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ 
-22,6 +22,8 @@ enum task_work_notify_mode { int task_work_add(struct task_struct *task, struct callback_head *twork, enum task_work_notify_mode mode); +struct callback_head *task_work_cancel_match(struct task_struct *task, + bool (*match)(struct callback_head *, void *data), void *data); struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); void task_work_run(void); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 2f87377e9af70..6e3340379d85f 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -265,7 +265,7 @@ struct tcp_sock { u32 packets_out; /* Packets which are "in flight" */ u32 retrans_out; /* Retransmitted packets out */ u32 max_packets_out; /* max packets_out in last window */ - u32 max_packets_seq; /* right edge of max_packets_out flight */ + u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */ u16 urg_data; /* Saved octet of OOB data and control flags */ u8 ecn_flags; /* ECN status bits. */ diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 93884086f3924..adc80e29168ea 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) { struct rb_node *leftmost = rb_first_cached(&head->rb_root); - return rb_entry(leftmost, struct timerqueue_node, node); + return rb_entry_safe(leftmost, struct timerqueue_node, node); } static inline void timerqueue_init(struct timerqueue_node *node) diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 20c0ff54b7a0d..7d68a5cc58816 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -198,8 +198,8 @@ static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *ev * The loop below will unmap these fields if the log is larger than * one page, so save them here for reference: */ - count = READ_ONCE(event->count); - event_type = READ_ONCE(event->event_type); + count = event->count; + event_type = event->event_type; /* Verify that it's the log header */ if (event_header->pcr_idx != 0 || diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b480e1a07ed85..ee9ab7dbc8c35 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -198,4 +198,27 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) blkcg_maybe_throttle_current(); } +/* + * called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. This + * is currently used by TWA_SIGNAL based task_work, which requires breaking + * wait loops to ensure that task_work is noticed and run. + */ +static inline void tracehook_notify_signal(void) +{ + clear_thread_flag(TIF_NOTIFY_SIGNAL); + smp_mb__after_atomic(); + if (current->task_works) + task_work_run(); +} + +/* + * Called when we have work to process from exit_to_user_mode_loop() + */ +static inline void set_notify_signal(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && + !wake_up_state(task, TASK_INTERRUPTIBLE)) + kick_process(task); +} + #endif /* */ diff --git a/include/linux/uio.h b/include/linux/uio.h index 27ff8eb786dc3..cedb68e49e4f9 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -26,6 +26,12 @@ enum iter_type { ITER_DISCARD = 64, }; +struct iov_iter_state { + size_t iov_offset; + size_t count; + unsigned long nr_segs; +}; + struct iov_iter { /* * Bit 0 is the read/write bit, set if we're writing. 
@@ -55,6 +61,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i) return i->type & ~(READ | WRITE); } +static inline void iov_iter_save_state(struct iov_iter *iter, + struct iov_iter_state *state) +{ + state->iov_offset = iter->iov_offset; + state->count = iter->count; + state->nr_segs = iter->nr_segs; +} + static inline bool iter_is_iovec(const struct iov_iter *i) { return iov_iter_type(i) == ITER_IOVEC; @@ -226,6 +240,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); +void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); diff --git a/include/linux/units.h b/include/linux/units.h index aaf716364ec34..3457179f7116a 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -4,6 +4,26 @@ #include +/* Metric prefixes in accordance with Système international (d'unités) */ +#define PETA 1000000000000000ULL +#define TERA 1000000000000ULL +#define GIGA 1000000000UL +#define MEGA 1000000UL +#define KILO 1000UL +#define HECTO 100UL +#define DECA 10UL +#define DECI 10UL +#define CENTI 100UL +#define MILLI 1000UL +#define MICRO 1000000UL +#define NANO 1000000000UL +#define PICO 1000000000000ULL +#define FEMTO 1000000000000000ULL + +#define MILLIWATT_PER_WATT 1000L +#define MICROWATT_PER_MILLIWATT 1000L +#define MICROWATT_PER_WATT 1000000L + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) diff --git a/include/linux/usb.h b/include/linux/usb.h index a093667991bb9..568be613bdb31 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -754,11 +754,14 @@ extern struct device *usb_intf_get_dma_device(struct usb_interface *intf); extern int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable); extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index); +extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index); #else static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable) { return 0; } static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) { return true; } +static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index) + { return 0; } #endif /* USB autosuspend and autoresume */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 604c6c514a504..1cffa34740b00 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -36,62 +36,24 @@ #include #include -/** - * usb_ep_type_string() - Returns human readable-name of the endpoint type. - * @ep_type: The endpoint type to return human-readable name for. If it's not - * any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT}, - * usually got by usb_endpoint_type(), the string 'unknown' will be returned. - */ -extern const char *usb_ep_type_string(int ep_type); +/* USB 3.2 SuperSpeed Plus phy signaling rate generation and lane count */ -/** - * usb_speed_string() - Returns human readable-name of the speed. - * @speed: The speed to return human-readable name for. If it's not - * any of the speeds defined in usb_device_speed enum, string for - * USB_SPEED_UNKNOWN will be returned. 
- */ -extern const char *usb_speed_string(enum usb_device_speed speed); +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1, + USB_SSP_GEN_1x2, + USB_SSP_GEN_2x2, +}; -/** - * usb_get_maximum_speed - Get maximum requested speed for a given USB - * controller. - * @dev: Pointer to the given USB controller device - * - * The function gets the maximum speed string from property "maximum-speed", - * and returns the corresponding enum usb_device_speed. - */ +extern const char *usb_ep_type_string(int ep_type); +extern const char *usb_speed_string(enum usb_device_speed speed); extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); - -/** - * usb_state_string - Returns human readable name for the state. - * @state: The state to return a human-readable name for. If it's not - * any of the states devices in usb_device_state_string enum, - * the string UNKNOWN will be returned. - */ +extern enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev); extern const char *usb_state_string(enum usb_device_state state); +unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd, + enum usb_device_speed speed); #ifdef CONFIG_TRACING -/** - * usb_decode_ctrl - Returns human readable representation of control request. - * @str: buffer to return a human-readable representation of control request. - * This buffer should have about 200 bytes. - * @size: size of str buffer. - * @bRequestType: matches the USB bmRequestType field - * @bRequest: matches the USB bRequest field - * @wValue: matches the USB wValue field (CPU byte order) - * @wIndex: matches the USB wIndex field (CPU byte order) - * @wLength: matches the USB wLength field (CPU byte order) - * - * Function returns decoded, formatted and human-readable description of - * control request packet. - * - * The usage scenario for this is for tracepoints, so function as a return - * use the same value as in parameters. This approach allows to use this - * function in TP_printk - * - * Important: wValue, wIndex, wLength parameters before invoking this function - * should be processed by le16_to_cpu macro. - */ extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, __u8 bRequest, __u16 wValue, __u16 wIndex, __u16 wLength); diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index 72299f261b253..43db6e47503c7 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h @@ -38,4 +38,16 @@ */ #define find_closest_descending(x, a, as) __find_closest(x, a, as, >=) +/** + * is_insidevar - check if the @ptr points inside the @var memory range. + * @ptr: the pointer to a memory address. + * @var: the variable which address and size identify the memory range. + * + * Evaluates to true if the address in @ptr lies within the memory + * range allocated to @var. + */ +#define is_insidevar(ptr, var) \ + ((uintptr_t)(ptr) >= (uintptr_t)(var) && \ + (uintptr_t)(ptr) < (uintptr_t)(var) + sizeof(var)) + #endif diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h index e547cbeee4310..c493a3ffe45c8 100644 --- a/include/media/dvbdev.h +++ b/include/media/dvbdev.h @@ -126,6 +126,7 @@ struct dvb_adapter { * struct dvb_device - represents a DVB device node * * @list_head: List head with all DVB devices + * @ref: reference counter * @fops: pointer to struct file_operations * @adapter: pointer to the adapter that holds this device node * @type: type of the device, as defined by &enum dvb_device_type. 
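As an illustration of the lifetime rules the dvbdev.h hunks below introduce: a holder pins the device with dvb_device_get() for as long as it uses the node and drops it with dvb_device_put(), so the backing memory is only freed once the last reference is gone. The open/release pair here is a hypothetical caller, not code from the patch:

	/* Hypothetical open/release pair for a handle on the device. */
	static int demo_open(struct dvb_device *dvbdev)
	{
		dvb_device_get(dvbdev);		/* pin the device across the open handle */
		return 0;
	}

	static void demo_release(struct dvb_device *dvbdev)
	{
		dvb_device_put(dvbdev);		/* freed when the refcount reaches zero */
	}
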
@@ -156,6 +157,7 @@ struct dvb_adapter { */ struct dvb_device { struct list_head list_head; + struct kref ref; const struct file_operations *fops; struct dvb_adapter *adapter; enum dvb_device_type type; @@ -187,6 +189,20 @@ struct dvb_device { void *priv; }; +/** + * dvb_device_get - Increase dvb_device reference + * + * @dvbdev: pointer to struct dvb_device + */ +struct dvb_device *dvb_device_get(struct dvb_device *dvbdev); + +/** + * dvb_device_put - Decrease dvb_device reference + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_device_put(struct dvb_device *dvbdev); + /** * dvb_register_adapter - Registers a new DVB adapter * @@ -231,29 +247,17 @@ int dvb_register_device(struct dvb_adapter *adap, /** * dvb_remove_device - Remove a registered DVB device * - * This does not free memory. To do that, call dvb_free_device(). + * This does not free memory. dvb_free_device() will do that when + * the reference counter reaches zero * * @dvbdev: pointer to struct dvb_device */ void dvb_remove_device(struct dvb_device *dvbdev); -/** - * dvb_free_device - Free memory occupied by a DVB device. - * - * Call dvb_unregister_device() before calling this function. - * - * @dvbdev: pointer to struct dvb_device - */ -void dvb_free_device(struct dvb_device *dvbdev); /** * dvb_unregister_device - Unregisters a DVB device * - * This is a combination of dvb_remove_device() and dvb_free_device(). - * Using this function is usually a mistake, and is often an indicator - * for a use-after-free bug (when a userspace process keeps a file - * handle to a detached device). - * * @dvbdev: pointer to struct dvb_device */ void dvb_unregister_device(struct dvb_device *dvbdev); diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index a3083529b6985..2e53ee1c8db49 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, * * @sd: pointer to &struct v4l2_subdev * @client: pointer to struct i2c_client - * @devname: the name of the device; if NULL, the I²C device's name will be used + * @devname: the name of the device; if NULL, the I²C device driver's name + * will be used * @postfix: sub-device specific string to put right after the I²C device name; * may be NULL */ diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index 1a28f299a4c61..895eae18271fa 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -15,8 +15,6 @@ #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) #define AD_TIMER_INTERVAL 100 /*msec*/ -#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} - #define AD_LACP_SLOW 0 #define AD_LACP_FAST 1 diff --git a/include/net/bonding.h b/include/net/bonding.h index 67d676059aa0d..d9cc3f5602fb2 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -763,6 +763,9 @@ extern struct rtnl_link_ops bond_link_ops; /* exported from bond_sysfs_slave.c */ extern const struct sysfs_ops slave_sysfs_ops; +/* exported from bond_3ad.c */ +extern const u8 lacpdu_mcast_addr[]; + static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { atomic_long_inc(&dev->tx_dropped); diff --git a/include/net/dst.h b/include/net/dst.h index acd15c544cf37..ae2cf57d796b9 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -356,9 +356,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, struct net *net) { - /* TODO : stats should be
SMP safe */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + DEV_STATS_INC(dev, rx_packets); + DEV_STATS_ADD(dev, rx_bytes, skb->len); __skb_tunnel_rx(skb, dev, net); } diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index d0d188c3294bd..03b64bf876a46 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -15,6 +15,22 @@ #ifndef IEEE802154_NETDEVICE_H #define IEEE802154_NETDEVICE_H +#define IEEE802154_REQUIRED_SIZE(struct_type, member) \ + (offsetof(typeof(struct_type), member) + \ + sizeof(((typeof(struct_type) *)(NULL))->member)) + +#define IEEE802154_ADDR_OFFSET \ + offsetof(typeof(struct sockaddr_ieee802154), addr) + +#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \ + IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type)) + +#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \ + IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr)) + +#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \ + IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr)) + #include #include #include @@ -165,6 +181,33 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr) memcpy(raw, &temp, IEEE802154_ADDR_LEN); } +static inline int +ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len) +{ + struct ieee802154_addr_sa *sa; + int ret = 0; + + sa = &daddr->addr; + if (len < IEEE802154_MIN_NAMELEN) + return -EINVAL; + switch (sa->addr_type) { + case IEEE802154_ADDR_NONE: + break; + case IEEE802154_ADDR_SHORT: + if (len < IEEE802154_NAMELEN_SHORT) + ret = -EINVAL; + break; + case IEEE802154_ADDR_LONG: + if (len < IEEE802154_NAMELEN_LONG) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a, const struct ieee802154_addr_sa *sa) { diff --git a/include/net/ip.h b/include/net/ip.h index c5822d7824cd0..4b775af572688 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -545,7 +545,7 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow, BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) != offsetof(typeof(flow->addrs), v4addrs.src) + sizeof(flow->addrs.v4addrs.src)); - memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs)); + memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs)); flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 60601896d4747..89ce8a50f2363 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -842,7 +842,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != offsetof(typeof(flow->addrs), v6addrs.src) + sizeof(flow->addrs.v6addrs.src)); - memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs)); + memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs)); flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 753ba7e755d6d..3e529d8fce73a 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -58,8 +58,6 @@ struct mptcp_out_options { }; #ifdef CONFIG_MPTCP -extern struct request_sock_ops mptcp_subflow_request_sock_ops; - void mptcp_init(void); static inline bool sk_is_mptcp(const struct sock *sk) @@ -133,6 +131,9 @@ void mptcp_seq_show(struct seq_file *seq); int mptcp_subflow_init_cookie_req(struct request_sock *req, const struct sock 
*sk_listener, struct sk_buff *skb); +struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener, + bool attach_listener); #else static inline void mptcp_init(void) @@ -208,6 +209,13 @@ static inline int mptcp_subflow_init_cookie_req(struct request_sock *req, { return 0; /* TCP fallback */ } + +static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener, + bool attach_listener) +{ + return NULL; +} #endif /* CONFIG_MPTCP */ #if IS_ENABLED(CONFIG_MPTCP_IPV6) diff --git a/include/net/mrp.h b/include/net/mrp.h index 1c308c034e1a6..a8102661fd613 100644 --- a/include/net/mrp.h +++ b/include/net/mrp.h @@ -120,6 +120,7 @@ struct mrp_applicant { struct sk_buff *pdu; struct rb_root mad; struct rcu_head rcu; + bool active; }; struct mrp_port { diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 7e58b44705705..50d5ffbad473e 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -179,4 +179,13 @@ struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload *offload); void taprio_offload_free(struct tc_taprio_qopt_offload *offload); +/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is + * not mistaken for a software timestamp, because this will otherwise prevent + * the dispatch of hardware timestamps to the socket. + */ +static inline void skb_txtime_consumed(struct sk_buff *skb) +{ + skb->tstamp = ktime_set(0, 0); +} + #endif diff --git a/include/net/protocol.h b/include/net/protocol.h index 2b778e1d2d8f1..0fd2df844fc71 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -35,8 +35,6 @@ /* This is used to register protocols. */ struct net_protocol { - int (*early_demux)(struct sk_buff *skb); - int (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. */ @@ -53,8 +51,6 @@ struct net_protocol { #if IS_ENABLED(CONFIG_IPV6) struct inet6_protocol { - void (*early_demux)(struct sk_buff *skb); - void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. */ diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index bed2387af456d..61cd19ee51f4e 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -1178,7 +1178,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh) static inline void qdisc_reset_queue(struct Qdisc *sch) { __qdisc_reset_queue(&sch->q); - sch->qstats.backlog = 0; } static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, @@ -1326,4 +1325,11 @@ static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res) return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb); } +/* Make sure qdisc is no longer in SCHED state. 
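Illustration (not part of the patch): skb_txtime_consumed(), added to pkt_sched.h above, is meant for drivers that consume skb->tstamp as a hardware launch time (ETF/taprio offload). A minimal sketch of such a transmit path; the function name is hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

static netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	/* ... program ktime_to_ns(skb->tstamp) into the TX descriptor ... */

	/* The launch time has been consumed: zero skb->tstamp so it is not
	 * mistaken for a software timestamp by the timestamping code. */
	skb_txtime_consumed(skb);

	/* ... hand the descriptor to the hardware queue ... */
	return NETDEV_TX_OK;
}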
*/ +static inline void qdisc_synchronize(const struct Qdisc *q) +{ + while (test_bit(__QDISC_STATE_SCHED, &q->state)) + msleep(1); +} + #endif diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h index 01a70b27e026b..65058faea4db1 100644 --- a/include/net/sctp/stream_sched.h +++ b/include/net/sctp/stream_sched.h @@ -26,6 +26,8 @@ struct sctp_sched_ops { int (*init)(struct sctp_stream *stream); /* Init a stream */ int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp); + /* free a stream */ + void (*free_sid)(struct sctp_stream *stream, __u16 sid); /* Frees the entire thing */ void (*free)(struct sctp_stream *stream); diff --git a/include/net/sock.h b/include/net/sock.h index d53fb64374767..0f48d50a6dde7 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -315,7 +315,7 @@ struct bpf_local_storage; * @sk_tskey: counter to disambiguate concurrent tstamp requests * @sk_zckey: counter to order MSG_ZEROCOPY notifications * @sk_socket: Identd and reporting IO signals - * @sk_user_data: RPC layer private data + * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock. * @sk_frag: cached page frag * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit @@ -421,7 +421,7 @@ struct sock { #ifdef CONFIG_XFRM struct xfrm_policy __rcu *sk_policy[2]; #endif - struct dst_entry *sk_rx_dst; + struct dst_entry __rcu *sk_rx_dst; struct dst_entry __rcu *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; @@ -2243,6 +2243,19 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc return false; } +static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk) +{ + skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); + if (skb) { + if (sk_rmem_schedule(sk, skb, skb->truesize)) { + skb_set_owner_r(skb, sk); + return skb; + } + __kfree_skb(skb); + } + return NULL; +} + void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires); diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 505f1e18e9bf9..3eac185ae2e8a 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -38,21 +38,20 @@ extern struct sock *reuseport_select_sock(struct sock *sk, extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); extern int reuseport_detach_prog(struct sock *sk); -static inline bool reuseport_has_conns(struct sock *sk, bool set) +static inline bool reuseport_has_conns(struct sock *sk) { struct sock_reuseport *reuse; bool ret = false; rcu_read_lock(); reuse = rcu_dereference(sk->sk_reuseport_cb); - if (reuse) { - if (set) - reuse->has_conns = 1; - ret = reuse->has_conns; - } + if (reuse && reuse->has_conns) + ret = true; rcu_read_unlock(); return ret; } +void reuseport_has_conns_set(struct sock *sk); + #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 8528015590e44..afdf8bd1b4fe5 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -38,6 +38,7 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_PORT_MROUTER, SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL, SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, #if IS_ENABLED(CONFIG_BRIDGE_MRP) @@ -57,6 +58,7 @@ struct switchdev_attr { bool mrouter; /* PORT_MROUTER */ clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ + u16 vlan_protocol; /* 
BRIDGE_VLAN_PROTOCOL */ bool mc_disabled; /* MC_DISABLED */ #if IS_ENABLED(CONFIG_BRIDGE_MRP) u8 mrp_port_role; /* MRP_PORT_ROLE */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 8129ce9a07719..9a8d98639b20f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -934,7 +934,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific; INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); -INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); +void tcp_v6_early_demux(struct sk_buff *skb); #endif @@ -1271,11 +1271,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + if (tp->is_cwnd_limited) + return true; + /* If in slow start, ensure cwnd grows to twice what was ACKed. */ if (tcp_in_slow_start(tp)) return tp->snd_cwnd < 2 * tp->max_packets_out; - return tp->is_cwnd_limited; + return false; } /* BBR congestion control needs pacing. diff --git a/include/net/udp.h b/include/net/udp.h index 010bc324f860b..388e68c7bca05 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -176,6 +176,7 @@ INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, struct udphdr *uh, struct sock *sk); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); +void udp_v6_early_demux(struct sk_buff *skb); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, netdev_features_t features, bool is_ipv6); diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 7a9a23e7a604a..c9a47d3d8f503 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -86,7 +86,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, struct xdp_umem *umem); int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, u16 queue_id, u16 flags); -int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, +int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs, struct net_device *dev, u16 queue_id); void xp_destroy(struct xsk_buff_pool *pool); void xp_release(struct xdp_buff_xsk *xskb); diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 69ade4fb71aab..4d272e834ca2e 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -205,7 +205,7 @@ static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd) for_each_sg(scsi_sglist(cmd), sg, nseg, __i) static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd, - void *buf, int buflen) + const void *buf, int buflen) { return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, buflen); diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 037c77fb5dc55..c4de15f7a0a55 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -236,6 +236,14 @@ enum { ISCSI_SESSION_FREE, }; +enum { + ISCSI_SESSION_TARGET_UNBOUND, + ISCSI_SESSION_TARGET_ALLOCATED, + ISCSI_SESSION_TARGET_SCANNED, + ISCSI_SESSION_TARGET_UNBINDING, + ISCSI_SESSION_TARGET_MAX, +}; + #define ISCSI_MAX_TARGET -1 struct iscsi_cls_session { @@ -262,6 +270,7 @@ struct iscsi_cls_session { */ pid_t creator; int state; + int target_state; /* session target bind state */ int sid; /* session id */ void *dd_data; /* LLD private data */ struct device dev; /* sysfs transport/container device */ diff --git 
a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 6eed61e6cf8ac..496decb63f800 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -562,6 +562,8 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev, void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start); void snd_hdac_stream_clear(struct hdac_stream *azx_dev); void snd_hdac_stream_stop(struct hdac_stream *azx_dev); +void snd_hdac_stop_streams(struct hdac_bus *bus); +void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus); void snd_hdac_stream_reset(struct hdac_stream *azx_dev); void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set, unsigned int streams, unsigned int reg); diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 75048ea178f62..ddcb5b2f0a8e2 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -92,7 +92,6 @@ void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus, struct hdac_ext_stream *azx_dev, bool decouple); void snd_hdac_ext_stream_decouple(struct hdac_bus *bus, struct hdac_ext_stream *azx_dev, bool decouple); -void snd_hdac_ext_stop_streams(struct hdac_bus *bus); int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus, struct hdac_ext_stream *stream, u32 value); diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 5ffc2efedd9f8..6554a9f71c62e 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -106,24 +106,24 @@ struct snd_pcm_ops { #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1) /* If you change this don't forget to change rates[] table in pcm_native.c */ -#define SNDRV_PCM_RATE_5512 (1<<0) /* 5512Hz */ -#define SNDRV_PCM_RATE_8000 (1<<1) /* 8000Hz */ -#define SNDRV_PCM_RATE_11025 (1<<2) /* 11025Hz */ -#define SNDRV_PCM_RATE_16000 (1<<3) /* 16000Hz */ -#define SNDRV_PCM_RATE_22050 (1<<4) /* 22050Hz */ -#define SNDRV_PCM_RATE_32000 (1<<5) /* 32000Hz */ -#define SNDRV_PCM_RATE_44100 (1<<6) /* 44100Hz */ -#define SNDRV_PCM_RATE_48000 (1<<7) /* 48000Hz */ -#define SNDRV_PCM_RATE_64000 (1<<8) /* 64000Hz */ -#define SNDRV_PCM_RATE_88200 (1<<9) /* 88200Hz */ -#define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */ -#define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */ -#define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */ -#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */ -#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */ - -#define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */ -#define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */ +#define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */ +#define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */ +#define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */ +#define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */ +#define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */ +#define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */ +#define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */ +#define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */ +#define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */ +#define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */ +#define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */ +#define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */ +#define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */ +#define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */ +#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */ + +#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */ +#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\ 
SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\ diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 2150bd4c7a056..fe86172e86020 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -239,9 +239,9 @@ struct snd_soc_dai_ops { unsigned int *rx_num, unsigned int *rx_slot); int (*set_tristate)(struct snd_soc_dai *dai, int tristate); - int (*set_sdw_stream)(struct snd_soc_dai *dai, - void *stream, int direction); - void *(*get_sdw_stream)(struct snd_soc_dai *dai, int direction); + int (*set_stream)(struct snd_soc_dai *dai, + void *stream, int direction); + void *(*get_stream)(struct snd_soc_dai *dai, int direction); /* * DAI digital mute - optional. @@ -446,42 +446,42 @@ static inline void *snd_soc_dai_get_drvdata(struct snd_soc_dai *dai) } /** - * snd_soc_dai_set_sdw_stream() - Configures a DAI for SDW stream operation + * snd_soc_dai_set_stream() - Configures a DAI for stream operation * @dai: DAI - * @stream: STREAM + * @stream: STREAM (opaque structure depending on DAI type) * @direction: Stream direction(Playback/Capture) - * SoundWire subsystem doesn't have a notion of direction and we reuse + * Some subsystems, such as SoundWire, don't have a notion of direction and we reuse * the ASoC stream direction to configure sink/source ports. * Playback maps to source ports and Capture for sink ports. * * This should be invoked with NULL to clear the stream set previously. * Returns 0 on success, a negative error code otherwise. */ -static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai, - void *stream, int direction) +static inline int snd_soc_dai_set_stream(struct snd_soc_dai *dai, + void *stream, int direction) { - if (dai->driver->ops->set_sdw_stream) - return dai->driver->ops->set_sdw_stream(dai, stream, direction); + if (dai->driver->ops->set_stream) + return dai->driver->ops->set_stream(dai, stream, direction); else return -ENOTSUPP; } /** - * snd_soc_dai_get_sdw_stream() - Retrieves SDW stream from DAI + * snd_soc_dai_get_stream() - Retrieves stream from DAI * @dai: DAI * @direction: Stream direction(Playback/Capture) * * This routine only retrieves that was previously configured - * with snd_soc_dai_get_sdw_stream() + * with snd_soc_dai_get_stream() * * Returns pointer to stream or an ERR_PTR value, e.g. 
* ERR_PTR(-ENOTSUPP) if callback is not supported; */ -static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai, - int direction) +static inline void *snd_soc_dai_get_stream(struct snd_soc_dai *dai, + int direction) { - if (dai->driver->ops->get_sdw_stream) - return dai->driver->ops->get_sdw_stream(dai, direction); + if (dai->driver->ops->get_stream) + return dai->driver->ops->get_stream(dai, direction); else return ERR_PTR(-ENOTSUPP); } diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index ecd24c719de4d..041be3ce10718 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -95,7 +95,7 @@ struct btrfs_space_info; EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \ EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \ EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \ - EM( FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS") \ + EM( FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS") \ EM( ALLOC_CHUNK, "ALLOC_CHUNK") \ EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \ EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 4973265655a7f..1a91d5789df3b 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -104,6 +104,7 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE); TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR); TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE); TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_ENCRYPTED_FILENAME); TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); #define show_fc_reason(reason) \ @@ -116,7 +117,8 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); { EXT4_FC_REASON_RESIZE, "RESIZE"}, \ { EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \ { EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}, \ - { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}) + { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \ + { EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"}) TRACE_EVENT(ext4_other_inode_update_time, TP_PROTO(struct inode *inode, ino_t orig_ino), @@ -2940,7 +2942,7 @@ TRACE_EVENT(ext4_fc_stats, ), TP_printk("dev %d,%d fc ineligible reasons:\n" - "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u " + "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u" "num_commits:%lu, ineligible: %lu, numblks: %lu", MAJOR(__entry->dev), MINOR(__entry->dev), FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), @@ -2952,6 +2954,7 @@ TRACE_EVENT(ext4_fc_stats, FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), + FC_REASON_NAME_STAT(EXT4_FC_REASON_ENCRYPTED_FILENAME), __entry->fc_commits, __entry->fc_ineligible_commits, __entry->fc_numblks) ); diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 9f0d3b7d56b0f..0dd30de00e5b4 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -12,11 +12,11 @@ struct io_wq_work; /** * io_uring_create - called after a new io_uring context was prepared * - * @fd: corresponding file descriptor - * @ctx: pointer to a ring context structure + * @fd: corresponding file descriptor + * @ctx: pointer to a ring context structure * @sq_entries: actual SQ size * @cq_entries: actual CQ size - * @flags: SQ ring flags, provided to io_uring_setup(2) + * @flags: SQ ring flags, provided to io_uring_setup(2) * * Allows to trace io_uring creation and provide pointer to a context, that can * be used later to find correlated events. 
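Illustration (not part of the patch): the snd_soc_dai_set_sdw_stream()/snd_soc_dai_get_sdw_stream() pair renamed above becomes the generic set_stream()/get_stream(). A minimal caller sketch; the wrapper is hypothetical, and for SoundWire DAIs the opaque pointer would be a struct sdw_stream_runtime *:

#include <linux/err.h>
#include <sound/soc-dai.h>

static int example_attach_stream(struct snd_soc_dai *dai, void *stream,
				 int direction)
{
	void *cur;
	int ret;

	ret = snd_soc_dai_set_stream(dai, stream, direction);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	cur = snd_soc_dai_get_stream(dai, direction);
	if (IS_ERR(cur))	/* e.g. ERR_PTR(-ENOTSUPP) if no callback */
		return PTR_ERR(cur);

	return 0;
}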
@@ -49,15 +49,15 @@ TRACE_EVENT(io_uring_create, ); /** - * io_uring_register - called after a buffer/file/eventfd was succesfully + * io_uring_register - called after a buffer/file/eventfd was successfully * registered for a ring * - * @ctx: pointer to a ring context structure - * @opcode: describes which operation to perform + * @ctx: pointer to a ring context structure + * @opcode: describes which operation to perform * @nr_user_files: number of registered files * @nr_user_bufs: number of registered buffers * @cq_ev_fd: whether eventfs registered or not - * @ret: return code + * @ret: return code * * Allows to trace fixed files/buffers/eventfds, that could be registered to * avoid an overhead of getting references to them for every operation. This @@ -142,16 +142,16 @@ TRACE_EVENT(io_uring_queue_async_work, TP_ARGS(ctx, rw, req, work, flags), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( int, rw ) - __field( void *, req ) + __field( void *, ctx ) + __field( int, rw ) + __field( void *, req ) __field( struct io_wq_work *, work ) __field( unsigned int, flags ) ), TP_fast_assign( __entry->ctx = ctx; - __entry->rw = rw; + __entry->rw = rw; __entry->req = req; __entry->work = work; __entry->flags = flags; @@ -196,10 +196,10 @@ TRACE_EVENT(io_uring_defer, /** * io_uring_link - called before the io_uring request added into link_list of - * another request + * another request * - * @ctx: pointer to a ring context structure - * @req: pointer to a linked request + * @ctx: pointer to a ring context structure + * @req: pointer to a linked request * @target_req: pointer to a previous request, that would contain @req * * Allows to track linked requests, to understand dependencies between requests @@ -212,8 +212,8 @@ TRACE_EVENT(io_uring_link, TP_ARGS(ctx, req, target_req), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) + __field( void *, ctx ) + __field( void *, req ) __field( void *, target_req ) ), @@ -244,7 +244,7 @@ TRACE_EVENT(io_uring_cqring_wait, TP_ARGS(ctx, min_events), TP_STRUCT__entry ( - __field( void *, ctx ) + __field( void *, ctx ) __field( int, min_events ) ), @@ -272,7 +272,7 @@ TRACE_EVENT(io_uring_fail_link, TP_ARGS(req, link), TP_STRUCT__entry ( - __field( void *, req ) + __field( void *, req ) __field( void *, link ) ), @@ -290,38 +290,42 @@ TRACE_EVENT(io_uring_fail_link, * @ctx: pointer to a ring context structure * @user_data: user data associated with the request * @res: result of the request + * @cflags: completion flags * */ TRACE_EVENT(io_uring_complete, - TP_PROTO(void *ctx, u64 user_data, long res), + TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags), - TP_ARGS(ctx, user_data, res), + TP_ARGS(ctx, user_data, res, cflags), TP_STRUCT__entry ( __field( void *, ctx ) __field( u64, user_data ) - __field( long, res ) + __field( int, res ) + __field( unsigned, cflags ) ), TP_fast_assign( __entry->ctx = ctx; __entry->user_data = user_data; __entry->res = res; + __entry->cflags = cflags; ), - TP_printk("ring %p, user_data 0x%llx, result %ld", + TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x", __entry->ctx, (unsigned long long)__entry->user_data, - __entry->res) + __entry->res, __entry->cflags) ); - /** * io_uring_submit_sqe - called before submitting one SQE * * @ctx: pointer to a ring context structure + * @req: pointer to a submitted request * @opcode: opcode of request * @user_data: user data associated with the request + * @flags request flags * @force_nonblock: whether a context blocking or not * @sq_thread: true if 
sq_thread has submitted this SQE * @@ -330,41 +334,60 @@ TRACE_EVENT(io_uring_complete, */ TRACE_EVENT(io_uring_submit_sqe, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock, - bool sq_thread), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags, + bool force_nonblock, bool sq_thread), - TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread), + TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) + __field( u32, flags ) __field( bool, force_nonblock ) __field( bool, sq_thread ) ), TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; + __entry->flags = flags; __entry->force_nonblock = force_nonblock; __entry->sq_thread = sq_thread; ), - TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->force_nonblock, __entry->sq_thread) + TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, " + "non block %d, sq_thread %d", __entry->ctx, __entry->req, + __entry->opcode, (unsigned long long)__entry->user_data, + __entry->flags, __entry->force_nonblock, __entry->sq_thread) ); +/* + * io_uring_poll_arm - called after arming a poll wait if successful + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * @mask: request poll events mask + * @events: registered events of interest + * + * Allows to track which fds are waiting for and what are the events of + * interest. + */ TRACE_EVENT(io_uring_poll_arm, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, + int mask, int events), - TP_ARGS(ctx, opcode, user_data, mask, events), + TP_ARGS(ctx, req, opcode, user_data, mask, events), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) __field( int, mask ) @@ -373,16 +396,17 @@ TRACE_EVENT(io_uring_poll_arm, TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; __entry->mask = mask; __entry->events = events; ), - TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->mask, __entry->events) + TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data, + __entry->mask, __entry->events) ); TRACE_EVENT(io_uring_poll_wake, @@ -437,27 +461,40 @@ TRACE_EVENT(io_uring_task_add, __entry->mask) ); +/* + * io_uring_task_run - called when task_work_run() executes the poll events + * notification callbacks + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * + * Allows to track when notified poll events are processed + */ TRACE_EVENT(io_uring_task_run, - TP_PROTO(void *ctx, u8 opcode, u64 user_data), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data), - TP_ARGS(ctx, opcode, user_data), + TP_ARGS(ctx, req, opcode, user_data), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) ), 
TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; ), - TP_printk("ring %p, op %d, data 0x%llx", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data) + TP_printk("ring %p, req %p, op %d, data 0x%llx", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data) ); #endif /* _TRACE_IO_URING_H */ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index d16a32867f3a6..b1847e4314b83 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit, TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) - __field( int, transaction ) + __field( tid_t, transaction ) ), TP_fast_assign( @@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit, __entry->transaction = commit_transaction->t_tid; ), - TP_printk("dev %d,%d transaction %d sync %d", + TP_printk("dev %d,%d transaction %u sync %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit) ); @@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit, TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) - __field( int, transaction ) - __field( int, head ) + __field( tid_t, transaction ) + __field( tid_t, head ) ), TP_fast_assign( @@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit, __entry->head = journal->j_tail_sequence; ), - TP_printk("dev %d,%d transaction %d sync %d head %d", + TP_printk("dev %d,%d transaction %u sync %d head %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit, __entry->head) ); @@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data, ); DECLARE_EVENT_CLASS(jbd2_handle_start_class, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, requested_blocks) @@ -155,28 +155,28 @@ DECLARE_EVENT_CLASS(jbd2_handle_start_class, __entry->requested_blocks = requested_blocks; ), - TP_printk("dev %d,%d tid %lu type %u line_no %u " + TP_printk("dev %d,%d tid %u type %u line_no %u " "requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); TRACE_EVENT(jbd2_handle_extend, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int buffer_credits, int requested_blocks), @@ -184,7 +184,7 @@ TRACE_EVENT(jbd2_handle_extend, TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, buffer_credits ) @@ -200,7 +200,7 @@ TRACE_EVENT(jbd2_handle_extend, 
__entry->requested_blocks = requested_blocks; ), - TP_printk("dev %d,%d tid %lu type %u line_no %u " + TP_printk("dev %d,%d tid %u type %u line_no %u " "buffer_credits %d requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->buffer_credits, @@ -208,7 +208,7 @@ TRACE_EVENT(jbd2_handle_extend, ); TRACE_EVENT(jbd2_handle_stats, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int interval, int sync, int requested_blocks, int dirtied_blocks), @@ -217,7 +217,7 @@ TRACE_EVENT(jbd2_handle_stats, TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, interval ) @@ -237,7 +237,7 @@ TRACE_EVENT(jbd2_handle_stats, __entry->dirtied_blocks = dirtied_blocks; ), - TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " + TP_printk("dev %d,%d tid %u type %u line_no %u interval %d " "sync %d requested_blocks %d dirtied_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->interval, @@ -246,14 +246,14 @@ TRACE_EVENT(jbd2_handle_stats, ); TRACE_EVENT(jbd2_run_stats, - TP_PROTO(dev_t dev, unsigned long tid, + TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned long, wait ) __field( unsigned long, request_delay ) __field( unsigned long, running ) @@ -279,7 +279,7 @@ TRACE_EVENT(jbd2_run_stats, __entry->blocks_logged = stats->rs_blocks_logged; ), - TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u " + TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u " "locked %u flushing %u logging %u handle_count %u " "blocks %u blocks_logged %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, @@ -294,14 +294,14 @@ TRACE_EVENT(jbd2_run_stats, ); TRACE_EVENT(jbd2_checkpoint_stats, - TP_PROTO(dev_t dev, unsigned long tid, + TP_PROTO(dev_t dev, tid_t tid, struct transaction_chp_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned long, chp_time ) __field( __u32, forced_to_close ) __field( __u32, written ) @@ -317,7 +317,7 @@ TRACE_EVENT(jbd2_checkpoint_stats, __entry->dropped = stats->cs_dropped; ), - TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u " + TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u " "written %u dropped %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->chp_time), diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 1c714336b8635..221856f2d295c 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -583,7 +583,7 @@ TRACE_EVENT(rxrpc_client, TP_fast_assign( __entry->conn = conn ? conn->debug_id : 0; __entry->channel = channel; - __entry->usage = conn ? atomic_read(&conn->usage) : -2; + __entry->usage = conn ? refcount_read(&conn->ref) : -2; __entry->op = op; __entry->cid = conn ? 
conn->proto.cid : 0; ), diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index d74c076e9e2b4..717d388ecbd6a 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -400,7 +400,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#define ALIGN_STRUCTFIELD(type) ((int)(offsetof(struct {char a; type b;}, b))) +#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;}))) #undef __field_ext #define __field_ext(_type, _item, _filter_type) { \ diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 5498d7a6556a7..dad9d3b4a97ad 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -271,6 +271,13 @@ extern "C" { */ #define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ +/* 2 plane YCbCr420. + * 3 10 bit components and 2 padding bits packed into 4 bytes. + * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian + * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian + */ +#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */ + /* 3 plane non-subsampled (444) YCbCr * 16 bits per component, but only 10 bits are used and 6 bits are padded * index 0: Y plane, [15:0] Y:x [10:6] little endian @@ -777,6 +784,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) * and UV. Some SAND-using hardware stores UV in a separate tiled * image from Y to reduce the column height, which is not supported * with these modifiers. + * + * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also + * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes + * wide, but as this is a 10 bpp format that translates to 96 pixels. */ #define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \ diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index cd2d8279a5e44..cb4e8e6e86a90 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -182,7 +182,7 @@ #define AUDIT_MAX_KEY_LEN 256 #define AUDIT_BITMASK_SIZE 64 #define AUDIT_WORD(nr) ((__u32)((nr)/32)) -#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32)) +#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32)) #define AUDIT_SYSCALL_CLASSES 16 #define AUDIT_CLASS_DIR_WRITE 0 diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index 2ddb4226cd231..43a44538ec8d0 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h @@ -427,7 +427,7 @@ struct vfs_ns_cap_data { */ #define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */ -#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */ +#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */ #endif /* _UAPI_LINUX_CAPABILITY_H */ diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h index 8a3432d0f0dcb..e687658843b1c 100644 --- a/include/uapi/linux/eventpoll.h +++ b/include/uapi/linux/eventpoll.h @@ -41,6 +41,12 @@ #define EPOLLMSG (__force __poll_t)0x00000400 #define EPOLLRDHUP (__force __poll_t)0x00002000 +/* + * Internal flag - wakeup generated by io_uring, used to detect recursion back + * into the io_uring poll handler. 
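Illustration (not part of the patch): the 1 << n to 1U << n conversions in this batch (AUDIT_BIT() and CAP_TO_MASK() above, the PCM rate and sequencer filter masks elsewhere) all close the same hole: shifting 1 into bit 31 of a signed int is undefined behaviour, and the negative value compilers typically produce sign-extends when widened. A standalone demonstration in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1 << 31 overflows signed int (undefined); in practice it yields
	 * INT_MIN, which sign-extends when converted to a wider type. */
	uint64_t bad  = (uint64_t)(1 << 31);	/* 0xffffffff80000000 */
	uint64_t good = (uint64_t)(1U << 31);	/* 0x0000000080000000 */

	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}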
+ */ +#define EPOLL_URING_WAKE ((__force __poll_t)(1U << 27)) + /* Set exclusive wakeup mode for the target file descriptor */ #define EPOLLEXCLUSIVE ((__force __poll_t)(1U << 28)) diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 9d9ecc0f4c383..f086c5579006f 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -188,7 +188,7 @@ struct dsa_completion_record { }; uint32_t delta_rec_size; - uint32_t crc_val; + uint64_t crc_val; /* DIF check & strip */ struct { diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 98d8e06dea220..6481db9370028 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -42,23 +42,25 @@ struct io_uring_sqe { __u32 statx_flags; __u32 fadvise_advice; __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + __u32 hardlink_flags; }; __u64 user_data; /* data to be passed back at completion time */ + /* pack this to avoid bogus arm OABI complaints */ union { - struct { - /* pack this to avoid bogus arm OABI complaints */ - union { - /* index into fixed buffers, if used */ - __u16 buf_index; - /* for grouped buffer selection */ - __u16 buf_group; - } __attribute__((packed)); - /* personality to use, if used */ - __u16 personality; - __s32 splice_fd_in; - }; - __u64 __pad2[3]; + /* index into fixed buffers, if used */ + __u16 buf_index; + /* for grouped buffer selection */ + __u16 buf_group; + } __attribute__((packed)); + /* personality to use, if used */ + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; }; + __u64 __pad2[2]; }; enum { @@ -132,6 +134,9 @@ enum { IORING_OP_PROVIDE_BUFFERS, IORING_OP_REMOVE_BUFFERS, IORING_OP_TEE, + IORING_OP_SHUTDOWN, + IORING_OP_RENAMEAT, + IORING_OP_UNLINKAT, /* this goes last, obviously */ IORING_OP_LAST, @@ -145,14 +150,34 @@ enum { /* * sqe->timeout_flags */ -#define IORING_TIMEOUT_ABS (1U << 0) - +#define IORING_TIMEOUT_ABS (1U << 0) +#define IORING_TIMEOUT_UPDATE (1U << 1) +#define IORING_TIMEOUT_BOOTTIME (1U << 2) +#define IORING_TIMEOUT_REALTIME (1U << 3) +#define IORING_LINK_TIMEOUT_UPDATE (1U << 4) +#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) +#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) /* * sqe->splice_flags * extends splice(2) flags */ #define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */ +/* + * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the + * command flags for POLL_ADD are stored in sqe->len. + * + * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if + * the poll handler will continue to report + * CQEs on behalf of the same SQE. + * + * IORING_POLL_UPDATE Update existing poll request, matching + * sqe->addr as the old user_data field. 
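Illustration (not part of the patch): the POLL_ADD comment above, together with the flags defined just below it, can be exercised by hand-rolling a multishot poll SQE. The helper is hypothetical and assumes the SQE came from a mapped submission ring; liburing users would rely on its prep helpers instead:

#include <string.h>
#include <poll.h>
#include <linux/io_uring.h>

/* sqe->poll_events holds the poll mask, so the POLL_ADD command flags
 * (here IORING_POLL_ADD_MULTI) travel in sqe->len instead. */
static void prep_poll_multishot(struct io_uring_sqe *sqe, int fd,
				__u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
	sqe->len = IORING_POLL_ADD_MULTI;
	sqe->poll_events = POLLIN;
	sqe->user_data = user_data;
}

Each completion then carries IORING_CQE_F_MORE for as long as the poll stays armed; a CQE without that flag tells the application the multishot poll has terminated.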
+ */ +#define IORING_POLL_ADD_MULTI (1U << 0) +#define IORING_POLL_UPDATE_EVENTS (1U << 1) +#define IORING_POLL_UPDATE_USER_DATA (1U << 2) + /* * IO completion data structure (Completion Queue Entry) */ @@ -166,8 +191,10 @@ struct io_uring_cqe { * cqe->flags * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID + * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries */ #define IORING_CQE_F_BUFFER (1U << 0) +#define IORING_CQE_F_MORE (1U << 1) enum { IORING_CQE_BUFFER_SHIFT = 16, @@ -226,6 +253,7 @@ struct io_cqring_offsets { #define IORING_ENTER_GETEVENTS (1U << 0) #define IORING_ENTER_SQ_WAKEUP (1U << 1) #define IORING_ENTER_SQ_WAIT (1U << 2) +#define IORING_ENTER_EXT_ARG (1U << 3) /* * Passed in for io_uring_setup(2). Copied back with updated info on success @@ -253,6 +281,10 @@ struct io_uring_params { #define IORING_FEAT_CUR_PERSONALITY (1U << 4) #define IORING_FEAT_FAST_POLL (1U << 5) #define IORING_FEAT_POLL_32BITS (1U << 6) +#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7) +#define IORING_FEAT_EXT_ARG (1U << 8) +#define IORING_FEAT_NATIVE_WORKERS (1U << 9) +#define IORING_FEAT_RSRC_TAGS (1U << 10) /* * io_uring_register(2) opcodes and arguments @@ -272,16 +304,62 @@ enum { IORING_REGISTER_RESTRICTIONS = 11, IORING_REGISTER_ENABLE_RINGS = 12, + /* extended with tagging */ + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + + /* set/clear io-wq thread affinities */ + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + + /* set/get max number of io-wq workers */ + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + /* this goes last */ IORING_REGISTER_LAST }; +/* io-wq worker categories */ +enum { + IO_WQ_BOUND, + IO_WQ_UNBOUND, +}; + +/* deprecated, see struct io_uring_rsrc_update */ struct io_uring_files_update { __u32 offset; __u32 resv; __aligned_u64 /* __s32 * */ fds; }; +struct io_uring_rsrc_register { + __u32 nr; + __u32 resv; + __u64 resv2; + __aligned_u64 data; + __aligned_u64 tags; +}; + +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __aligned_u64 data; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __aligned_u64 data; + __aligned_u64 tags; + __u32 nr; + __u32 resv2; +}; + +/* Skip updating fd indexes set to this value in the fd table */ +#define IORING_REGISTER_FILES_SKIP (-2) + #define IO_URING_OP_SUPPORTED (1U << 0) struct io_uring_probe_op { @@ -329,4 +407,11 @@ enum { IORING_RESTRICTION_LAST }; +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad; + __u64 ts; +}; + #endif diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index e42d13b55cf3a..860bbf6bf29cb 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -18,6 +18,7 @@ #ifndef _UAPI_LINUX_IP_H #define _UAPI_LINUX_IP_H #include +#include #include #define IPTOS_TOS_MASK 0x1E @@ -100,8 +101,10 @@ struct iphdr { __u8 ttl; __u8 protocol; __sum16 check; - __be32 saddr; - __be32 daddr; + __struct_group(/* no tag */, addrs, /* no attrs */, + __be32 saddr; + __be32 daddr; + ); /*The options start here. 
*/ }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 13e8751bf24a0..d44d0483fd73f 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -130,8 +131,10 @@ struct ipv6hdr { __u8 nexthdr; __u8 hop_limit; - struct in6_addr saddr; - struct in6_addr daddr; + __struct_group(/* no tag */, addrs, /* no attrs */, + struct in6_addr saddr; + struct in6_addr daddr; + ); }; diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h index edc6ddab0de6a..2d6f80d75ae74 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h +++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h @@ -15,7 +15,7 @@ enum sctp_conntrack { SCTP_CONNTRACK_SHUTDOWN_RECD, SCTP_CONNTRACK_SHUTDOWN_ACK_SENT, SCTP_CONNTRACK_HEARTBEAT_SENT, - SCTP_CONNTRACK_HEARTBEAT_ACKED, + SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */ SCTP_CONNTRACK_MAX }; diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h index 6b20fb22717b2..aa805e6d4e284 100644 --- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h +++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h @@ -94,7 +94,7 @@ enum ctattr_timeout_sctp { CTA_TIMEOUT_SCTP_SHUTDOWN_RECD, CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, CTA_TIMEOUT_SCTP_HEARTBEAT_SENT, - CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, + CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */ __CTA_TIMEOUT_SCTP_MAX }; #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1) diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h index 58b1eb7113600..a5feb76049487 100644 --- a/include/uapi/linux/openat2.h +++ b/include/uapi/linux/openat2.h @@ -35,5 +35,9 @@ struct open_how { #define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".." be scoped inside the dirfd (similar to chroot(2)). */ +#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be + completed through cached lookup. May + return -EAGAIN if that's not + possible. */ #endif /* _UAPI_LINUX_OPENAT2_H */ diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index ee8220f8dcf5f..c3725b4922632 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -1,6 +1,31 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_STDDEF_H +#define _UAPI_LINUX_STDDEF_H + #include #ifndef __always_inline #define __always_inline inline #endif + +/** + * __struct_group() - Create a mirrored named and anonyomous struct + * + * @TAG: The tag name for the named sub-struct (usually empty) + * @NAME: The identifier name of the mirrored sub-struct + * @ATTRS: Any struct attributes (usually empty) + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical layout + * and size: one anonymous and one named. The former's members can be used + * normally without sub-struct naming, and the latter can be used to + * reason about the start, end, and size of the group of struct members. + * The named struct can also be explicitly tagged for layer reuse, as well + * as both having struct attributes appended. + */ +#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) 
\ + union { \ + struct { MEMBERS } ATTRS; \ + struct TAG { MEMBERS } ATTRS NAME; \ + } +#endif diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 7272f85d6d6ab..3736f2fe15418 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -3,7 +3,7 @@ #define _UAPI_LINUX_SWAB_H #include -#include +#include #include #include diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 0f865ae4ba89d..17ce56198c9ad 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -968,9 +968,22 @@ struct usb_ssp_cap_descriptor { __le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */ #define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */ #define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */ +#define USB_SSP_SUBLINK_SPEED_LSE_BPS 0 +#define USB_SSP_SUBLINK_SPEED_LSE_KBPS 1 +#define USB_SSP_SUBLINK_SPEED_LSE_MBPS 2 +#define USB_SSP_SUBLINK_SPEED_LSE_GBPS 3 + #define USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */ +#define USB_SSP_SUBLINK_SPEED_ST_SYM_RX 0 +#define USB_SSP_SUBLINK_SPEED_ST_ASYM_RX 1 +#define USB_SSP_SUBLINK_SPEED_ST_SYM_TX 2 +#define USB_SSP_SUBLINK_SPEED_ST_ASYM_TX 3 + #define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */ #define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */ +#define USB_SSP_SUBLINK_SPEED_LP_SS 0 +#define USB_SSP_SUBLINK_SPEED_LP_SSP 1 + #define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */ } __attribute__((packed)); diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 534eaa4d39bc8..b28817c59fdf2 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -1552,7 +1552,8 @@ struct v4l2_bt_timings { ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt)) #define V4L2_DV_BT_BLANKING_HEIGHT(bt) \ ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \ - (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) + ((bt)->interlaced ? \ + ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0)) #define V4L2_DV_BT_FRAME_HEIGHT(bt) \ ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt)) diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h index a75e14edc957e..dbd60f48b4b01 100644 --- a/include/uapi/sound/asequencer.h +++ b/include/uapi/sound/asequencer.h @@ -344,10 +344,10 @@ typedef int __bitwise snd_seq_client_type_t; #define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2) /* event filter flags */ -#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */ -#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */ -#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */ -#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */ +#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */ +#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */ +#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */ +#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */ struct snd_seq_client_info { int client; /* client number to inquire */ diff --git a/init/Kconfig b/init/Kconfig index 22912631d79b4..eba883d6d9ed5 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -71,7 +71,7 @@ config CC_HAS_ASM_GOTO_OUTPUT config CC_HAS_ASM_GOTO_TIED_OUTPUT depends on CC_HAS_ASM_GOTO_OUTPUT # Detect buggy gcc and clang, fixed in gcc-11 clang-14. 
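Illustration (not part of the patch): __struct_group() above is what lets the ip.h and ipv6.h hunks earlier replace &iph->saddr with &iph->addrs in the flow-dissector memcpy() calls, giving a well-defined copy of a named member group where field-spanning memcpy() hardening would otherwise flag a read past saddr. A self-contained userspace restatement; the struct is a stand-in, not the real iphdr:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
	union { \
		struct { MEMBERS } ATTRS; \
		struct TAG { MEMBERS } ATTRS NAME; \
	}

struct fake_iphdr {
	uint8_t ttl, protocol;
	uint16_t check;
	__struct_group(/* no tag */, addrs, /* no attrs */,
		uint32_t saddr;
		uint32_t daddr;
	);
};

int main(void)
{
	struct fake_iphdr h = { .saddr = 1, .daddr = 2 };
	uint32_t both[2];

	/* One copy covers saddr and daddr: exactly sizeof(h.addrs) bytes. */
	memcpy(both, &h.addrs, sizeof(h.addrs));
	assert(both[0] == 1 && both[1] == 2);
	return 0;
}

The BUILD_BUG_ON() checks retained in those hunks still guarantee at compile time that the two address fields stay adjacent.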
- def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null) + def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null) config TOOLS_SUPPORT_RELR def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) diff --git a/io_uring/Makefile b/io_uring/Makefile new file mode 100644 index 0000000000000..3680425df9478 --- /dev/null +++ b/io_uring/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for io_uring + +obj-$(CONFIG_IO_URING) += io_uring.o +obj-$(CONFIG_IO_WQ) += io-wq.o diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c new file mode 100644 index 0000000000000..81485c1a9879e --- /dev/null +++ b/io_uring/io-wq.c @@ -0,0 +1,1404 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Basic worker thread pool for io_uring + * + * Copyright (C) 2019 Jens Axboe + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "io-wq.h" + +#define WORKER_IDLE_TIMEOUT (5 * HZ) + +enum { + IO_WORKER_F_UP = 1, /* up and active */ + IO_WORKER_F_RUNNING = 2, /* account as running */ + IO_WORKER_F_FREE = 4, /* worker on free list */ + IO_WORKER_F_BOUND = 8, /* is doing bounded work */ +}; + +enum { + IO_WQ_BIT_EXIT = 0, /* wq exiting */ +}; + +enum { + IO_ACCT_STALLED_BIT = 0, /* stalled on hash */ +}; + +/* + * One for each thread in a wqe pool + */ +struct io_worker { + refcount_t ref; + unsigned flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wqe *wqe; + + struct io_wq_work *cur_work; + spinlock_t lock; + + struct completion ref_done; + + unsigned long create_state; + struct callback_head create_work; + int create_index; + + union { + struct rcu_head rcu; + struct work_struct work; + }; +}; + +#if BITS_PER_LONG == 64 +#define IO_WQ_HASH_ORDER 6 +#else +#define IO_WQ_HASH_ORDER 5 +#endif + +#define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER) + +struct io_wqe_acct { + unsigned nr_workers; + unsigned max_workers; + int index; + atomic_t nr_running; + struct io_wq_work_list work_list; + unsigned long flags; +}; + +enum { + IO_WQ_ACCT_BOUND, + IO_WQ_ACCT_UNBOUND, + IO_WQ_ACCT_NR, +}; + +/* + * Per-node worker thread pool + */ +struct io_wqe { + raw_spinlock_t lock; + struct io_wqe_acct acct[2]; + + int node; + + struct hlist_nulls_head free_list; + struct list_head all_list; + + struct wait_queue_entry wait; + + struct io_wq *wq; + struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS]; + + cpumask_var_t cpu_mask; +}; + +/* + * Per io_wq state + */ +struct io_wq { + unsigned long state; + + free_work_fn *free_work; + io_wq_work_fn *do_work; + + struct io_wq_hash *hash; + + atomic_t worker_refs; + struct completion worker_done; + + struct hlist_node cpuhp_node; + + struct task_struct *task; + + struct io_wqe *wqes[]; +}; + +static enum cpuhp_state io_wq_online; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index); +static void io_wqe_dec_running(struct io_worker *worker); +static bool io_acct_cancel_pending_work(struct io_wqe *wqe, + struct io_wqe_acct *acct, + struct io_cb_cancel_data *match); +static void create_worker_cb(struct callback_head *cb); +static void 
io_wq_cancel_tw_create(struct io_wq *wq); + +static bool io_worker_get(struct io_worker *worker) +{ + return refcount_inc_not_zero(&worker->ref); +} + +static void io_worker_release(struct io_worker *worker) +{ + if (refcount_dec_and_test(&worker->ref)) + complete(&worker->ref_done); +} + +static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound) +{ + return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND]; +} + +static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, + struct io_wq_work *work) +{ + return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND)); +} + +static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker) +{ + return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND); +} + +static void io_worker_ref_put(struct io_wq *wq) +{ + if (atomic_dec_and_test(&wq->worker_refs)) + complete(&wq->worker_done); +} + +static void io_worker_cancel_cb(struct io_worker *worker) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + atomic_dec(&acct->nr_running); + raw_spin_lock(&worker->wqe->lock); + acct->nr_workers--; + raw_spin_unlock(&worker->wqe->lock); + io_worker_ref_put(wq); + clear_bit_unlock(0, &worker->create_state); + io_worker_release(worker); +} + +static bool io_task_worker_match(struct callback_head *cb, void *data) +{ + struct io_worker *worker; + + if (cb->func != create_worker_cb) + return false; + worker = container_of(cb, struct io_worker, create_work); + return worker == data; +} + +static void io_worker_exit(struct io_worker *worker) +{ + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + while (1) { + struct callback_head *cb = task_work_cancel_match(wq->task, + io_task_worker_match, worker); + + if (!cb) + break; + io_worker_cancel_cb(worker); + } + + if (refcount_dec_and_test(&worker->ref)) + complete(&worker->ref_done); + wait_for_completion(&worker->ref_done); + + raw_spin_lock(&wqe->lock); + if (worker->flags & IO_WORKER_F_FREE) + hlist_nulls_del_rcu(&worker->nulls_node); + list_del_rcu(&worker->all_list); + preempt_disable(); + io_wqe_dec_running(worker); + worker->flags = 0; + current->flags &= ~PF_IO_WORKER; + preempt_enable(); + raw_spin_unlock(&wqe->lock); + + kfree_rcu(worker, rcu); + io_worker_ref_put(wqe->wq); + do_exit(0); +} + +static inline bool io_acct_run_queue(struct io_wqe_acct *acct) +{ + if (!wq_list_empty(&acct->work_list) && + !test_bit(IO_ACCT_STALLED_BIT, &acct->flags)) + return true; + return false; +} + +/* + * Check head of free list for an available worker. If one isn't available, + * caller must create one. + */ +static bool io_wqe_activate_free_worker(struct io_wqe *wqe, + struct io_wqe_acct *acct) + __must_hold(RCU) +{ + struct hlist_nulls_node *n; + struct io_worker *worker; + + /* + * Iterate free_list and see if we can find an idle worker to + * activate. If a given worker is on the free_list but in the process + * of exiting, keep trying. + */ + hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) { + if (!io_worker_get(worker)) + continue; + if (io_wqe_get_acct(worker) != acct) { + io_worker_release(worker); + continue; + } + if (wake_up_process(worker->task)) { + io_worker_release(worker); + return true; + } + io_worker_release(worker); + } + + return false; +} + +/* + * We need a worker. If we find a free one, we're good. If not, and we're + * below the max number of workers, create one. 
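Illustration (not part of the patch): io_worker_get(), io_worker_release() and io_worker_exit() above follow a standard kernel teardown idiom: lookups take a reference only on still-live objects, and the exiting owner waits for transient references to drain. Condensed with hypothetical names:

#include <linux/completion.h>
#include <linux/refcount.h>

struct obj {
	refcount_t ref;			/* owner holds the initial reference */
	struct completion ref_done;	/* fired by the final put */
};

static bool obj_get(struct obj *o)
{
	/* Fails once the object has started dying (refcount hit zero). */
	return refcount_inc_not_zero(&o->ref);
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		complete(&o->ref_done);
}

static void obj_exit(struct obj *o)
{
	obj_put(o);				/* drop the owner's reference */
	wait_for_completion(&o->ref_done);	/* wait out other users */
}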
+ */ +static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) +{ + /* + * Most likely an attempt to queue unbounded work on an io_wq that + * wasn't setup with any unbounded workers. + */ + if (unlikely(!acct->max_workers)) + pr_warn_once("io-wq is not configured for unbound workers"); + + raw_spin_lock(&wqe->lock); + if (acct->nr_workers >= acct->max_workers) { + raw_spin_unlock(&wqe->lock); + return true; + } + acct->nr_workers++; + raw_spin_unlock(&wqe->lock); + atomic_inc(&acct->nr_running); + atomic_inc(&wqe->wq->worker_refs); + return create_io_worker(wqe->wq, wqe, acct->index); +} + +static void io_wqe_inc_running(struct io_worker *worker) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + + atomic_inc(&acct->nr_running); +} + +static void create_worker_cb(struct callback_head *cb) +{ + struct io_worker *worker; + struct io_wq *wq; + struct io_wqe *wqe; + struct io_wqe_acct *acct; + bool do_create = false; + + worker = container_of(cb, struct io_worker, create_work); + wqe = worker->wqe; + wq = wqe->wq; + acct = &wqe->acct[worker->create_index]; + raw_spin_lock(&wqe->lock); + if (acct->nr_workers < acct->max_workers) { + acct->nr_workers++; + do_create = true; + } + raw_spin_unlock(&wqe->lock); + if (do_create) { + create_io_worker(wq, wqe, worker->create_index); + } else { + atomic_dec(&acct->nr_running); + io_worker_ref_put(wq); + } + clear_bit_unlock(0, &worker->create_state); + io_worker_release(worker); +} + +static bool io_queue_worker_create(struct io_worker *worker, + struct io_wqe_acct *acct, + task_work_func_t func) +{ + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + /* raced with exit, just ignore create call */ + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) + goto fail; + if (!io_worker_get(worker)) + goto fail; + /* + * create_state manages ownership of create_work/index. We should + * only need one entry per worker, as the worker going to sleep + * will trigger the condition, and waking will clear it once it + * runs the task_work. + */ + if (test_bit(0, &worker->create_state) || + test_and_set_bit_lock(0, &worker->create_state)) + goto fail_release; + + atomic_inc(&wq->worker_refs); + init_task_work(&worker->create_work, func); + worker->create_index = acct->index; + if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { + /* + * EXIT may have been set after checking it above, check after + * adding the task_work and remove any creation item if it is + * now set. wq exit does that too, but we can have added this + * work item after we canceled in io_wq_exit_workers(). + */ + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) + io_wq_cancel_tw_create(wq); + io_worker_ref_put(wq); + return true; + } + io_worker_ref_put(wq); + clear_bit_unlock(0, &worker->create_state); +fail_release: + io_worker_release(worker); +fail: + atomic_dec(&acct->nr_running); + io_worker_ref_put(wq); + return false; +} + +static void io_wqe_dec_running(struct io_worker *worker) + __must_hold(wqe->lock) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + + if (!(worker->flags & IO_WORKER_F_UP)) + return; + + if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) { + atomic_inc(&acct->nr_running); + atomic_inc(&wqe->wq->worker_refs); + raw_spin_unlock(&wqe->lock); + io_queue_worker_create(worker, acct, create_worker_cb); + raw_spin_lock(&wqe->lock); + } +} + +/* + * Worker will start processing some work. 
Move it to the busy list, if
+ * it's currently on the freelist
+ */
+static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
+                             struct io_wq_work *work)
+       __must_hold(wqe->lock)
+{
+       if (worker->flags & IO_WORKER_F_FREE) {
+               worker->flags &= ~IO_WORKER_F_FREE;
+               hlist_nulls_del_init_rcu(&worker->nulls_node);
+       }
+}
+
+/*
+ * No work, worker going to sleep. Move to the free list.
+ */
+static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
+       __must_hold(wqe->lock)
+{
+       if (!(worker->flags & IO_WORKER_F_FREE)) {
+               worker->flags |= IO_WORKER_F_FREE;
+               hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+       }
+}
+
+static inline unsigned int io_get_work_hash(struct io_wq_work *work)
+{
+       return work->flags >> IO_WQ_HASH_SHIFT;
+}
+
+static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+{
+       struct io_wq *wq = wqe->wq;
+       bool ret = false;
+
+       spin_lock_irq(&wq->hash->wait.lock);
+       if (list_empty(&wqe->wait.entry)) {
+               __add_wait_queue(&wq->hash->wait, &wqe->wait);
+               if (!test_bit(hash, &wq->hash->map)) {
+                       __set_current_state(TASK_RUNNING);
+                       list_del_init(&wqe->wait.entry);
+                       ret = true;
+               }
+       }
+       spin_unlock_irq(&wq->hash->wait.lock);
+       return ret;
+}
+
+static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
+                                          struct io_worker *worker)
+       __must_hold(wqe->lock)
+{
+       struct io_wq_work_node *node, *prev;
+       struct io_wq_work *work, *tail;
+       unsigned int stall_hash = -1U;
+       struct io_wqe *wqe = worker->wqe;
+
+       wq_list_for_each(node, prev, &acct->work_list) {
+               unsigned int hash;
+
+               work = container_of(node, struct io_wq_work, list);
+
+               /* not hashed, can run anytime */
+               if (!io_wq_is_hashed(work)) {
+                       wq_list_del(&acct->work_list, node, prev);
+                       return work;
+               }
+
+               hash = io_get_work_hash(work);
+               /* all items with this hash lie in [work, tail] */
+               tail = wqe->hash_tail[hash];
+
+               /* hashed, can run if not already running */
+               if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
+                       wqe->hash_tail[hash] = NULL;
+                       wq_list_cut(&acct->work_list, &tail->list, prev);
+                       return work;
+               }
+               if (stall_hash == -1U)
+                       stall_hash = hash;
+               /* fast forward to a next hash, for-each will fix up @prev */
+               node = &tail->list;
+       }
+
+       if (stall_hash != -1U) {
+               bool unstalled;
+
+               /*
+                * Set this before dropping the lock to avoid racing with new
+                * work being added and clearing the stalled bit.
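The hashed-work gate in io_get_next_work() relies on an atomic test-and-set per hash bucket: the first worker to claim a bucket takes the whole [work, tail] span, later workers record the bucket in stall_hash and park on the hash waitqueue. A standalone model of the gate (C11 atomics standing in for test_and_set_bit()/clear_bit(); hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_ulong hash_map;   /* models wq->hash->map, one bit per bucket */

    /* the first claimant wins and may run the whole hashed span */
    static bool claim_hash(unsigned int hash)
    {
            unsigned long bit = 1UL << hash;

            return !(atomic_fetch_or(&hash_map, bit) & bit);
    }

    /* models the clear_bit() in io_worker_handle_work(); the real code then
     * wakes any worker that stalled on this bucket */
    static void release_hash(unsigned int hash)
    {
            atomic_fetch_and(&hash_map, ~(1UL << hash));
    }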
+ */ + set_bit(IO_ACCT_STALLED_BIT, &acct->flags); + raw_spin_unlock(&wqe->lock); + unstalled = io_wait_on_hash(wqe, stall_hash); + raw_spin_lock(&wqe->lock); + if (unstalled) { + clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); + if (wq_has_sleeper(&wqe->wq->hash->wait)) + wake_up(&wqe->wq->hash->wait); + } + } + + return NULL; +} + +static bool io_flush_signals(void) +{ + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { + __set_current_state(TASK_RUNNING); + tracehook_notify_signal(); + return true; + } + return false; +} + +static void io_assign_current_work(struct io_worker *worker, + struct io_wq_work *work) +{ + if (work) { + io_flush_signals(); + cond_resched(); + } + + spin_lock(&worker->lock); + worker->cur_work = work; + spin_unlock(&worker->lock); +} + +static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work); + +static void io_worker_handle_work(struct io_worker *worker) + __releases(wqe->lock) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state); + + do { + struct io_wq_work *work; +get_next: + /* + * If we got some work, mark us as busy. If we didn't, but + * the list isn't empty, it means we stalled on hashed work. + * Mark us stalled so we don't keep looking for work when we + * can't make progress, any work completion or insertion will + * clear the stalled flag. + */ + work = io_get_next_work(acct, worker); + if (work) + __io_worker_busy(wqe, worker, work); + + raw_spin_unlock(&wqe->lock); + if (!work) + break; + io_assign_current_work(worker, work); + __set_current_state(TASK_RUNNING); + + /* handle a whole dependent link */ + do { + struct io_wq_work *next_hashed, *linked; + unsigned int hash = io_get_work_hash(work); + + next_hashed = wq_next_work(work); + + if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND)) + work->flags |= IO_WQ_WORK_CANCEL; + wq->do_work(work); + io_assign_current_work(worker, NULL); + + linked = wq->free_work(work); + work = next_hashed; + if (!work && linked && !io_wq_is_hashed(linked)) { + work = linked; + linked = NULL; + } + io_assign_current_work(worker, work); + if (linked) + io_wqe_enqueue(wqe, linked); + + if (hash != -1U && !next_hashed) { + /* serialize hash clear with wake_up() */ + spin_lock_irq(&wq->hash->wait.lock); + clear_bit(hash, &wq->hash->map); + clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); + spin_unlock_irq(&wq->hash->wait.lock); + if (wq_has_sleeper(&wq->hash->wait)) + wake_up(&wq->hash->wait); + raw_spin_lock(&wqe->lock); + /* skip unnecessary unlock-lock wqe->lock */ + if (!work) + goto get_next; + raw_spin_unlock(&wqe->lock); + } + } while (work); + + raw_spin_lock(&wqe->lock); + } while (1); +} + +static int io_wqe_worker(void *data) +{ + struct io_worker *worker = data; + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + bool last_timeout = false; + char buf[TASK_COMM_LEN]; + + worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); + + snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid); + set_task_comm(current, buf); + + while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { + long ret; + + set_current_state(TASK_INTERRUPTIBLE); +loop: + raw_spin_lock(&wqe->lock); + if (io_acct_run_queue(acct)) { + io_worker_handle_work(worker); + goto loop; + } + /* timed out, exit unless we're the last worker */ + if (last_timeout && acct->nr_workers > 1) { + acct->nr_workers--; + 
raw_spin_unlock(&wqe->lock); + __set_current_state(TASK_RUNNING); + break; + } + last_timeout = false; + __io_worker_idle(wqe, worker); + raw_spin_unlock(&wqe->lock); + if (io_flush_signals()) + continue; + ret = schedule_timeout(WORKER_IDLE_TIMEOUT); + if (signal_pending(current)) { + struct ksignal ksig; + + if (!get_signal(&ksig)) + continue; + break; + } + last_timeout = !ret; + } + + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) { + raw_spin_lock(&wqe->lock); + io_worker_handle_work(worker); + } + + io_worker_exit(worker); + return 0; +} + +/* + * Called when a worker is scheduled in. Mark us as currently running. + */ +void io_wq_worker_running(struct task_struct *tsk) +{ + struct io_worker *worker = tsk->pf_io_worker; + + if (!worker) + return; + if (!(worker->flags & IO_WORKER_F_UP)) + return; + if (worker->flags & IO_WORKER_F_RUNNING) + return; + worker->flags |= IO_WORKER_F_RUNNING; + io_wqe_inc_running(worker); +} + +/* + * Called when worker is going to sleep. If there are no workers currently + * running and we have work pending, wake up a free one or create a new one. + */ +void io_wq_worker_sleeping(struct task_struct *tsk) +{ + struct io_worker *worker = tsk->pf_io_worker; + + if (!worker) + return; + if (!(worker->flags & IO_WORKER_F_UP)) + return; + if (!(worker->flags & IO_WORKER_F_RUNNING)) + return; + + worker->flags &= ~IO_WORKER_F_RUNNING; + + raw_spin_lock(&worker->wqe->lock); + io_wqe_dec_running(worker); + raw_spin_unlock(&worker->wqe->lock); +} + +static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, + struct task_struct *tsk) +{ + tsk->pf_io_worker = worker; + worker->task = tsk; + set_cpus_allowed_ptr(tsk, wqe->cpu_mask); + tsk->flags |= PF_NO_SETAFFINITY; + + raw_spin_lock(&wqe->lock); + hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); + list_add_tail_rcu(&worker->all_list, &wqe->all_list); + worker->flags |= IO_WORKER_F_FREE; + raw_spin_unlock(&wqe->lock); + wake_up_new_task(tsk); +} + +static bool io_wq_work_match_all(struct io_wq_work *work, void *data) +{ + return true; +} + +static inline bool io_should_retry_thread(long err) +{ + /* + * Prevent perpetual task_work retry, if the task (or its group) is + * exiting. 
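The sleeping hook above funnels into io_wqe_dec_running(): only the worker whose decrement takes nr_running to zero, while the queue is still non-empty, queues creation of a replacement, which keeps a pool of workers blocked on IO from stranding pending work. The decision in isolation (a sketch, C11 atomics standing in for atomic_t):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int nr_running;   /* models acct->nr_running */

    static bool need_new_worker_on_sleep(bool work_pending)
    {
            if (atomic_fetch_sub(&nr_running, 1) == 1 && work_pending) {
                    /* account for the worker we are about to create */
                    atomic_fetch_add(&nr_running, 1);
                    return true;
            }
            return false;
    }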
+ */ + if (fatal_signal_pending(current)) + return false; + + switch (err) { + case -EAGAIN: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + return true; + default: + return false; + } +} + +static void create_worker_cont(struct callback_head *cb) +{ + struct io_worker *worker; + struct task_struct *tsk; + struct io_wqe *wqe; + + worker = container_of(cb, struct io_worker, create_work); + clear_bit_unlock(0, &worker->create_state); + wqe = worker->wqe; + tsk = create_io_thread(io_wqe_worker, worker, wqe->node); + if (!IS_ERR(tsk)) { + io_init_new_worker(wqe, worker, tsk); + io_worker_release(worker); + return; + } else if (!io_should_retry_thread(PTR_ERR(tsk))) { + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + + atomic_dec(&acct->nr_running); + raw_spin_lock(&wqe->lock); + acct->nr_workers--; + if (!acct->nr_workers) { + struct io_cb_cancel_data match = { + .fn = io_wq_work_match_all, + .cancel_all = true, + }; + + while (io_acct_cancel_pending_work(wqe, acct, &match)) + raw_spin_lock(&wqe->lock); + } + raw_spin_unlock(&wqe->lock); + io_worker_ref_put(wqe->wq); + kfree(worker); + return; + } + + /* re-create attempts grab a new worker ref, drop the existing one */ + io_worker_release(worker); + schedule_work(&worker->work); +} + +static void io_workqueue_create(struct work_struct *work) +{ + struct io_worker *worker = container_of(work, struct io_worker, work); + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + + if (!io_queue_worker_create(worker, acct, create_worker_cont)) + kfree(worker); +} + +static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) +{ + struct io_wqe_acct *acct = &wqe->acct[index]; + struct io_worker *worker; + struct task_struct *tsk; + + __set_current_state(TASK_RUNNING); + + worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); + if (!worker) { +fail: + atomic_dec(&acct->nr_running); + raw_spin_lock(&wqe->lock); + acct->nr_workers--; + raw_spin_unlock(&wqe->lock); + io_worker_ref_put(wq); + return false; + } + + refcount_set(&worker->ref, 1); + worker->wqe = wqe; + spin_lock_init(&worker->lock); + init_completion(&worker->ref_done); + + if (index == IO_WQ_ACCT_BOUND) + worker->flags |= IO_WORKER_F_BOUND; + + tsk = create_io_thread(io_wqe_worker, worker, wqe->node); + if (!IS_ERR(tsk)) { + io_init_new_worker(wqe, worker, tsk); + } else if (!io_should_retry_thread(PTR_ERR(tsk))) { + kfree(worker); + goto fail; + } else { + INIT_WORK(&worker->work, io_workqueue_create); + schedule_work(&worker->work); + } + + return true; +} + +/* + * Iterate the passed in list and call the specific function for each + * worker that isn't exiting + */ +static bool io_wq_for_each_worker(struct io_wqe *wqe, + bool (*func)(struct io_worker *, void *), + void *data) +{ + struct io_worker *worker; + bool ret = false; + + list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { + if (io_worker_get(worker)) { + /* no task if node is/was offline */ + if (worker->task) + ret = func(worker, data); + io_worker_release(worker); + if (ret) + break; + } + } + + return ret; +} + +static bool io_wq_worker_wake(struct io_worker *worker, void *data) +{ + set_notify_signal(worker->task); + wake_up_process(worker->task); + return false; +} + +static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) +{ + struct io_wq *wq = wqe->wq; + + do { + work->flags |= IO_WQ_WORK_CANCEL; + wq->do_work(work); + work = wq->free_work(work); + } while (work); +} + +static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work) 
+{ + struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + unsigned int hash; + struct io_wq_work *tail; + + if (!io_wq_is_hashed(work)) { +append: + wq_list_add_tail(&work->list, &acct->work_list); + return; + } + + hash = io_get_work_hash(work); + tail = wqe->hash_tail[hash]; + wqe->hash_tail[hash] = work; + if (!tail) + goto append; + + wq_list_add_after(&work->list, &tail->list, &acct->work_list); +} + +static bool io_wq_work_match_item(struct io_wq_work *work, void *data) +{ + return work == data; +} + +static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) +{ + struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + unsigned work_flags = work->flags; + bool do_create; + + /* + * If io-wq is exiting for this task, or if the request has explicitly + * been marked as one that should not get executed, cancel it here. + */ + if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) || + (work->flags & IO_WQ_WORK_CANCEL)) { + io_run_cancel(work, wqe); + return; + } + + raw_spin_lock(&wqe->lock); + io_wqe_insert_work(wqe, work); + clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); + + rcu_read_lock(); + do_create = !io_wqe_activate_free_worker(wqe, acct); + rcu_read_unlock(); + + raw_spin_unlock(&wqe->lock); + + if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) || + !atomic_read(&acct->nr_running))) { + bool did_create; + + did_create = io_wqe_create_worker(wqe, acct); + if (likely(did_create)) + return; + + raw_spin_lock(&wqe->lock); + /* fatal condition, failed to create the first worker */ + if (!acct->nr_workers) { + struct io_cb_cancel_data match = { + .fn = io_wq_work_match_item, + .data = work, + .cancel_all = false, + }; + + if (io_acct_cancel_pending_work(wqe, acct, &match)) + raw_spin_lock(&wqe->lock); + } + raw_spin_unlock(&wqe->lock); + } +} + +void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) +{ + struct io_wqe *wqe = wq->wqes[numa_node_id()]; + + io_wqe_enqueue(wqe, work); +} + +/* + * Work items that hash to the same value will not be done in parallel. + * Used to limit concurrent writes, generally hashed by inode. + */ +void io_wq_hash_work(struct io_wq_work *work, void *val) +{ + unsigned int bit; + + bit = hash_ptr(val, IO_WQ_HASH_ORDER); + work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); +} + +static bool io_wq_worker_cancel(struct io_worker *worker, void *data) +{ + struct io_cb_cancel_data *match = data; + + /* + * Hold the lock to avoid ->cur_work going out of scope, caller + * may dereference the passed in work. 
+ */ + spin_lock(&worker->lock); + if (worker->cur_work && + match->fn(worker->cur_work, match->data)) { + set_notify_signal(worker->task); + match->nr_running++; + } + spin_unlock(&worker->lock); + + return match->nr_running && !match->cancel_all; +} + +static inline void io_wqe_remove_pending(struct io_wqe *wqe, + struct io_wq_work *work, + struct io_wq_work_node *prev) +{ + struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + unsigned int hash = io_get_work_hash(work); + struct io_wq_work *prev_work = NULL; + + if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { + if (prev) + prev_work = container_of(prev, struct io_wq_work, list); + if (prev_work && io_get_work_hash(prev_work) == hash) + wqe->hash_tail[hash] = prev_work; + else + wqe->hash_tail[hash] = NULL; + } + wq_list_del(&acct->work_list, &work->list, prev); +} + +static bool io_acct_cancel_pending_work(struct io_wqe *wqe, + struct io_wqe_acct *acct, + struct io_cb_cancel_data *match) + __releases(wqe->lock) +{ + struct io_wq_work_node *node, *prev; + struct io_wq_work *work; + + wq_list_for_each(node, prev, &acct->work_list) { + work = container_of(node, struct io_wq_work, list); + if (!match->fn(work, match->data)) + continue; + io_wqe_remove_pending(wqe, work, prev); + raw_spin_unlock(&wqe->lock); + io_run_cancel(work, wqe); + match->nr_pending++; + /* not safe to continue after unlock */ + return true; + } + + return false; +} + +static void io_wqe_cancel_pending_work(struct io_wqe *wqe, + struct io_cb_cancel_data *match) +{ + int i; +retry: + raw_spin_lock(&wqe->lock); + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + struct io_wqe_acct *acct = io_get_acct(wqe, i == 0); + + if (io_acct_cancel_pending_work(wqe, acct, match)) { + if (match->cancel_all) + goto retry; + return; + } + } + raw_spin_unlock(&wqe->lock); +} + +static void io_wqe_cancel_running_work(struct io_wqe *wqe, + struct io_cb_cancel_data *match) +{ + rcu_read_lock(); + io_wq_for_each_worker(wqe, io_wq_worker_cancel, match); + rcu_read_unlock(); +} + +enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, + void *data, bool cancel_all) +{ + struct io_cb_cancel_data match = { + .fn = cancel, + .data = data, + .cancel_all = cancel_all, + }; + int node; + + /* + * First check pending list, if we're lucky we can just remove it + * from there. CANCEL_OK means that the work is returned as-new, + * no completion will be posted for it. + */ + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + io_wqe_cancel_pending_work(wqe, &match); + if (match.nr_pending && !match.cancel_all) + return IO_WQ_CANCEL_OK; + } + + /* + * Now check if a free (going busy) or busy worker has the work + * currently running. If we find it there, we'll return CANCEL_RUNNING + * as an indication that we attempt to signal cancellation. The + * completion will run normally in this case. 
+ */ + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + io_wqe_cancel_running_work(wqe, &match); + if (match.nr_running && !match.cancel_all) + return IO_WQ_CANCEL_RUNNING; + } + + if (match.nr_running) + return IO_WQ_CANCEL_RUNNING; + if (match.nr_pending) + return IO_WQ_CANCEL_OK; + return IO_WQ_CANCEL_NOTFOUND; +} + +static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode, + int sync, void *key) +{ + struct io_wqe *wqe = container_of(wait, struct io_wqe, wait); + int i; + + list_del_init(&wait->entry); + + rcu_read_lock(); + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + struct io_wqe_acct *acct = &wqe->acct[i]; + + if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags)) + io_wqe_activate_free_worker(wqe, acct); + } + rcu_read_unlock(); + return 1; +} + +struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) +{ + int ret, node, i; + struct io_wq *wq; + + if (WARN_ON_ONCE(!data->free_work || !data->do_work)) + return ERR_PTR(-EINVAL); + if (WARN_ON_ONCE(!bounded)) + return ERR_PTR(-EINVAL); + + wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL); + if (!wq) + return ERR_PTR(-ENOMEM); + ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node); + if (ret) + goto err_wq; + + refcount_inc(&data->hash->refs); + wq->hash = data->hash; + wq->free_work = data->free_work; + wq->do_work = data->do_work; + + ret = -ENOMEM; + for_each_node(node) { + struct io_wqe *wqe; + int alloc_node = node; + + if (!node_online(alloc_node)) + alloc_node = NUMA_NO_NODE; + wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node); + if (!wqe) + goto err; + wq->wqes[node] = wqe; + if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL)) + goto err; + cpumask_copy(wqe->cpu_mask, cpumask_of_node(node)); + wqe->node = alloc_node; + wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; + wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = + task_rlimit(current, RLIMIT_NPROC); + INIT_LIST_HEAD(&wqe->wait.entry); + wqe->wait.func = io_wqe_hash_wake; + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + struct io_wqe_acct *acct = &wqe->acct[i]; + + acct->index = i; + atomic_set(&acct->nr_running, 0); + INIT_WQ_LIST(&acct->work_list); + } + wqe->wq = wq; + raw_spin_lock_init(&wqe->lock); + INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0); + INIT_LIST_HEAD(&wqe->all_list); + } + + wq->task = get_task_struct(data->task); + atomic_set(&wq->worker_refs, 1); + init_completion(&wq->worker_done); + return wq; +err: + io_wq_put_hash(data->hash); + cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); + for_each_node(node) { + if (!wq->wqes[node]) + continue; + free_cpumask_var(wq->wqes[node]->cpu_mask); + kfree(wq->wqes[node]); + } +err_wq: + kfree(wq); + return ERR_PTR(ret); +} + +static bool io_task_work_match(struct callback_head *cb, void *data) +{ + struct io_worker *worker; + + if (cb->func != create_worker_cb && cb->func != create_worker_cont) + return false; + worker = container_of(cb, struct io_worker, create_work); + return worker->wqe->wq == data; +} + +void io_wq_exit_start(struct io_wq *wq) +{ + set_bit(IO_WQ_BIT_EXIT, &wq->state); +} + +static void io_wq_cancel_tw_create(struct io_wq *wq) +{ + struct callback_head *cb; + + while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) { + struct io_worker *worker; + + worker = container_of(cb, struct io_worker, create_work); + io_worker_cancel_cb(worker); + /* + * Only the worker continuation helper has worker allocated and + * hence needs freeing. 
+ */ + if (cb->func == create_worker_cont) + kfree(worker); + } +} + +static void io_wq_exit_workers(struct io_wq *wq) +{ + int node; + + if (!wq->task) + return; + + io_wq_cancel_tw_create(wq); + + rcu_read_lock(); + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL); + } + rcu_read_unlock(); + io_worker_ref_put(wq); + wait_for_completion(&wq->worker_done); + + for_each_node(node) { + spin_lock_irq(&wq->hash->wait.lock); + list_del_init(&wq->wqes[node]->wait.entry); + spin_unlock_irq(&wq->hash->wait.lock); + } + put_task_struct(wq->task); + wq->task = NULL; +} + +static void io_wq_destroy(struct io_wq *wq) +{ + int node; + + cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); + + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + struct io_cb_cancel_data match = { + .fn = io_wq_work_match_all, + .cancel_all = true, + }; + io_wqe_cancel_pending_work(wqe, &match); + free_cpumask_var(wqe->cpu_mask); + kfree(wqe); + } + io_wq_put_hash(wq->hash); + kfree(wq); +} + +void io_wq_put_and_exit(struct io_wq *wq) +{ + WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state)); + + io_wq_exit_workers(wq); + io_wq_destroy(wq); +} + +struct online_data { + unsigned int cpu; + bool online; +}; + +static bool io_wq_worker_affinity(struct io_worker *worker, void *data) +{ + struct online_data *od = data; + + if (od->online) + cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask); + else + cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask); + return false; +} + +static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online) +{ + struct online_data od = { + .cpu = cpu, + .online = online + }; + int i; + + rcu_read_lock(); + for_each_node(i) + io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od); + rcu_read_unlock(); + return 0; +} + +static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); + + return __io_wq_cpu_online(wq, cpu, true); +} + +static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); + + return __io_wq_cpu_online(wq, cpu, false); +} + +int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask) +{ + int i; + + rcu_read_lock(); + for_each_node(i) { + struct io_wqe *wqe = wq->wqes[i]; + + if (mask) + cpumask_copy(wqe->cpu_mask, mask); + else + cpumask_copy(wqe->cpu_mask, cpumask_of_node(i)); + } + rcu_read_unlock(); + return 0; +} + +/* + * Set max number of unbounded workers, returns old value. If new_count is 0, + * then just return the old value. 
+ */ +int io_wq_max_workers(struct io_wq *wq, int *new_count) +{ + int prev[IO_WQ_ACCT_NR]; + bool first_node = true; + int i, node; + + BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND); + BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND); + BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2); + + for (i = 0; i < 2; i++) { + if (new_count[i] > task_rlimit(current, RLIMIT_NPROC)) + new_count[i] = task_rlimit(current, RLIMIT_NPROC); + } + + for (i = 0; i < IO_WQ_ACCT_NR; i++) + prev[i] = 0; + + rcu_read_lock(); + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + struct io_wqe_acct *acct; + + raw_spin_lock(&wqe->lock); + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + acct = &wqe->acct[i]; + if (first_node) + prev[i] = max_t(int, acct->max_workers, prev[i]); + if (new_count[i]) + acct->max_workers = new_count[i]; + } + raw_spin_unlock(&wqe->lock); + first_node = false; + } + rcu_read_unlock(); + + for (i = 0; i < IO_WQ_ACCT_NR; i++) + new_count[i] = prev[i]; + + return 0; +} + +static __init int io_wq_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online", + io_wq_cpu_online, io_wq_cpu_offline); + if (ret < 0) + return ret; + io_wq_online = ret; + return 0; +} +subsys_initcall(io_wq_init); diff --git a/fs/io-wq.h b/io_uring/io-wq.h similarity index 81% rename from fs/io-wq.h rename to io_uring/io-wq.h index 75113bcd5889f..bf5c4c5337605 100644 --- a/fs/io-wq.h +++ b/io_uring/io-wq.h @@ -1,7 +1,7 @@ #ifndef INTERNAL_IO_WQ_H #define INTERNAL_IO_WQ_H -#include +#include struct io_wq; @@ -9,16 +9,8 @@ enum { IO_WQ_WORK_CANCEL = 1, IO_WQ_WORK_HASHED = 2, IO_WQ_WORK_UNBOUND = 4, - IO_WQ_WORK_NO_CANCEL = 8, IO_WQ_WORK_CONCURRENT = 16, - IO_WQ_WORK_FILES = 32, - IO_WQ_WORK_FS = 64, - IO_WQ_WORK_MM = 128, - IO_WQ_WORK_CREDS = 256, - IO_WQ_WORK_BLKCG = 512, - IO_WQ_WORK_FSIZE = 1024, - IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */ }; @@ -52,6 +44,7 @@ static inline void wq_list_add_after(struct io_wq_work_node *node, static inline void wq_list_add_tail(struct io_wq_work_node *node, struct io_wq_work_list *list) { + node->next = NULL; if (!list->first) { list->last = node; WRITE_ONCE(list->first, node); @@ -59,7 +52,6 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node, list->last->next = node; list->last = node; } - node->next = NULL; } static inline void wq_list_cut(struct io_wq_work_list *list, @@ -95,7 +87,6 @@ static inline void wq_list_del(struct io_wq_work_list *list, struct io_wq_work { struct io_wq_work_node list; - struct io_identity *identity; unsigned flags; }; @@ -107,37 +98,48 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) return container_of(work->list.next, struct io_wq_work, list); } -typedef void (free_work_fn)(struct io_wq_work *); -typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *); +typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *); +typedef void (io_wq_work_fn)(struct io_wq_work *); -struct io_wq_data { - struct user_struct *user; +struct io_wq_hash { + refcount_t refs; + unsigned long map; + struct wait_queue_head wait; +}; + +static inline void io_wq_put_hash(struct io_wq_hash *hash) +{ + if (refcount_dec_and_test(&hash->refs)) + kfree(hash); +} +struct io_wq_data { + struct io_wq_hash *hash; + struct task_struct *task; io_wq_work_fn *do_work; free_work_fn *free_work; }; struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data); -bool io_wq_get(struct io_wq *wq, struct io_wq_data *data); -void io_wq_destroy(struct io_wq *wq); +void 
io_wq_exit_start(struct io_wq *wq); +void io_wq_put_and_exit(struct io_wq *wq); void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); void io_wq_hash_work(struct io_wq_work *work, void *val); +int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask); +int io_wq_max_workers(struct io_wq *wq, int *new_count); + static inline bool io_wq_is_hashed(struct io_wq_work *work) { return work->flags & IO_WQ_WORK_HASHED; } -void io_wq_cancel_all(struct io_wq *wq); - typedef bool (work_cancel_fn)(struct io_wq_work *, void *); enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, void *data, bool cancel_all); -struct task_struct *io_wq_get_task(struct io_wq *wq); - #if defined(CONFIG_IO_WQ) extern void io_wq_worker_sleeping(struct task_struct *); extern void io_wq_worker_running(struct task_struct *); @@ -152,6 +154,7 @@ static inline void io_wq_worker_running(struct task_struct *tsk) static inline bool io_wq_current_is_worker(void) { - return in_task() && (current->flags & PF_IO_WORKER); + return in_task() && (current->flags & PF_IO_WORKER) && + current->pf_io_worker; } #endif diff --git a/fs/io_uring.c b/io_uring/io_uring.c similarity index 50% rename from fs/io_uring.c rename to io_uring/io_uring.c index 9654b60a06a58..642e1a0560c6d 100644 --- a/fs/io_uring.c +++ b/io_uring/io_uring.c @@ -11,7 +11,7 @@ * before writing the tail (using smp_load_acquire to read the tail will * do). It also needs a smp_mb() before updating CQ head (ordering the * entry load(s) with the head store), pairing with an implicit barrier - * through a control-dependency in io_get_cqring (smp_store_release to + * through a control-dependency in io_get_cqe (smp_store_release to * store head will do). Failure to do so could lead to reading invalid * CQ entries. * @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -75,35 +74,43 @@ #include #include #include -#include #include #include #include #include -#include -#include +#include #define CREATE_TRACE_POINTS #include #include -#include "internal.h" +#include "../fs/internal.h" #include "io-wq.h" #define IORING_MAX_ENTRIES 32768 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) +#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8 -/* - * Shift of 9 is 512 entries, or exactly one page on 64-bit archs - */ -#define IORING_FILE_TABLE_SHIFT 9 -#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT) -#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1) -#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE) +/* only define max */ +#define IORING_MAX_FIXED_FILES (1U << 15) #define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \ IORING_REGISTER_LAST + IORING_OP_LAST) +#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3) +#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT) +#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1) + +#define IORING_MAX_REG_BUFFERS (1U << 14) + +#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ + IOSQE_IO_HARDLINK | IOSQE_ASYNC | \ + IOSQE_BUFFER_SELECT) +#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ + REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS) + +#define IO_TCTX_REFS_CACHE_NR (1U << 10) + struct io_uring { u32 head ____cacheline_aligned_in_smp; u32 tail ____cacheline_aligned_in_smp; @@ -162,7 +169,7 @@ struct io_rings { * Written by the application, shouldn't be modified by the * kernel. 
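The file-header comment above pins down the userspace side of the CQ ring protocol: the tail is read with an acquire load and the head is published with a release store (the counterpart of the kernel's smp_store_release). Modeled from the application side with C11 atomics (hypothetical struct; the real layout lives in struct io_rings):

    #include <stdatomic.h>

    struct cq_ring_model {
            _Atomic unsigned head;  /* written by the application */
            _Atomic unsigned tail;  /* written by the kernel */
    };

    static unsigned drain_cqes(struct cq_ring_model *cq, unsigned mask)
    {
            unsigned head = atomic_load_explicit(&cq->head, memory_order_relaxed);
            unsigned tail = atomic_load_explicit(&cq->tail, memory_order_acquire);
            unsigned n;

            for (n = head; n != tail; n++) {
                    /* cqes[n & mask] is safe to read only after the
                     * acquire load of tail above */
            }

            /* publish consumption; orders the entry loads before the store */
            atomic_store_explicit(&cq->head, tail, memory_order_release);
            return tail - head;
    }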
*/ - u32 cq_flags; + u32 cq_flags; /* * Number of completion events lost because the queue was full; * this should be avoided by the application by making sure @@ -187,36 +194,65 @@ struct io_rings { struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; }; +enum io_uring_cmd_flags { + IO_URING_F_NONBLOCK = 1, + IO_URING_F_COMPLETE_DEFER = 2, +}; + struct io_mapped_ubuf { u64 ubuf; - size_t len; - struct bio_vec *bvec; + u64 ubuf_end; unsigned int nr_bvecs; unsigned long acct_pages; + struct bio_vec bvec[]; +}; + +struct io_ring_ctx; + +struct io_overflow_cqe { + struct io_uring_cqe cqe; + struct list_head list; +}; + +struct io_fixed_file { + /* file * with additional FFS_* flags */ + unsigned long file_ptr; +}; + +struct io_rsrc_put { + struct list_head list; + u64 tag; + union { + void *rsrc; + struct file *file; + struct io_mapped_ubuf *buf; + }; }; -struct fixed_file_table { - struct file **files; +struct io_file_table { + struct io_fixed_file *files; }; -struct fixed_file_ref_node { +struct io_rsrc_node { struct percpu_ref refs; struct list_head node; - struct list_head file_list; - struct fixed_file_data *file_data; + struct list_head rsrc_list; + struct io_rsrc_data *rsrc_data; struct llist_node llist; bool done; }; -struct fixed_file_data { - struct fixed_file_table *table; +typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc); + +struct io_rsrc_data { struct io_ring_ctx *ctx; - struct fixed_file_ref_node *node; - struct percpu_ref refs; + u64 **tags; + unsigned int nr; + rsrc_put_fn *do_put; + atomic_t refs; struct completion done; - struct list_head ref_list; - spinlock_t lock; + bool quiesce; }; struct io_buffer { @@ -234,33 +270,81 @@ struct io_restriction { bool registered; }; +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK, +}; + struct io_sq_data { refcount_t refs; + atomic_t park_pending; struct mutex lock; /* ctx's that are using this sqd */ struct list_head ctx_list; - struct list_head ctx_new_list; - struct mutex ctx_lock; struct task_struct *thread; struct wait_queue_head wait; + + unsigned sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + + unsigned long state; + struct completion exited; +}; + +#define IO_COMPL_BATCH 32 +#define IO_REQ_CACHE_SIZE 32 +#define IO_REQ_ALLOC_BATCH 8 + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + struct blk_plug plug; + struct io_submit_link link; + + /* + * io_kiocb alloc cache + */ + void *reqs[IO_REQ_CACHE_SIZE]; + unsigned int free_reqs; + + bool plug_started; + + /* + * Batch completion logic + */ + struct io_kiocb *compl_reqs[IO_COMPL_BATCH]; + unsigned int compl_nr; + /* inline/task_work completion list, under ->uring_lock */ + struct list_head free_list; + + unsigned int ios_left; }; struct io_ring_ctx { + /* const or read-mostly hot data */ struct { struct percpu_ref refs; - } ____cacheline_aligned_in_smp; - struct { + struct io_rings *rings; unsigned int flags; unsigned int compat: 1; - unsigned int limit_mem: 1; - unsigned int cq_overflow_flushed: 1; unsigned int drain_next: 1; unsigned int eventfd_async: 1; unsigned int restricted: 1; - unsigned int sqo_dead: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + } ____cacheline_aligned_in_smp; + + /* submission data */ + struct { + struct mutex uring_lock; /* * Ring buffer of indices into array of io_uring_sqe, which is @@ -274,101 +358,59 @@ struct io_ring_ctx { * array. 
*/ u32 *sq_array; + struct io_uring_sqe *sq_sqes; unsigned cached_sq_head; unsigned sq_entries; - unsigned sq_mask; - unsigned sq_thread_idle; - unsigned cached_sq_dropped; - unsigned cached_cq_overflow; - unsigned long sq_check_overflow; - struct list_head defer_list; + + /* + * Fixed resources fast path, should be accessed only under + * uring_lock, and updated through io_uring_register(2) + */ + struct io_rsrc_node *rsrc_node; + struct io_file_table file_table; + unsigned nr_user_files; + unsigned nr_user_bufs; + struct io_mapped_ubuf **user_bufs; + + struct io_submit_state submit_state; struct list_head timeout_list; + struct list_head ltimeout_list; struct list_head cq_overflow_list; - - struct io_uring_sqe *sq_sqes; + struct xarray io_buffers; + struct xarray personalities; + u32 pers_next; + unsigned sq_thread_idle; } ____cacheline_aligned_in_smp; - struct io_rings *rings; - - /* IO offload */ - struct io_wq *io_wq; - - /* - * For SQPOLL usage - we hold a reference to the parent task, so we - * have access to the ->files - */ - struct task_struct *sqo_task; - - /* Only used for accounting purposes */ - struct mm_struct *mm_account; - -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *sqo_blkcg_css; -#endif + /* IRQ completion list, under ->completion_lock */ + struct list_head locked_free_list; + unsigned int locked_free_nr; + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ struct io_sq_data *sq_data; /* if using sq thread polling */ struct wait_queue_head sqo_sq_wait; - struct wait_queue_entry sqo_wait_entry; struct list_head sqd_list; - /* - * If used, fixed file set. Writers must ensure that ->refs is dead, - * readers must ensure that ->refs is alive as long as the file* is - * used. Only updated through io_uring_register(2). - */ - struct fixed_file_data *file_data; - unsigned nr_user_files; - - /* if used, fixed mapped user buffers */ - unsigned nr_user_bufs; - struct io_mapped_ubuf *user_bufs; - - struct user_struct *user; - - const struct cred *creds; - -#ifdef CONFIG_AUDIT - kuid_t loginuid; - unsigned int sessionid; -#endif - - struct completion ref_comp; - struct completion sq_thread_comp; - - /* if all else fails... */ - struct io_kiocb *fallback_req; - -#if defined(CONFIG_UNIX) - struct socket *ring_sock; -#endif - - struct xarray io_buffers; - - struct xarray personalities; - u32 pers_next; + unsigned long check_cq_overflow; struct { unsigned cached_cq_tail; unsigned cq_entries; - unsigned cq_mask; + struct eventfd_ctx *cq_ev_fd; + struct wait_queue_head poll_wait; + struct wait_queue_head cq_wait; + unsigned cq_extra; atomic_t cq_timeouts; unsigned cq_last_tm_flush; - unsigned long cq_check_overflow; - struct wait_queue_head cq_wait; - struct fasync_struct *cq_fasync; - struct eventfd_ctx *cq_ev_fd; - } ____cacheline_aligned_in_smp; - - struct { - struct mutex uring_lock; - wait_queue_head_t wait; } ____cacheline_aligned_in_smp; struct { spinlock_t completion_lock; + spinlock_t timeout_lock; + /* * ->iopoll_list is protected by the ctx->uring_lock for * io_uring instances that don't use IORING_SETUP_SQPOLL. 
@@ -378,17 +420,62 @@ struct io_ring_ctx { struct list_head iopoll_list; struct hlist_head *cancel_hash; unsigned cancel_hash_bits; - bool poll_multi_file; - - spinlock_t inflight_lock; - struct list_head inflight_list; + bool poll_multi_queue; } ____cacheline_aligned_in_smp; - struct delayed_work file_put_work; - struct llist_head file_put_llist; - - struct work_struct exit_work; struct io_restriction restrictions; + + /* slow path rsrc auxilary data, used by update/register */ + struct { + struct io_rsrc_node *rsrc_backup_node; + struct io_mapped_ubuf *dummy_ubuf; + struct io_rsrc_data *file_data; + struct io_rsrc_data *buf_data; + + struct delayed_work rsrc_put_work; + struct llist_head rsrc_put_llist; + struct list_head rsrc_ref_list; + spinlock_t rsrc_ref_lock; + }; + + /* Keep this last, we don't need it for the fast path */ + struct { + #if defined(CONFIG_UNIX) + struct socket *ring_sock; + #endif + /* hashed buffered write serialization */ + struct io_wq_hash *hash_map; + + /* Only used for accounting purposes */ + struct user_struct *user; + struct mm_struct *mm_account; + + /* ctx exit and cancelation */ + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + u32 iowq_limits[2]; + bool iowq_limits_set; + }; +}; + +struct io_uring_task { + /* submission side */ + int cached_refs; + struct xarray xa; + struct wait_queue_head wait; + const struct io_ring_ctx *last; + struct io_wq *io_wq; + struct percpu_counter inflight; + atomic_t inflight_tracked; + atomic_t in_idle; + + spinlock_t task_lock; + struct io_wq_work_list task_list; + struct callback_head task_work; + bool task_running; }; /* @@ -397,20 +484,24 @@ struct io_ring_ctx { */ struct io_poll_iocb { struct file *file; - union { - struct wait_queue_head *head; - u64 addr; - }; + struct wait_queue_head *head; __poll_t events; - bool done; - bool canceled; struct wait_queue_entry wait; }; +struct io_poll_update { + struct file *file; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + struct io_close { struct file *file; - struct file *put_file; int fd; + u32 file_slot; }; struct io_timeout_data { @@ -418,6 +509,7 @@ struct io_timeout_data { struct hrtimer timer; struct timespec64 ts; enum hrtimer_mode mode; + u32 flags; }; struct io_accept { @@ -425,6 +517,7 @@ struct io_accept { struct sockaddr __user *addr; int __user *addr_len; int flags; + u32 file_slot; unsigned long nofile; }; @@ -446,11 +539,20 @@ struct io_timeout { u32 off; u32 target_seq; struct list_head list; + /* head of the link, used by linked timeouts only */ + struct io_kiocb *head; + /* for linked completions */ + struct io_kiocb *prev; }; struct io_timeout_rem { struct file *file; u64 addr; + + /* timeout update */ + struct timespec64 ts; + u32 flags; + bool ltimeout; }; struct io_rw { @@ -469,25 +571,27 @@ struct io_connect { struct io_sr_msg { struct file *file; union { - struct user_msghdr __user *umsg; - void __user *buf; + struct compat_msghdr __user *umsg_compat; + struct user_msghdr __user *umsg; + void __user *buf; }; int msg_flags; int bgid; size_t len; + size_t done_io; struct io_buffer *kbuf; }; struct io_open { struct file *file; int dfd; - bool ignore_nonblock; + u32 file_slot; struct filename *filename; struct open_how how; unsigned long nofile; }; -struct io_files_update { +struct io_rsrc_update { struct file *file; u64 arg; u32 nr_args; @@ -518,10 +622,10 @@ struct io_epoll 
{ struct io_splice { struct file *file_out; - struct file *file_in; loff_t off_out; loff_t off_in; u64 len; + int splice_fd_in; unsigned int flags; }; @@ -543,9 +647,52 @@ struct io_statx { struct statx __user *buffer; }; +struct io_shutdown { + struct file *file; + int how; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_mkdir { + struct file *file; + int dfd; + umode_t mode; + struct filename *filename; +}; + +struct io_symlink { + struct file *file; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; +}; + +struct io_hardlink { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + struct io_completion { struct file *file; - struct list_head list; u32 cflags; }; @@ -555,7 +702,8 @@ struct io_async_connect { struct io_async_msghdr { struct iovec fast_iov[UIO_FASTIOV]; - struct iovec *iov; + /* points to an allocated iov, if NULL we use fast_iov instead */ + struct iovec *free_iov; struct sockaddr __user *uaddr; struct msghdr msg; struct sockaddr_storage addr; @@ -565,6 +713,7 @@ struct io_async_rw { struct iovec fast_iov[UIO_FASTIOV]; const struct iovec *free_iovec; struct iov_iter iter; + struct iov_iter_state iter_state; size_t bytes_done; struct wait_page_queue wpq; }; @@ -577,19 +726,25 @@ enum { REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, - REQ_F_LINK_HEAD_BIT, - REQ_F_FAIL_LINK_BIT, + /* first byte is taken by user flags, shift it to not overlap */ + REQ_F_FAIL_BIT = 8, REQ_F_INFLIGHT_BIT, REQ_F_CUR_POS_BIT, REQ_F_NOWAIT_BIT, REQ_F_LINK_TIMEOUT_BIT, - REQ_F_ISREG_BIT, REQ_F_NEED_CLEANUP_BIT, REQ_F_POLLED_BIT, REQ_F_BUFFER_SELECTED_BIT, - REQ_F_NO_FILE_TABLE_BIT, - REQ_F_WORK_INITIALIZED_BIT, - REQ_F_LTIMEOUT_ACTIVE_BIT, + REQ_F_COMPLETE_INLINE_BIT, + REQ_F_REISSUE_BIT, + REQ_F_CREDS_BIT, + REQ_F_REFCOUNT_BIT, + REQ_F_ARM_LTIMEOUT_BIT, + REQ_F_PARTIAL_IO_BIT, + /* keep async read/write and isreg together and in order */ + REQ_F_NOWAIT_READ_BIT, + REQ_F_NOWAIT_WRITE_BIT, + REQ_F_ISREG_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -609,11 +764,9 @@ enum { /* IOSQE_BUFFER_SELECT */ REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), - /* head of a link */ - REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT), /* fail rest of links */ - REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT), - /* on inflight list */ + REQ_F_FAIL = BIT(REQ_F_FAIL_BIT), + /* on inflight list, should be cancelled and waited on exit reliably */ REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), /* read/write uses file position */ REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), @@ -621,20 +774,30 @@ enum { REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), /* has or had linked timeout */ REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), - /* regular file */ - REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), /* needs cleanup */ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), /* already went through poll handler */ REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), /* buffer already selected */ REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), - /* doesn't need file table for this request */ - REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT), - /* io_wq_work is initialized */ - REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT), - /* linked timeout is active, i.e. 
prepared by link's head */ - REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT), + /* completion is deferred through io_comp_state */ + REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), + /* caller should reissue async */ + REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), + /* supports async reads */ + REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT), + /* supports async writes */ + REQ_F_NOWAIT_WRITE = BIT(REQ_F_NOWAIT_WRITE_BIT), + /* regular file */ + REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), + /* has creds assigned */ + REQ_F_CREDS = BIT(REQ_F_CREDS_BIT), + /* skip refcounting if not set */ + REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), + /* there is a linked timeout that has to be armed */ + REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), + /* request has already done partial IO */ + REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), }; struct async_poll { @@ -642,6 +805,21 @@ struct async_poll { struct io_poll_iocb *double_poll; }; +typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked); + +struct io_task_work { + union { + struct io_wq_work_node node; + struct llist_node fallback_node; + }; + io_req_tw_func_t func; +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + /* * NOTE! Each of the iocb union members has the file pointer * as the first entry in their struct definition. So you can @@ -653,6 +831,7 @@ struct io_kiocb { struct file *file; struct io_rw rw; struct io_poll_iocb poll; + struct io_poll_update poll_update; struct io_accept accept; struct io_sync sync; struct io_cancel cancel; @@ -662,13 +841,19 @@ struct io_kiocb { struct io_sr_msg sr_msg; struct io_open open; struct io_close close; - struct io_files_update files_update; + struct io_rsrc_update rsrc_update; struct io_fadvise fadvise; struct io_madvise madvise; struct io_epoll epoll; struct io_splice splice; struct io_provide_buf pbuf; struct io_statx statx; + struct io_shutdown shutdown; + struct io_rename rename; + struct io_unlink unlink; + struct io_mkdir mkdir; + struct io_symlink symlink; + struct io_hardlink hardlink; /* use only after cleaning per-op data, see io_clean_op() */ struct io_completion compl; }; @@ -684,70 +869,44 @@ struct io_kiocb { struct io_ring_ctx *ctx; unsigned int flags; - refcount_t refs; + atomic_t refs; struct task_struct *task; u64 user_data; - struct list_head link_list; + struct io_kiocb *link; + struct percpu_ref *fixed_rsrc_refs; - /* - * 1. used with ctx->iopoll_list with reads/writes - * 2. to track reqs with ->files (see io_op_def::file_table) - */ + /* used with ctx->iopoll_list with reads/writes */ struct list_head inflight_entry; - - struct list_head iopoll_entry; - - struct percpu_ref *fixed_file_refs; - struct callback_head task_work; + struct io_task_work io_task_work; /* for polled requests, i.e. 
IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; struct async_poll *apoll; struct io_wq_work work; -}; + const struct cred *creds; -struct io_defer_entry { - struct list_head list; - struct io_kiocb *req; - u32 seq; + /* store used ubuf, so we can prevent reloading */ + struct io_mapped_ubuf *imu; + /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ + struct io_buffer *kbuf; + atomic_t poll_refs; }; -#define IO_IOPOLL_BATCH 8 - -struct io_comp_state { - unsigned int nr; - struct list_head list; +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; struct io_ring_ctx *ctx; }; -struct io_submit_state { - struct blk_plug plug; - - /* - * io_kiocb alloc cache - */ - void *reqs[IO_IOPOLL_BATCH]; - unsigned int free_reqs; - - /* - * Batch completion logic - */ - struct io_comp_state comp; - - /* - * File reference cache - */ - struct file *file; - unsigned int fd; - unsigned int has_refs; - unsigned int ios_left; +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; }; struct io_op_def { /* needs req->file assigned */ unsigned needs_file : 1; - /* don't fail if file grab fails */ - unsigned needs_file_no_error : 1; /* hash wq insertion if file is a regular file */ unsigned hash_reg_file : 1; /* unbound wq insertion if file is a non-regular file */ @@ -759,11 +918,12 @@ struct io_op_def { unsigned pollout : 1; /* op supports buffer selection */ unsigned buffer_select : 1; - /* must always have async data allocated */ - unsigned needs_async_data : 1; + /* do prep async if is going to be punted */ + unsigned needs_async_setup : 1; + /* should block plug */ + unsigned plug : 1; /* size of async data needed, if any */ unsigned short async_size; - unsigned work_flags; }; static const struct io_op_def io_op_defs[] = { @@ -773,41 +933,36 @@ static const struct io_op_def io_op_defs[] = { .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, - .needs_async_data = 1, + .needs_async_setup = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FILES, }, [IORING_OP_WRITEV] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, - .needs_async_data = 1, + .needs_async_setup = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES, }, [IORING_OP_FSYNC] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_READ_FIXED] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM | - IO_WQ_WORK_FILES, }, [IORING_OP_WRITE_FIXED] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE | - IO_WQ_WORK_MM | IO_WQ_WORK_FILES, }, [IORING_OP_POLL_ADD] = { .needs_file = 1, @@ -816,126 +971,91 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_POLL_REMOVE] = {}, [IORING_OP_SYNC_FILE_RANGE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_SENDMSG] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, - .needs_async_data = 1, + .needs_async_setup = 1, .async_size = sizeof(struct io_async_msghdr), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_RECVMSG] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, 
- .needs_async_data = 1, + .needs_async_setup = 1, .async_size = sizeof(struct io_async_msghdr), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_TIMEOUT] = { - .needs_async_data = 1, .async_size = sizeof(struct io_timeout_data), - .work_flags = IO_WQ_WORK_MM, }, - [IORING_OP_TIMEOUT_REMOVE] = {}, + [IORING_OP_TIMEOUT_REMOVE] = { + /* used by timeout updates' prep() */ + }, [IORING_OP_ACCEPT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES, }, [IORING_OP_ASYNC_CANCEL] = {}, [IORING_OP_LINK_TIMEOUT] = { - .needs_async_data = 1, .async_size = sizeof(struct io_timeout_data), - .work_flags = IO_WQ_WORK_MM, }, [IORING_OP_CONNECT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, - .needs_async_data = 1, + .needs_async_setup = 1, .async_size = sizeof(struct io_async_connect), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FS, }, [IORING_OP_FALLOCATE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE, - }, - [IORING_OP_OPENAT] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, - }, - [IORING_OP_CLOSE] = { - .needs_file = 1, - .needs_file_no_error = 1, - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG, - }, - [IORING_OP_FILES_UPDATE] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM, - }, - [IORING_OP_STATX] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM | - IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG, }, + [IORING_OP_OPENAT] = {}, + [IORING_OP_CLOSE] = {}, + [IORING_OP_FILES_UPDATE] = {}, + [IORING_OP_STATX] = {}, [IORING_OP_READ] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FILES, }, [IORING_OP_WRITE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES, }, [IORING_OP_FADVISE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, - }, - [IORING_OP_MADVISE] = { - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, + [IORING_OP_MADVISE] = {}, [IORING_OP_SEND] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_RECV] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_OPENAT2] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS | - IO_WQ_WORK_BLKCG, }, [IORING_OP_EPOLL_CTL] = { .unbound_nonreg_file = 1, - .work_flags = IO_WQ_WORK_FILES, }, [IORING_OP_SPLICE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_PROVIDE_BUFFERS] = {}, [IORING_OP_REMOVE_BUFFERS] = {}, @@ -944,43 +1064,47 @@ static const struct io_op_def io_op_defs[] = { .hash_reg_file = 1, .unbound_nonreg_file = 1, }, + [IORING_OP_SHUTDOWN] = { + .needs_file = 1, + }, + [IORING_OP_RENAMEAT] = {}, + [IORING_OP_UNLINKAT] = {}, }; -enum io_mem_account { - ACCT_LOCKED, - ACCT_PINNED, -}; +/* requests with any of those set should undergo io_disarm_next() */ +#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) -static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node); -static struct fixed_file_ref_node *alloc_fixed_file_ref_node( - struct io_ring_ctx *ctx); +static bool 
io_disarm_next(struct io_kiocb *req); +static void io_uring_del_tctx_node(unsigned long index); +static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all); +static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd); + +static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags); -static void __io_complete_rw(struct io_kiocb *req, long res, long res2, - struct io_comp_state *cs); -static void io_cqring_fill_event(struct io_kiocb *req, long res); static void io_put_req(struct io_kiocb *req); -static void io_put_req_deferred(struct io_kiocb *req, int nr); -static void io_double_put_req(struct io_kiocb *req); -static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req); -static void __io_queue_linked_timeout(struct io_kiocb *req); +static void io_put_req_deferred(struct io_kiocb *req); +static void io_dismantle_req(struct io_kiocb *req); static void io_queue_linked_timeout(struct io_kiocb *req); -static int __io_sqe_files_update(struct io_ring_ctx *ctx, - struct io_uring_files_update *ip, - unsigned nr_args); -static void __io_clean_op(struct io_kiocb *req); -static struct file *io_file_get(struct io_submit_state *state, +static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type, + struct io_uring_rsrc_update2 *up, + unsigned nr_args); +static void io_clean_op(struct io_kiocb *req); +static struct file *io_file_get(struct io_ring_ctx *ctx, struct io_kiocb *req, int fd, bool fixed); -static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs); -static void io_file_put_work(struct work_struct *work); +static void __io_queue_sqe(struct io_kiocb *req); +static void io_rsrc_put_work(struct work_struct *work); -static ssize_t io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock); -static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, - const struct iovec *fast_iov, - struct iov_iter *iter, bool force); -static void io_req_drop_files(struct io_kiocb *req); static void io_req_task_queue(struct io_kiocb *req); +static void io_submit_flush_completions(struct io_ring_ctx *ctx); +static int io_req_prep_async(struct io_kiocb *req); + +static int io_install_fixed_file(struct io_kiocb *req, struct file *file, + unsigned int issue_flags, u32 slot_index); +static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags); + +static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer); static struct kmem_cache *req_cachep; @@ -999,21 +1123,67 @@ struct sock *io_uring_get_socket(struct file *file) } EXPORT_SYMBOL(io_uring_get_socket); -static inline void io_clean_op(struct io_kiocb *req) +static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked) +{ + if (!*locked) { + mutex_lock(&ctx->uring_lock); + *locked = true; + } +} + +#define io_for_each_link(pos, head) \ + for (pos = (head); pos; pos = pos->link) + +/* + * Shamelessly stolen from the mm implementation of page reference checking, + * see commit f958d7b528b1 for details. 
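The macro defined just after this comment is a single-compare range test: adding 127u to the unsigned count maps zero and the 127 values just past an underflow onto [0, 127], so one comparison flags both "already free" and "refcount went negative". In isolation:

    #include <stdbool.h>

    static bool ref_zero_or_close_to_overflow(unsigned int refs)
    {
            /* true for refs == 0 and for refs in [UINT_MAX - 126, UINT_MAX],
             * i.e. a count that has wrapped below zero */
            return refs + 127u <= 127u;
    }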
+ */ +#define req_ref_zero_or_close_to_overflow(req) \ + ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u) + +static inline bool req_ref_inc_not_zero(struct io_kiocb *req) { - if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED)) - __io_clean_op(req); + WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); + return atomic_inc_not_zero(&req->refs); } -static inline bool __io_match_files(struct io_kiocb *req, - struct files_struct *files) +static inline bool req_ref_put_and_test(struct io_kiocb *req) { - if (req->file && req->file->f_op == &io_uring_fops) + if (likely(!(req->flags & REQ_F_REFCOUNT))) return true; - return ((req->flags & REQ_F_WORK_INITIALIZED) && - (req->work.flags & IO_WQ_WORK_FILES)) && - req->work.identity->files == files; + WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); + return atomic_dec_and_test(&req->refs); +} + +static inline void req_ref_get(struct io_kiocb *req) +{ + WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); + WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); + atomic_inc(&req->refs); +} + +static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) +{ + if (!(req->flags & REQ_F_REFCOUNT)) { + req->flags |= REQ_F_REFCOUNT; + atomic_set(&req->refs, nr); + } +} + +static inline void io_req_set_refcount(struct io_kiocb *req) +{ + __io_req_set_refcount(req, 1); +} + +static inline void io_req_set_rsrc_node(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!req->fixed_rsrc_refs) { + req->fixed_rsrc_refs = &ctx->rsrc_node->refs; + percpu_ref_get(req->fixed_rsrc_refs); + } } static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl) @@ -1028,169 +1198,104 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl) percpu_ref_put(ref); } -static bool io_match_task(struct io_kiocb *head, - struct task_struct *task, - struct files_struct *files) +static bool io_match_task(struct io_kiocb *head, struct task_struct *task, + bool cancel_all) + __must_hold(&req->ctx->timeout_lock) { - struct io_kiocb *link; + struct io_kiocb *req; - if (task && head->task != task) { - /* in terms of cancelation, always match if req task is dead */ - if (head->task->flags & PF_EXITING) - return true; + if (task && head->task != task) return false; - } - if (!files) + if (cancel_all) return true; - if (__io_match_files(head, files)) - return true; - if (head->flags & REQ_F_LINK_HEAD) { - list_for_each_entry(link, &head->link_list, link_list) { - if (__io_match_files(link, files)) - return true; - } + + io_for_each_link(req, head) { + if (req->flags & REQ_F_INFLIGHT) + return true; } return false; } - -static void io_sq_thread_drop_mm(void) +static bool io_match_linked(struct io_kiocb *head) { - struct mm_struct *mm = current->mm; + struct io_kiocb *req; - if (mm) { - kthread_unuse_mm(mm); - mmput(mm); - current->mm = NULL; + io_for_each_link(req, head) { + if (req->flags & REQ_F_INFLIGHT) + return true; } + return false; } -static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) +/* + * As io_match_task() but protected against racing with linked timeouts. + * User must not hold timeout_lock. 
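+ * (when the head carries a linked timeout, the link walk below is done
+ * under timeout_lock).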
+ */ +static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, + bool cancel_all) { - struct mm_struct *mm; - - if (current->flags & PF_EXITING) - return -EFAULT; - if (current->mm) - return 0; + bool matched; - /* Should never happen */ - if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL))) - return -EFAULT; + if (task && head->task != task) + return false; + if (cancel_all) + return true; - task_lock(ctx->sqo_task); - mm = ctx->sqo_task->mm; - if (unlikely(!mm || !mmget_not_zero(mm))) - mm = NULL; - task_unlock(ctx->sqo_task); + if (head->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = head->ctx; - if (mm) { - kthread_use_mm(mm); - return 0; + /* protect against races with linked timeouts */ + spin_lock_irq(&ctx->timeout_lock); + matched = io_match_linked(head); + spin_unlock_irq(&ctx->timeout_lock); + } else { + matched = io_match_linked(head); } - - return -EFAULT; + return matched; } -static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, - struct io_kiocb *req) +static inline void req_set_fail(struct io_kiocb *req) { - if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM)) - return 0; - return __io_sq_thread_acquire_mm(ctx); -} - -static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx, - struct cgroup_subsys_state **cur_css) - -{ -#ifdef CONFIG_BLK_CGROUP - /* puts the old one when swapping */ - if (*cur_css != ctx->sqo_blkcg_css) { - kthread_associate_blkcg(ctx->sqo_blkcg_css); - *cur_css = ctx->sqo_blkcg_css; - } -#endif -} - -static void io_sq_thread_unassociate_blkcg(void) -{ -#ifdef CONFIG_BLK_CGROUP - kthread_associate_blkcg(NULL); -#endif + req->flags |= REQ_F_FAIL; } -static inline void req_set_fail_links(struct io_kiocb *req) +static inline void req_fail_link_node(struct io_kiocb *req, int res) { - if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) - req->flags |= REQ_F_FAIL_LINK; + req_set_fail(req); + req->result = res; } -/* - * None of these are dereferenced, they are simply used to check if any of - * them have changed. If we're under current and check they are still the - * same, we're fine to grab references to them for actual out-of-line use. - */ -static void io_init_identity(struct io_identity *id) +static void io_ring_ctx_ref_free(struct percpu_ref *ref) { - id->files = current->files; - id->mm = current->mm; -#ifdef CONFIG_BLK_CGROUP - rcu_read_lock(); - id->blkcg_css = blkcg_css(); - rcu_read_unlock(); -#endif - id->creds = current_cred(); - id->nsproxy = current->nsproxy; - id->fs = current->fs; - id->fsize = rlimit(RLIMIT_FSIZE); -#ifdef CONFIG_AUDIT - id->loginuid = current->loginuid; - id->sessionid = current->sessionid; -#endif - refcount_set(&id->count, 1); -} + struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); -static inline void __io_req_init_async(struct io_kiocb *req) -{ - memset(&req->work, 0, sizeof(req->work)); - req->flags |= REQ_F_WORK_INITIALIZED; + complete(&ctx->ref_comp); } -/* - * Note: must call io_req_init_async() for the first time you - * touch any members of io_wq_work. 
- */ -static inline void io_req_init_async(struct io_kiocb *req) +static inline bool io_is_timeout_noseq(struct io_kiocb *req) { - struct io_uring_task *tctx = req->task->io_uring; - - if (req->flags & REQ_F_WORK_INITIALIZED) - return; - - __io_req_init_async(req); - - /* Grab a ref if this isn't our static identity */ - req->work.identity = tctx->identity; - if (tctx->identity != &tctx->__identity) - refcount_inc(&req->work.identity->count); + return !req->timeout.off; } -static inline bool io_async_submit(struct io_ring_ctx *ctx) +static void io_fallback_req_func(struct work_struct *work) { - return ctx->flags & IORING_SETUP_SQPOLL; -} + struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, + fallback_work.work); + struct llist_node *node = llist_del_all(&ctx->fallback_llist); + struct io_kiocb *req, *tmp; + bool locked = false; -static void io_ring_ctx_ref_free(struct percpu_ref *ref) -{ - struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); + percpu_ref_get(&ctx->refs); + llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) + req->io_task_work.func(req, &locked); - complete(&ctx->ref_comp); -} + if (locked) { + if (ctx->submit_state.compl_nr) + io_submit_flush_completions(ctx); + mutex_unlock(&ctx->uring_lock); + } + percpu_ref_put(&ctx->refs); -static inline bool io_is_timeout_noseq(struct io_kiocb *req) -{ - return !req->timeout.off; } static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) @@ -1202,10 +1307,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) if (!ctx) return NULL; - ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL); - if (!ctx->fallback_req) - goto err; - /* * Use 5 bits less than the max cq entries, that should give us around * 32 entries per hash list if totally full and uniformly spread. 
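+ * E.g. a 4096-entry CQ ring: ilog2(4096) - 5 = 7 hash bits, i.e. 128
+ * buckets, 4096 / 128 = 32 entries per bucket when completely full.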
@@ -1221,6 +1322,12 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) goto err; __hash_init(ctx->cancel_hash, 1U << hash_bits); + ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL); + if (!ctx->dummy_ubuf) + goto err; + /* set invalid range, so io_import_fixed() fails meeting it */ + ctx->dummy_ubuf->ubuf = -1UL; + if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) goto err; @@ -1228,232 +1335,109 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ctx->flags = p->flags; init_waitqueue_head(&ctx->sqo_sq_wait); INIT_LIST_HEAD(&ctx->sqd_list); - init_waitqueue_head(&ctx->cq_wait); + init_waitqueue_head(&ctx->poll_wait); INIT_LIST_HEAD(&ctx->cq_overflow_list); init_completion(&ctx->ref_comp); - init_completion(&ctx->sq_thread_comp); xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); mutex_init(&ctx->uring_lock); - init_waitqueue_head(&ctx->wait); + init_waitqueue_head(&ctx->cq_wait); spin_lock_init(&ctx->completion_lock); + spin_lock_init(&ctx->timeout_lock); INIT_LIST_HEAD(&ctx->iopoll_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); - spin_lock_init(&ctx->inflight_lock); - INIT_LIST_HEAD(&ctx->inflight_list); - INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work); - init_llist_head(&ctx->file_put_llist); + INIT_LIST_HEAD(&ctx->ltimeout_list); + spin_lock_init(&ctx->rsrc_ref_lock); + INIT_LIST_HEAD(&ctx->rsrc_ref_list); + INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work); + init_llist_head(&ctx->rsrc_put_llist); + INIT_LIST_HEAD(&ctx->tctx_list); + INIT_LIST_HEAD(&ctx->submit_state.free_list); + INIT_LIST_HEAD(&ctx->locked_free_list); + INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); return ctx; err: - if (ctx->fallback_req) - kmem_cache_free(req_cachep, ctx->fallback_req); + kfree(ctx->dummy_ubuf); kfree(ctx->cancel_hash); kfree(ctx); return NULL; } +static void io_account_cq_overflow(struct io_ring_ctx *ctx) +{ + struct io_rings *r = ctx->rings; + + WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1); + ctx->cq_extra--; +} + static bool req_need_defer(struct io_kiocb *req, u32 seq) { if (unlikely(req->flags & REQ_F_IO_DRAIN)) { struct io_ring_ctx *ctx = req->ctx; - return seq != ctx->cached_cq_tail - + READ_ONCE(ctx->cached_cq_overflow); + return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail; } return false; } -static void __io_commit_cqring(struct io_ring_ctx *ctx) -{ - struct io_rings *rings = ctx->rings; - - /* order cqe stores with ring update */ - smp_store_release(&rings->cq.tail, ctx->cached_cq_tail); -} +#define FFS_ASYNC_READ 0x1UL +#define FFS_ASYNC_WRITE 0x2UL +#ifdef CONFIG_64BIT +#define FFS_ISREG 0x4UL +#else +#define FFS_ISREG 0x0UL +#endif +#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG) -static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req) +static inline bool io_req_ffs_set(struct io_kiocb *req) { - if (req->work.identity == &tctx->__identity) - return; - if (refcount_dec_and_test(&req->work.identity->count)) - kfree(req->work.identity); + return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); } -static void io_req_clean_work(struct io_kiocb *req) +static void io_req_track_inflight(struct io_kiocb *req) { - if (!(req->flags & REQ_F_WORK_INITIALIZED)) - return; - - req->flags &= ~REQ_F_WORK_INITIALIZED; - - if (req->work.flags & IO_WQ_WORK_MM) { - mmdrop(req->work.identity->mm); - req->work.flags &= 
~IO_WQ_WORK_MM; - } -#ifdef CONFIG_BLK_CGROUP - if (req->work.flags & IO_WQ_WORK_BLKCG) { - css_put(req->work.identity->blkcg_css); - req->work.flags &= ~IO_WQ_WORK_BLKCG; - } -#endif - if (req->work.flags & IO_WQ_WORK_CREDS) { - put_cred(req->work.identity->creds); - req->work.flags &= ~IO_WQ_WORK_CREDS; - } - if (req->work.flags & IO_WQ_WORK_FS) { - struct fs_struct *fs = req->work.identity->fs; - - spin_lock(&req->work.identity->fs->lock); - if (--fs->users) - fs = NULL; - spin_unlock(&req->work.identity->fs->lock); - if (fs) - free_fs_struct(fs); - req->work.flags &= ~IO_WQ_WORK_FS; + if (!(req->flags & REQ_F_INFLIGHT)) { + req->flags |= REQ_F_INFLIGHT; + atomic_inc(&req->task->io_uring->inflight_tracked); } - if (req->flags & REQ_F_INFLIGHT) - io_req_drop_files(req); - - io_put_identity(req->task->io_uring, req); } -/* - * Create a private copy of io_identity, since some fields don't match - * the current context. - */ -static bool io_identity_cow(struct io_kiocb *req) +static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) { - struct io_uring_task *tctx = req->task->io_uring; - const struct cred *creds = NULL; - struct io_identity *id; - - if (req->work.flags & IO_WQ_WORK_CREDS) - creds = req->work.identity->creds; - - id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL); - if (unlikely(!id)) { - req->work.flags |= IO_WQ_WORK_CANCEL; - return false; - } - - /* - * We can safely just re-init the creds we copied Either the field - * matches the current one, or we haven't grabbed it yet. The only - * exception is ->creds, through registered personalities, so handle - * that one separately. - */ - io_init_identity(id); - if (creds) - id->creds = creds; - - /* add one for this request */ - refcount_inc(&id->count); + if (WARN_ON_ONCE(!req->link)) + return NULL; - /* drop tctx and req identity references, if needed */ - if (tctx->identity != &tctx->__identity && - refcount_dec_and_test(&tctx->identity->count)) - kfree(tctx->identity); - if (req->work.identity != &tctx->__identity && - refcount_dec_and_test(&req->work.identity->count)) - kfree(req->work.identity); + req->flags &= ~REQ_F_ARM_LTIMEOUT; + req->flags |= REQ_F_LINK_TIMEOUT; - req->work.identity = id; - tctx->identity = id; - return true; + /* linked timeouts should have two refs once prep'ed */ + io_req_set_refcount(req); + __io_req_set_refcount(req->link, 2); + return req->link; } -static bool io_grab_identity(struct io_kiocb *req) +static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) { - const struct io_op_def *def = &io_op_defs[req->opcode]; - struct io_identity *id = req->work.identity; - struct io_ring_ctx *ctx = req->ctx; - - if (def->work_flags & IO_WQ_WORK_FSIZE) { - if (id->fsize != rlimit(RLIMIT_FSIZE)) - return false; - req->work.flags |= IO_WQ_WORK_FSIZE; - } -#ifdef CONFIG_BLK_CGROUP - if (!(req->work.flags & IO_WQ_WORK_BLKCG) && - (def->work_flags & IO_WQ_WORK_BLKCG)) { - rcu_read_lock(); - if (id->blkcg_css != blkcg_css()) { - rcu_read_unlock(); - return false; - } - /* - * This should be rare, either the cgroup is dying or the task - * is moving cgroups. Just punt to root for the handful of ios. 
- */ - if (css_tryget_online(id->blkcg_css)) - req->work.flags |= IO_WQ_WORK_BLKCG; - rcu_read_unlock(); - } -#endif - if (!(req->work.flags & IO_WQ_WORK_CREDS)) { - if (id->creds != current_cred()) - return false; - get_cred(id->creds); - req->work.flags |= IO_WQ_WORK_CREDS; - } -#ifdef CONFIG_AUDIT - if (!uid_eq(current->loginuid, id->loginuid) || - current->sessionid != id->sessionid) - return false; -#endif - if (!(req->work.flags & IO_WQ_WORK_FS) && - (def->work_flags & IO_WQ_WORK_FS)) { - if (current->fs != id->fs) - return false; - spin_lock(&id->fs->lock); - if (!id->fs->in_exec) { - id->fs->users++; - req->work.flags |= IO_WQ_WORK_FS; - } else { - req->work.flags |= IO_WQ_WORK_CANCEL; - } - spin_unlock(&current->fs->lock); - } - if (!(req->work.flags & IO_WQ_WORK_FILES) && - (def->work_flags & IO_WQ_WORK_FILES) && - !(req->flags & REQ_F_NO_FILE_TABLE)) { - if (id->files != current->files || - id->nsproxy != current->nsproxy) - return false; - atomic_inc(&id->files->count); - get_nsproxy(id->nsproxy); - - if (!(req->flags & REQ_F_INFLIGHT)) { - req->flags |= REQ_F_INFLIGHT; - - spin_lock_irq(&ctx->inflight_lock); - list_add(&req->inflight_entry, &ctx->inflight_list); - spin_unlock_irq(&ctx->inflight_lock); - } - req->work.flags |= IO_WQ_WORK_FILES; - } - if (!(req->work.flags & IO_WQ_WORK_MM) && - (def->work_flags & IO_WQ_WORK_MM)) { - if (id->mm != current->mm) - return false; - mmgrab(id->mm); - req->work.flags |= IO_WQ_WORK_MM; - } - - return true; + if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) + return NULL; + return __io_prep_linked_timeout(req); } static void io_prep_async_work(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; struct io_ring_ctx *ctx = req->ctx; - struct io_identity *id; - io_req_init_async(req); - id = req->work.identity; + if (!(req->flags & REQ_F_CREDS)) { + req->flags |= REQ_F_CREDS; + req->creds = get_current_cred(); + } + req->work.list.next = NULL; + req->work.flags = 0; if (req->flags & REQ_F_FORCE_ASYNC) req->work.flags |= IO_WQ_WORK_CONCURRENT; @@ -1464,92 +1448,77 @@ static void io_prep_async_work(struct io_kiocb *req) if (def->unbound_nonreg_file) req->work.flags |= IO_WQ_WORK_UNBOUND; } - - /* if we fail grabbing identity, we must COW, regrab, and retry */ - if (io_grab_identity(req)) - return; - - if (!io_identity_cow(req)) - return; - - /* can't fail at this point */ - if (!io_grab_identity(req)) - WARN_ON(1); } static void io_prep_async_link(struct io_kiocb *req) { struct io_kiocb *cur; - io_prep_async_work(req); - if (req->flags & REQ_F_LINK_HEAD) - list_for_each_entry(cur, &req->link_list, link_list) + if (req->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->timeout_lock); + io_for_each_link(cur, req) + io_prep_async_work(cur); + spin_unlock_irq(&ctx->timeout_lock); + } else { + io_for_each_link(cur, req) io_prep_async_work(cur); + } } -static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req) +static void io_queue_async_work(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link = io_prep_linked_timeout(req); + struct io_uring_task *tctx = req->task->io_uring; - trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, - &req->work, req->flags); - io_wq_enqueue(ctx->io_wq, &req->work); - return link; -} + /* must not take the lock, NULL it as a precaution */ + locked = NULL; -static void io_queue_async_work(struct io_kiocb *req) -{ - struct io_kiocb *link; + BUG_ON(!tctx); + BUG_ON(!tctx->io_wq); /* init 
->work of the whole link before punting */ io_prep_async_link(req); - link = __io_queue_async_work(req); + /* + * Not expected to happen, but if we do have a bug where this _can_ + * happen, catch it here and ensure the request is marked as + * canceled. That will make io-wq go through the usual work cancel + * procedure rather than attempt to run this request (or create a new + * worker for it). + */ + if (WARN_ON_ONCE(!same_thread_group(req->task, current))) + req->work.flags |= IO_WQ_WORK_CANCEL; + + trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, + &req->work, req->flags); + io_wq_enqueue(tctx->io_wq, &req->work); if (link) io_queue_linked_timeout(link); } static void io_kill_timeout(struct io_kiocb *req, int status) + __must_hold(&req->ctx->completion_lock) + __must_hold(&req->ctx->timeout_lock) { struct io_timeout_data *io = req->async_data; - int ret; - ret = hrtimer_try_to_cancel(&io->timer); - if (ret != -1) { + if (hrtimer_try_to_cancel(&io->timer) != -1) { if (status) - req_set_fail_links(req); + req_set_fail(req); atomic_set(&req->ctx->cq_timeouts, atomic_read(&req->ctx->cq_timeouts) + 1); list_del_init(&req->timeout.list); - io_cqring_fill_event(req, status); - io_put_req_deferred(req, 1); - } -} - -/* - * Returns true if we found and killed one or more timeouts - */ -static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, - struct files_struct *files) -{ - struct io_kiocb *req, *tmp; - int canceled = 0; - - spin_lock_irq(&ctx->completion_lock); - list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { - if (io_match_task(req, tsk, files)) { - io_kill_timeout(req, -ECANCELED); - canceled++; - } + io_fill_cqe_req(req, status, 0); + io_put_req_deferred(req); } - spin_unlock_irq(&ctx->completion_lock); - return canceled != 0; } -static void __io_queue_deferred(struct io_ring_ctx *ctx) +static void io_queue_deferred(struct io_ring_ctx *ctx) { - do { + while (!list_empty(&ctx->defer_list)) { struct io_defer_entry *de = list_first_entry(&ctx->defer_list, struct io_defer_entry, list); @@ -1558,19 +1527,16 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx) list_del_init(&de->list); io_req_task_queue(de->req); kfree(de); - } while (!list_empty(&ctx->defer_list)); + } } static void io_flush_timeouts(struct io_ring_ctx *ctx) + __must_hold(&ctx->completion_lock) { + u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); struct io_kiocb *req, *tmp; - u32 seq; - - if (list_empty(&ctx->timeout_list)) - return; - - seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); + spin_lock_irq(&ctx->timeout_lock); list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { u32 events_needed, events_got; @@ -1591,441 +1557,568 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) io_kill_timeout(req, 0); } - ctx->cq_last_tm_flush = seq; + spin_unlock_irq(&ctx->timeout_lock); } -static void io_commit_cqring(struct io_ring_ctx *ctx) +static void __io_commit_cqring_flush(struct io_ring_ctx *ctx) { - io_flush_timeouts(ctx); - __io_commit_cqring(ctx); + if (ctx->off_timeout_used) + io_flush_timeouts(ctx); + if (ctx->drain_active) + io_queue_deferred(ctx); +} - if (unlikely(!list_empty(&ctx->defer_list))) - __io_queue_deferred(ctx); +static inline void io_commit_cqring(struct io_ring_ctx *ctx) +{ + if (unlikely(ctx->off_timeout_used || ctx->drain_active)) + __io_commit_cqring_flush(ctx); + /* order cqe stores with ring update */ + smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail); } static inline 
bool io_sqring_full(struct io_ring_ctx *ctx) { struct io_rings *r = ctx->rings; - return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries; + return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries; +} + +static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) +{ + return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); } -static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) +static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; - unsigned tail; + unsigned tail, mask = ctx->cq_entries - 1; - tail = ctx->cached_cq_tail; /* * writes to the cq entry need to come after reading head; the * control dependency is enough as we're using WRITE_ONCE to * fill the cq entry */ - if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) + if (__io_cqring_events(ctx) == ctx->cq_entries) return NULL; - ctx->cached_cq_tail++; - return &rings->cqes[tail & ctx->cq_mask]; + tail = ctx->cached_cq_tail++; + return &rings->cqes[tail & mask]; } static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx) { - if (!ctx->cq_ev_fd) + if (likely(!ctx->cq_ev_fd)) return false; if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) return false; - if (!ctx->eventfd_async) - return true; - return io_wq_current_is_worker(); + return !ctx->eventfd_async || io_wq_current_is_worker(); } +/* + * This should only get called when at least one event has been posted. + * Some applications rely on the eventfd notification count only changing + * IFF a new CQE has been added to the CQ ring. There's no dependency on + * 1:1 relationship between how many times this function is called (and + * hence the eventfd count) and number of CQEs posted to the CQ ring. + */ static void io_cqring_ev_posted(struct io_ring_ctx *ctx) { - if (wq_has_sleeper(&ctx->cq_wait)) { - wake_up_interruptible(&ctx->cq_wait); - kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN); - } - if (waitqueue_active(&ctx->wait)) - wake_up(&ctx->wait); + /* + * wake_up_all() may seem excessive, but io_wake_function() and + * io_should_wake() handle the termination of the loop and only + * wake as many waiters as we need to. 
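+ * The EPOLL_URING_WAKE key marks these as ring-originated wakeups, so
+ * io_uring's own poll handling can tell them apart and avoid waking
+ * itself recursively.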
+ */ + if (wq_has_sleeper(&ctx->cq_wait)) + __wake_up(&ctx->cq_wait, TASK_NORMAL, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait)) wake_up(&ctx->sq_data->wait); if (io_should_trigger_evfd(ctx)) - eventfd_signal(ctx->cq_ev_fd, 1); + eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE); + if (waitqueue_active(&ctx->poll_wait)) + __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); } -static void io_cqring_mark_overflow(struct io_ring_ctx *ctx) +static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) { - if (list_empty(&ctx->cq_overflow_list)) { - clear_bit(0, &ctx->sq_check_overflow); - clear_bit(0, &ctx->cq_check_overflow); - ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; + /* see waitqueue_active() comment */ + smp_mb(); + + if (ctx->flags & IORING_SETUP_SQPOLL) { + if (waitqueue_active(&ctx->cq_wait)) + __wake_up(&ctx->cq_wait, TASK_NORMAL, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); } + if (io_should_trigger_evfd(ctx)) + eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE); + if (waitqueue_active(&ctx->poll_wait)) + __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); } /* Returns true if there are no backlogged entries after the flush */ -static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, - struct task_struct *tsk, - struct files_struct *files) +static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) { - struct io_rings *rings = ctx->rings; - struct io_kiocb *req, *tmp; - struct io_uring_cqe *cqe; - unsigned long flags; - LIST_HEAD(list); - - if (!force) { - if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) == - rings->cq_ring_entries)) - return false; - } + bool all_flushed, posted; - spin_lock_irqsave(&ctx->completion_lock, flags); + if (!force && __io_cqring_events(ctx) == ctx->cq_entries) + return false; - cqe = NULL; - list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) { - if (!io_match_task(req, tsk, files)) - continue; + posted = false; + spin_lock(&ctx->completion_lock); + while (!list_empty(&ctx->cq_overflow_list)) { + struct io_uring_cqe *cqe = io_get_cqe(ctx); + struct io_overflow_cqe *ocqe; - cqe = io_get_cqring(ctx); if (!cqe && !force) break; + ocqe = list_first_entry(&ctx->cq_overflow_list, + struct io_overflow_cqe, list); + if (cqe) + memcpy(cqe, &ocqe->cqe, sizeof(*cqe)); + else + io_account_cq_overflow(ctx); - list_move(&req->compl.list, &list); - if (cqe) { - WRITE_ONCE(cqe->user_data, req->user_data); - WRITE_ONCE(cqe->res, req->result); - WRITE_ONCE(cqe->flags, req->compl.cflags); - } else { - ctx->cached_cq_overflow++; - WRITE_ONCE(ctx->rings->cq_overflow, - ctx->cached_cq_overflow); - } + posted = true; + list_del(&ocqe->list); + kfree(ocqe); } - io_commit_cqring(ctx); - io_cqring_mark_overflow(ctx); - - spin_unlock_irqrestore(&ctx->completion_lock, flags); - io_cqring_ev_posted(ctx); - - while (!list_empty(&list)) { - req = list_first_entry(&list, struct io_kiocb, compl.list); - list_del(&req->compl.list); - io_put_req(req); + all_flushed = list_empty(&ctx->cq_overflow_list); + if (all_flushed) { + clear_bit(0, &ctx->check_cq_overflow); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW); } - return cqe != NULL; + if (posted) + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + if (posted) + io_cqring_ev_posted(ctx); + return all_flushed; } -static void io_cqring_overflow_flush(struct io_ring_ctx 
*ctx, bool force, - struct task_struct *tsk, - struct files_struct *files) +static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx) { - if (test_bit(0, &ctx->cq_check_overflow)) { + bool ret = true; + + if (test_bit(0, &ctx->check_cq_overflow)) { /* iopoll syncs against uring_lock, not completion_lock */ if (ctx->flags & IORING_SETUP_IOPOLL) mutex_lock(&ctx->uring_lock); - __io_cqring_overflow_flush(ctx, force, tsk, files); + ret = __io_cqring_overflow_flush(ctx, false); if (ctx->flags & IORING_SETUP_IOPOLL) mutex_unlock(&ctx->uring_lock); } + + return ret; } -static void __io_cqring_fill_event(struct io_kiocb *req, long res, - unsigned int cflags) +/* must be called somewhat shortly after putting a request */ +static inline void io_put_task(struct task_struct *task, int nr) { - struct io_ring_ctx *ctx = req->ctx; - struct io_uring_cqe *cqe; + struct io_uring_task *tctx = task->io_uring; - trace_io_uring_complete(ctx, req->user_data, res); - - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. - */ - cqe = io_get_cqring(ctx); - if (likely(cqe)) { - WRITE_ONCE(cqe->user_data, req->user_data); - WRITE_ONCE(cqe->res, res); - WRITE_ONCE(cqe->flags, cflags); - } else if (ctx->cq_overflow_flushed || - atomic_read(&req->task->io_uring->in_idle)) { - /* - * If we're in ring overflow flush mode, or in task cancel mode, - * then we cannot store the request for later flushing, we need - * to drop it on the floor. - */ - ctx->cached_cq_overflow++; - WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow); + if (likely(task == current)) { + tctx->cached_refs += nr; } else { - if (list_empty(&ctx->cq_overflow_list)) { - set_bit(0, &ctx->sq_check_overflow); - set_bit(0, &ctx->cq_check_overflow); - ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; - } - io_clean_op(req); - req->result = res; - req->compl.cflags = cflags; - refcount_inc(&req->refs); - list_add_tail(&req->compl.list, &ctx->cq_overflow_list); + percpu_counter_sub(&tctx->inflight, nr); + if (unlikely(atomic_read(&tctx->in_idle))) + wake_up(&tctx->wait); + put_task_struct_many(task, nr); } } -static void io_cqring_fill_event(struct io_kiocb *req, long res) +static void io_task_refs_refill(struct io_uring_task *tctx) { - __io_cqring_fill_event(req, res, 0); + unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; + + percpu_counter_add(&tctx->inflight, refill); + refcount_add(refill, &current->usage); + tctx->cached_refs += refill; } -static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags) +static inline void io_get_task_refs(int nr) { - struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; - - spin_lock_irqsave(&ctx->completion_lock, flags); - __io_cqring_fill_event(req, res, cflags); - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); + struct io_uring_task *tctx = current->io_uring; - io_cqring_ev_posted(ctx); + tctx->cached_refs -= nr; + if (unlikely(tctx->cached_refs < 0)) + io_task_refs_refill(tctx); } -static void io_submit_flush_completions(struct io_comp_state *cs) +static __cold void io_uring_drop_tctx_refs(struct task_struct *task) { - struct io_ring_ctx *ctx = cs->ctx; + struct io_uring_task *tctx = task->io_uring; + unsigned int refs = tctx->cached_refs; - spin_lock_irq(&ctx->completion_lock); - while (!list_empty(&cs->list)) { - struct io_kiocb *req; + if (refs) { + tctx->cached_refs = 0; + percpu_counter_sub(&tctx->inflight, refs); + put_task_struct_many(task, refs); + } +} 
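+/*
+ * When the CQ ring is full, completions are stashed in io_overflow_cqe
+ * entries on ->cq_overflow_list and copied back into the ring by
+ * __io_cqring_overflow_flush() once user space has consumed entries.
+ */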
- req = list_first_entry(&cs->list, struct io_kiocb, compl.list); - list_del(&req->compl.list); - __io_cqring_fill_event(req, req->result, req->compl.cflags); +static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, + s32 res, u32 cflags) +{ + struct io_overflow_cqe *ocqe; + ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT); + if (!ocqe) { /* - * io_free_req() doesn't care about completion_lock unless one - * of these flags is set. REQ_F_WORK_INITIALIZED is in the list - * because of a potential deadlock with req->work.fs->lock + * If we're in ring overflow flush mode, or in task cancel mode, + * or cannot allocate an overflow entry, then we need to drop it + * on the floor. */ - if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT - |REQ_F_WORK_INITIALIZED)) { - spin_unlock_irq(&ctx->completion_lock); - io_put_req(req); - spin_lock_irq(&ctx->completion_lock); - } else { - io_put_req(req); - } + io_account_cq_overflow(ctx); + return false; } - io_commit_cqring(ctx); - spin_unlock_irq(&ctx->completion_lock); + if (list_empty(&ctx->cq_overflow_list)) { + set_bit(0, &ctx->check_cq_overflow); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW); - io_cqring_ev_posted(ctx); - cs->nr = 0; + } + ocqe->cqe.user_data = user_data; + ocqe->cqe.res = res; + ocqe->cqe.flags = cflags; + list_add_tail(&ocqe->list, &ctx->cq_overflow_list); + return true; } -static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags, - struct io_comp_state *cs) +static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data, + s32 res, u32 cflags) { - if (!cs) { - io_cqring_add_event(req, res, cflags); - io_put_req(req); - } else { - io_clean_op(req); - req->result = res; - req->compl.cflags = cflags; - list_add_tail(&req->compl.list, &cs->list); - if (++cs->nr >= 32) - io_submit_flush_completions(cs); + struct io_uring_cqe *cqe; + + trace_io_uring_complete(ctx, user_data, res, cflags); + + /* + * If we can't get a cq entry, userspace overflowed the + * submission (by quite a lot). Increment the overflow count in + * the ring. + */ + cqe = io_get_cqe(ctx); + if (likely(cqe)) { + WRITE_ONCE(cqe->user_data, user_data); + WRITE_ONCE(cqe->res, res); + WRITE_ONCE(cqe->flags, cflags); + return true; } + return io_cqring_event_overflow(ctx, user_data, res, cflags); } -static void io_req_complete(struct io_kiocb *req, long res) +static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags) { - __io_req_complete(req, res, 0, NULL); + __io_fill_cqe(req->ctx, req->user_data, res, cflags); } -static inline bool io_is_fallback_req(struct io_kiocb *req) +static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, + s32 res, u32 cflags) { - return req == (struct io_kiocb *) - ((unsigned long) req->ctx->fallback_req & ~1UL); + ctx->cq_extra++; + return __io_fill_cqe(ctx, user_data, res, cflags); } -static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx) +static void io_req_complete_post(struct io_kiocb *req, s32 res, + u32 cflags) { - struct io_kiocb *req; - - req = ctx->fallback_req; - if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req)) - return req; - - return NULL; -} + struct io_ring_ctx *ctx = req->ctx; -static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx, - struct io_submit_state *state) + spin_lock(&ctx->completion_lock); + __io_fill_cqe(ctx, req->user_data, res, cflags); + /* + * If we're the last reference to this request, add to our locked + * free_list cache. 
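+ * (the locked cache is spliced back into the submission-side free list
+ * in batches by io_flush_cached_reqs(), under uring_lock).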
+ */ + if (req_ref_put_and_test(req)) { + if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { + if (req->flags & IO_DISARM_MASK) + io_disarm_next(req); + if (req->link) { + io_req_task_queue(req->link); + req->link = NULL; + } + } + io_dismantle_req(req); + io_put_task(req->task, 1); + list_add(&req->inflight_entry, &ctx->locked_free_list); + ctx->locked_free_nr++; + } else { + if (!percpu_ref_tryget(&ctx->refs)) + req = NULL; + } + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + + if (req) { + io_cqring_ev_posted(ctx); + percpu_ref_put(&ctx->refs); + } +} + +static inline bool io_req_needs_clean(struct io_kiocb *req) { - if (!state->free_reqs) { - gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; - size_t sz; - int ret; + return req->flags & IO_REQ_CLEAN_FLAGS; +} - sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs)); - ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs); +static inline void io_req_complete_state(struct io_kiocb *req, s32 res, + u32 cflags) +{ + if (io_req_needs_clean(req)) + io_clean_op(req); + req->result = res; + req->compl.cflags = cflags; + req->flags |= REQ_F_COMPLETE_INLINE; +} - /* - * Bulk alloc is all-or-nothing. If we fail to get a batch, - * retry single alloc to be on the safe side. - */ - if (unlikely(ret <= 0)) { - state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); - if (!state->reqs[0]) - goto fallback; - ret = 1; - } - state->free_reqs = ret; +static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, + s32 res, u32 cflags) +{ + if (issue_flags & IO_URING_F_COMPLETE_DEFER) + io_req_complete_state(req, res, cflags); + else + io_req_complete_post(req, res, cflags); +} + +static inline void io_req_complete(struct io_kiocb *req, s32 res) +{ + __io_req_complete(req, 0, res, 0); +} + +static void io_req_complete_failed(struct io_kiocb *req, s32 res) +{ + req_set_fail(req); + io_req_complete_post(req, res, 0); +} + +static void io_req_complete_fail_submit(struct io_kiocb *req) +{ + /* + * We don't submit, fail them all, for that replace hardlinks with + * normal links. Extra REQ_F_LINK is tolerated. + */ + req->flags &= ~REQ_F_HARDLINK; + req->flags |= REQ_F_LINK; + io_req_complete_failed(req, req->result); +} + +/* + * Don't initialise the fields below on every allocation, but do that in + * advance and keep them valid across allocations. + */ +static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) +{ + req->ctx = ctx; + req->link = NULL; + req->async_data = NULL; + /* not necessary, but safer to zero */ + req->result = 0; +} + +static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, + struct io_submit_state *state) +{ + spin_lock(&ctx->completion_lock); + list_splice_init(&ctx->locked_free_list, &state->free_list); + ctx->locked_free_nr = 0; + spin_unlock(&ctx->completion_lock); +} + +/* Returns true IFF there are requests in the cache */ +static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) +{ + struct io_submit_state *state = &ctx->submit_state; + int nr; + + /* + * If we have more than a batch's worth of requests in our IRQ side + * locked cache, grab the lock and move them over to our submission + * side cache. 
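+ * (locked_free_nr is read without completion_lock; a stale value only
+ * delays the batch move, it doesn't affect correctness).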
+ */ + if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) + io_flush_cached_locked_reqs(ctx, state); + + nr = state->free_reqs; + while (!list_empty(&state->free_list)) { + struct io_kiocb *req = list_first_entry(&state->free_list, + struct io_kiocb, inflight_entry); + + list_del(&req->inflight_entry); + state->reqs[nr++] = req; + if (nr == ARRAY_SIZE(state->reqs)) + break; + } + + state->free_reqs = nr; + return nr != 0; +} + +/* + * A request might get retired back into the request caches even before opcode + * handlers and io_issue_sqe() are done with it, e.g. inline completion path. + * Because of that, io_alloc_req() should be called only under ->uring_lock + * and with extra caution to not get a request that is still worked on. + */ +static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + struct io_submit_state *state = &ctx->submit_state; + gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; + int ret, i; + + BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH); + + if (likely(state->free_reqs || io_flush_cached_reqs(ctx))) + goto got_req; + + ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH, + state->reqs); + + /* + * Bulk alloc is all-or-nothing. If we fail to get a batch, + * retry single alloc to be on the safe side. + */ + if (unlikely(ret <= 0)) { + state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); + if (!state->reqs[0]) + return NULL; + ret = 1; } + for (i = 0; i < ret; i++) + io_preinit_req(state->reqs[i], ctx); + state->free_reqs = ret; +got_req: state->free_reqs--; return state->reqs[state->free_reqs]; -fallback: - return io_get_fallback_req(ctx); } -static inline void io_put_file(struct io_kiocb *req, struct file *file, - bool fixed) +static inline void io_put_file(struct file *file) { - if (fixed) - percpu_ref_put(req->fixed_file_refs); - else + if (file) fput(file); } static void io_dismantle_req(struct io_kiocb *req) { - io_clean_op(req); + unsigned int flags = req->flags; - if (req->async_data) + if (io_req_needs_clean(req)) + io_clean_op(req); + if (!(flags & REQ_F_FIXED_FILE)) + io_put_file(req->file); + if (req->fixed_rsrc_refs) + percpu_ref_put(req->fixed_rsrc_refs); + if (req->async_data) { kfree(req->async_data); - if (req->file) - io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); - - io_req_clean_work(req); + req->async_data = NULL; + } } static void __io_free_req(struct io_kiocb *req) { - struct io_uring_task *tctx = req->task->io_uring; struct io_ring_ctx *ctx = req->ctx; io_dismantle_req(req); + io_put_task(req->task, 1); - percpu_counter_dec(&tctx->inflight); - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); - put_task_struct(req->task); + spin_lock(&ctx->completion_lock); + list_add(&req->inflight_entry, &ctx->locked_free_list); + ctx->locked_free_nr++; + spin_unlock(&ctx->completion_lock); - if (likely(!io_is_fallback_req(req))) - kmem_cache_free(req_cachep, req); - else - clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req); percpu_ref_put(&ctx->refs); } -static void io_kill_linked_timeout(struct io_kiocb *req) +static inline void io_remove_next_linked(struct io_kiocb *req) { - struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *link; - bool cancelled = false; - unsigned long flags; - - spin_lock_irqsave(&ctx->completion_lock, flags); - link = list_first_entry_or_null(&req->link_list, struct io_kiocb, - link_list); - /* - * Can happen if a linked timeout fired and link had been like - * req -> link t-out -> link t-out [-> ...] 
- */ - if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) { - struct io_timeout_data *io = link->async_data; - int ret; - - list_del_init(&link->link_list); - ret = hrtimer_try_to_cancel(&io->timer); - if (ret != -1) { - io_cqring_fill_event(link, -ECANCELED); - io_commit_cqring(ctx); - cancelled = true; - } - } - req->flags &= ~REQ_F_LINK_TIMEOUT; - spin_unlock_irqrestore(&ctx->completion_lock, flags); + struct io_kiocb *nxt = req->link; - if (cancelled) { - io_cqring_ev_posted(ctx); - io_put_req(link); - } + req->link = nxt->link; + nxt->link = NULL; } -static struct io_kiocb *io_req_link_next(struct io_kiocb *req) +static bool io_kill_linked_timeout(struct io_kiocb *req) + __must_hold(&req->ctx->completion_lock) + __must_hold(&req->ctx->timeout_lock) { - struct io_kiocb *nxt; + struct io_kiocb *link = req->link; - /* - * The list should never be empty when we are called here. But could - * potentially happen if the chain is messed up, check to be on the - * safe side. - */ - if (unlikely(list_empty(&req->link_list))) - return NULL; + if (link && link->opcode == IORING_OP_LINK_TIMEOUT) { + struct io_timeout_data *io = link->async_data; - nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list); - list_del_init(&req->link_list); - if (!list_empty(&nxt->link_list)) - nxt->flags |= REQ_F_LINK_HEAD; - return nxt; + io_remove_next_linked(req); + link->timeout.head = NULL; + if (hrtimer_try_to_cancel(&io->timer) != -1) { + list_del(&link->timeout.list); + io_fill_cqe_req(link, -ECANCELED, 0); + io_put_req_deferred(link); + return true; + } + } + return false; } -/* - * Called if REQ_F_LINK_HEAD is set, and we fail the head request - */ static void io_fail_links(struct io_kiocb *req) + __must_hold(&req->ctx->completion_lock) { - struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; + struct io_kiocb *nxt, *link = req->link; - spin_lock_irqsave(&ctx->completion_lock, flags); - while (!list_empty(&req->link_list)) { - struct io_kiocb *link = list_first_entry(&req->link_list, - struct io_kiocb, link_list); + req->link = NULL; + while (link) { + long res = -ECANCELED; - list_del_init(&link->link_list); - trace_io_uring_fail_link(req, link); + if (link->flags & REQ_F_FAIL) + res = link->result; - io_cqring_fill_event(link, -ECANCELED); + nxt = link->link; + link->link = NULL; - /* - * It's ok to free under spinlock as they're not linked anymore, - * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on - * work.fs->lock. 
- */ - if (link->flags & REQ_F_WORK_INITIALIZED) - io_put_req_deferred(link, 2); - else - io_double_put_req(link); + trace_io_uring_fail_link(req, link); + io_fill_cqe_req(link, res, 0); + io_put_req_deferred(link); + link = nxt; } +} - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); +static bool io_disarm_next(struct io_kiocb *req) + __must_hold(&req->ctx->completion_lock) +{ + bool posted = false; - io_cqring_ev_posted(ctx); + if (req->flags & REQ_F_ARM_LTIMEOUT) { + struct io_kiocb *link = req->link; + + req->flags &= ~REQ_F_ARM_LTIMEOUT; + if (link && link->opcode == IORING_OP_LINK_TIMEOUT) { + io_remove_next_linked(req); + io_fill_cqe_req(link, -ECANCELED, 0); + io_put_req_deferred(link); + posted = true; + } + } else if (req->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->timeout_lock); + posted = io_kill_linked_timeout(req); + spin_unlock_irq(&ctx->timeout_lock); + } + if (unlikely((req->flags & REQ_F_FAIL) && + !(req->flags & REQ_F_HARDLINK))) { + posted |= (req->link != NULL); + io_fail_links(req); + } + return posted; } static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) { - req->flags &= ~REQ_F_LINK_HEAD; - if (req->flags & REQ_F_LINK_TIMEOUT) - io_kill_linked_timeout(req); + struct io_kiocb *nxt; /* * If LINK is set, we have dependent requests in this chain. If we @@ -2033,28 +2126,112 @@ static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) * dependencies to the next request. In case of failure, fail the rest * of the chain. */ - if (likely(!(req->flags & REQ_F_FAIL_LINK))) - return io_req_link_next(req); - io_fail_links(req); - return NULL; + if (req->flags & IO_DISARM_MASK) { + struct io_ring_ctx *ctx = req->ctx; + bool posted; + + spin_lock(&ctx->completion_lock); + posted = io_disarm_next(req); + if (posted) + io_commit_cqring(req->ctx); + spin_unlock(&ctx->completion_lock); + if (posted) + io_cqring_ev_posted(ctx); + } + nxt = req->link; + req->link = NULL; + return nxt; } -static struct io_kiocb *io_req_find_next(struct io_kiocb *req) +static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) { - if (likely(!(req->flags & REQ_F_LINK_HEAD))) + if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) return NULL; return __io_req_find_next(req); } -static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) +static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) +{ + if (!ctx) + return; + if (*locked) { + if (ctx->submit_state.compl_nr) + io_submit_flush_completions(ctx); + mutex_unlock(&ctx->uring_lock); + *locked = false; + } + percpu_ref_put(&ctx->refs); +} + +static void tctx_task_work(struct callback_head *cb) +{ + bool locked = false; + struct io_ring_ctx *ctx = NULL; + struct io_uring_task *tctx = container_of(cb, struct io_uring_task, + task_work); + + while (1) { + struct io_wq_work_node *node; + + if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr) + io_submit_flush_completions(ctx); + + spin_lock_irq(&tctx->task_lock); + node = tctx->task_list.first; + INIT_WQ_LIST(&tctx->task_list); + if (!node) + tctx->task_running = false; + spin_unlock_irq(&tctx->task_lock); + if (!node) + break; + + do { + struct io_wq_work_node *next = node->next; + struct io_kiocb *req = container_of(node, struct io_kiocb, + io_task_work.node); + + if (req->ctx != ctx) { + ctx_flush_and_put(ctx, &locked); + ctx = req->ctx; + /* if not contended, grab and improve batching */ + locked = mutex_trylock(&ctx->uring_lock); + 
percpu_ref_get(&ctx->refs); + } + req->io_task_work.func(req, &locked); + node = next; + } while (node); + + cond_resched(); + } + + ctx_flush_and_put(ctx, &locked); + + /* relaxed read is enough as only the task itself sets ->in_idle */ + if (unlikely(atomic_read(&tctx->in_idle))) + io_uring_drop_tctx_refs(current); +} + +static void io_req_task_work_add(struct io_kiocb *req) { struct task_struct *tsk = req->task; - struct io_ring_ctx *ctx = req->ctx; + struct io_uring_task *tctx = tsk->io_uring; enum task_work_notify_mode notify; - int ret; + struct io_wq_work_node *node; + unsigned long flags; + bool running; + + WARN_ON_ONCE(!tctx); - if (tsk->flags & PF_EXITING) - return -ESRCH; + spin_lock_irqsave(&tctx->task_lock, flags); + wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); + running = tctx->task_running; + if (!running) + tctx->task_running = true; + spin_unlock_irqrestore(&tctx->task_lock, flags); + + /* task_work already pending, we're done */ + if (running) + return; /* * SQPOLL kernel thread doesn't need notification, just a wakeup. For @@ -2062,85 +2239,68 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) * processing task_work. There's no reliable way to tell if TWA_RESUME * will do the job. */ - notify = TWA_NONE; - if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) - notify = TWA_SIGNAL; - - ret = task_work_add(tsk, &req->task_work, notify); - if (!ret) + notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; + if (!task_work_add(tsk, &tctx->task_work, notify)) { wake_up_process(tsk); + return; + } - return ret; -} - -static void __io_req_task_cancel(struct io_kiocb *req, int error) -{ - struct io_ring_ctx *ctx = req->ctx; - - spin_lock_irq(&ctx->completion_lock); - io_cqring_fill_event(req, error); - io_commit_cqring(ctx); - spin_unlock_irq(&ctx->completion_lock); + spin_lock_irqsave(&tctx->task_lock, flags); + tctx->task_running = false; + node = tctx->task_list.first; + INIT_WQ_LIST(&tctx->task_list); + spin_unlock_irqrestore(&tctx->task_lock, flags); - io_cqring_ev_posted(ctx); - req_set_fail_links(req); - io_double_put_req(req); + while (node) { + req = container_of(node, struct io_kiocb, io_task_work.node); + node = node->next; + if (llist_add(&req->io_task_work.fallback_node, + &req->ctx->fallback_llist)) + schedule_delayed_work(&req->ctx->fallback_work, 1); + } } -static void io_req_task_cancel(struct callback_head *cb) +static void io_req_task_cancel(struct io_kiocb *req, bool *locked) { - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); struct io_ring_ctx *ctx = req->ctx; - mutex_lock(&ctx->uring_lock); - __io_req_task_cancel(req, -ECANCELED); - mutex_unlock(&ctx->uring_lock); - percpu_ref_put(&ctx->refs); + /* not needed for normal modes, but SQPOLL depends on it */ + io_tw_lock(ctx, locked); + io_req_complete_failed(req, req->result); } -static void __io_req_task_submit(struct io_kiocb *req) +static void io_req_task_submit(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; - mutex_lock(&ctx->uring_lock); - if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx)) - __io_queue_sqe(req, NULL); + io_tw_lock(ctx, locked); + /* req->task == current here, checking PF_EXITING is safe */ + if (likely(!(req->task->flags & PF_EXITING))) + __io_queue_sqe(req); else - __io_req_task_cancel(req, -EFAULT); - mutex_unlock(&ctx->uring_lock); - - if (ctx->flags & IORING_SETUP_SQPOLL) - io_sq_thread_drop_mm(); + io_req_complete_failed(req, -EFAULT); } -static void 
io_req_task_submit(struct callback_head *cb) +static void io_req_task_queue_fail(struct io_kiocb *req, int ret) { - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_ring_ctx *ctx = req->ctx; - - __io_req_task_submit(req); - percpu_ref_put(&ctx->refs); + req->result = ret; + req->io_task_work.func = io_req_task_cancel; + io_req_task_work_add(req); } static void io_req_task_queue(struct io_kiocb *req) { - int ret; - - init_task_work(&req->task_work, io_req_task_submit); - percpu_ref_get(&req->ctx->refs); - - ret = io_req_task_work_add(req, true); - if (unlikely(ret)) { - struct task_struct *tsk; + req->io_task_work.func = io_req_task_submit; + io_req_task_work_add(req); +} - init_task_work(&req->task_work, io_req_task_cancel); - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); - } +static void io_req_task_queue_reissue(struct io_kiocb *req) +{ + req->io_task_work.func = io_queue_async_work; + io_req_task_work_add(req); } -static void io_queue_next(struct io_kiocb *req) +static inline void io_queue_next(struct io_kiocb *req) { struct io_kiocb *nxt = io_req_find_next(req); @@ -2154,153 +2314,118 @@ static void io_free_req(struct io_kiocb *req) __io_free_req(req); } -struct req_batch { - void *reqs[IO_IOPOLL_BATCH]; - int to_free; +static void io_free_req_work(struct io_kiocb *req, bool *locked) +{ + io_free_req(req); +} +struct req_batch { struct task_struct *task; int task_refs; + int ctx_refs; }; static inline void io_init_req_batch(struct req_batch *rb) { - rb->to_free = 0; rb->task_refs = 0; + rb->ctx_refs = 0; rb->task = NULL; } -static void __io_req_free_batch_flush(struct io_ring_ctx *ctx, - struct req_batch *rb) +static void io_req_free_batch_finish(struct io_ring_ctx *ctx, + struct req_batch *rb) { - kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs); - percpu_ref_put_many(&ctx->refs, rb->to_free); - rb->to_free = 0; + if (rb->ctx_refs) + percpu_ref_put_many(&ctx->refs, rb->ctx_refs); + if (rb->task) + io_put_task(rb->task, rb->task_refs); } -static void io_req_free_batch_finish(struct io_ring_ctx *ctx, - struct req_batch *rb) +static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, + struct io_submit_state *state) { - if (rb->to_free) - __io_req_free_batch_flush(ctx, rb); - if (rb->task) { - struct io_uring_task *tctx = rb->task->io_uring; + io_queue_next(req); + io_dismantle_req(req); - percpu_counter_sub(&tctx->inflight, rb->task_refs); - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); - put_task_struct_many(rb->task, rb->task_refs); - rb->task = NULL; + if (req->task != rb->task) { + if (rb->task) + io_put_task(rb->task, rb->task_refs); + rb->task = req->task; + rb->task_refs = 0; } + rb->task_refs++; + rb->ctx_refs++; + + if (state->free_reqs != ARRAY_SIZE(state->reqs)) + state->reqs[state->free_reqs++] = req; + else + list_add(&req->inflight_entry, &state->free_list); } -static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req) +static void io_submit_flush_completions(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) { - if (unlikely(io_is_fallback_req(req))) { - io_free_req(req); - return; + struct io_submit_state *state = &ctx->submit_state; + int i, nr = state->compl_nr; + struct req_batch rb; + + spin_lock(&ctx->completion_lock); + for (i = 0; i < nr; i++) { + struct io_kiocb *req = state->compl_reqs[i]; + + __io_fill_cqe(ctx, req->user_data, req->result, + req->compl.cflags); } - if (req->flags & REQ_F_LINK_HEAD) - 
io_queue_next(req); + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + io_cqring_ev_posted(ctx); - if (req->task != rb->task) { - if (rb->task) { - struct io_uring_task *tctx = rb->task->io_uring; + io_init_req_batch(&rb); + for (i = 0; i < nr; i++) { + struct io_kiocb *req = state->compl_reqs[i]; - percpu_counter_sub(&tctx->inflight, rb->task_refs); - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); - put_task_struct_many(rb->task, rb->task_refs); - } - rb->task = req->task; - rb->task_refs = 0; + if (req_ref_put_and_test(req)) + io_req_free_batch(&rb, req, &ctx->submit_state); } - rb->task_refs++; - io_dismantle_req(req); - rb->reqs[rb->to_free++] = req; - if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs))) - __io_req_free_batch_flush(req->ctx, rb); + io_req_free_batch_finish(ctx, &rb); + state->compl_nr = 0; } /* * Drop reference to request, return next in chain (if there is one) if this * was the last reference to this request. */ -static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) +static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) { struct io_kiocb *nxt = NULL; - if (refcount_dec_and_test(&req->refs)) { + if (req_ref_put_and_test(req)) { nxt = io_req_find_next(req); __io_free_req(req); } return nxt; } -static void io_put_req(struct io_kiocb *req) +static inline void io_put_req(struct io_kiocb *req) { - if (refcount_dec_and_test(&req->refs)) + if (req_ref_put_and_test(req)) io_free_req(req); } -static void io_put_req_deferred_cb(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - - io_free_req(req); -} - -static void io_free_req_deferred(struct io_kiocb *req) +static inline void io_put_req_deferred(struct io_kiocb *req) { - int ret; - - init_task_work(&req->task_work, io_put_req_deferred_cb); - ret = io_req_task_work_add(req, true); - if (unlikely(ret)) { - struct task_struct *tsk; - - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); + if (req_ref_put_and_test(req)) { + req->io_task_work.func = io_free_req_work; + io_req_task_work_add(req); } } -static inline void io_put_req_deferred(struct io_kiocb *req, int refs) -{ - if (refcount_sub_and_test(refs, &req->refs)) - io_free_req_deferred(req); -} - -static struct io_wq_work *io_steal_work(struct io_kiocb *req) -{ - struct io_kiocb *nxt; - - /* - * A ref is owned by io-wq in which context we're. So, if that's the - * last one, it's safe to steal next work. False negatives are Ok, - * it just will be re-punted async in io_put_work() - */ - if (refcount_read(&req->refs) != 1) - return NULL; - - nxt = io_req_find_next(req); - return nxt ? 
&nxt->work : NULL; -} - -static void io_double_put_req(struct io_kiocb *req) -{ - /* drop both submit and complete references */ - if (refcount_sub_and_test(2, &req->refs)) - io_free_req(req); -} - static unsigned io_cqring_events(struct io_ring_ctx *ctx) { - struct io_rings *rings = ctx->rings; - /* See comment at the top of this file */ smp_rmb(); - return ctx->cached_cq_tail - READ_ONCE(rings->cq.head); + return __io_cqring_events(ctx); } static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) @@ -2326,38 +2451,23 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) { struct io_buffer *kbuf; + if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + return 0; kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; return io_put_kbuf(req, kbuf); } static inline bool io_run_task_work(void) { - /* - * Not safe to run on exiting task, and the task_work handling will - * not add work to such a task. - */ - if (unlikely(current->flags & PF_EXITING)) - return false; - if (current->task_works) { + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { __set_current_state(TASK_RUNNING); - task_work_run(); + tracehook_notify_signal(); return true; } return false; } -static void io_iopoll_queue(struct list_head *again) -{ - struct io_kiocb *req; - - do { - req = list_first_entry(again, struct io_kiocb, iopoll_entry); - list_del(&req->iopoll_entry); - __io_complete_rw(req, -EAGAIN, 0, NULL); - } while (!list_empty(again)); -} - /* * Find and free completed poll iocbs */ @@ -2366,41 +2476,39 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, { struct req_batch rb; struct io_kiocb *req; - LIST_HEAD(again); /* order with ->result store in io_complete_rw_iopoll() */ smp_rmb(); io_init_req_batch(&rb); while (!list_empty(done)) { - int cflags = 0; - - req = list_first_entry(done, struct io_kiocb, iopoll_entry); - if (READ_ONCE(req->result) == -EAGAIN) { - req->result = 0; - req->iopoll_completed = 0; - list_move_tail(&req->iopoll_entry, &again); - continue; - } - list_del(&req->iopoll_entry); + struct io_uring_cqe *cqe; + unsigned cflags; - if (req->flags & REQ_F_BUFFER_SELECTED) - cflags = io_put_rw_kbuf(req); - - __io_cqring_fill_event(req, req->result, cflags); + req = list_first_entry(done, struct io_kiocb, inflight_entry); + list_del(&req->inflight_entry); + cflags = io_put_rw_kbuf(req); (*nr_events)++; - if (refcount_dec_and_test(&req->refs)) - io_req_free_batch(&rb, req); + cqe = io_get_cqe(ctx); + if (cqe) { + WRITE_ONCE(cqe->user_data, req->user_data); + WRITE_ONCE(cqe->res, req->result); + WRITE_ONCE(cqe->flags, cflags); + } else { + spin_lock(&ctx->completion_lock); + io_cqring_event_overflow(ctx, req->user_data, + req->result, cflags); + spin_unlock(&ctx->completion_lock); + } + + if (req_ref_put_and_test(req)) + io_req_free_batch(&rb, req, &ctx->submit_state); } io_commit_cqring(ctx); - if (ctx->flags & IORING_SETUP_SQPOLL) - io_cqring_ev_posted(ctx); + io_cqring_ev_posted_iopoll(ctx); io_req_free_batch_finish(ctx, &rb); - - if (!list_empty(&again)) - io_iopoll_queue(&again); } static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, @@ -2409,17 +2517,16 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, struct io_kiocb *req, *tmp; LIST_HEAD(done); bool spin; - int ret; /* * Only spin for completions if we don't have multiple devices hanging * off our complete list, and we're under the requested amount. 
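+ * With requests spread over more than one queue, spinning in one
+ * driver cannot reap completions owned by another, which is why the
+ * poll_multi_queue check below disables spinning in that case.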
*/ - spin = !ctx->poll_multi_file && *nr_events < min; + spin = !ctx->poll_multi_queue && *nr_events < min; - ret = 0; - list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_entry) { + list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { struct kiocb *kiocb = &req->rw.kiocb; + int ret; /* * Move completed and retryable entries to our local lists. @@ -2427,50 +2534,27 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, * and complete those lists first, if we have entries there. */ if (READ_ONCE(req->iopoll_completed)) { - list_move_tail(&req->iopoll_entry, &done); + list_move_tail(&req->inflight_entry, &done); continue; } if (!list_empty(&done)) break; ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); - if (ret < 0) - break; + if (unlikely(ret < 0)) + return ret; + else if (ret) + spin = false; /* iopoll may have completed current req */ if (READ_ONCE(req->iopoll_completed)) - list_move_tail(&req->iopoll_entry, &done); - - if (ret && spin) - spin = false; - ret = 0; + list_move_tail(&req->inflight_entry, &done); } if (!list_empty(&done)) io_iopoll_complete(ctx, nr_events, &done); - return ret; -} - -/* - * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a - * non-spinning poll check - we'll still enter the driver poll loop, but only - * as a non-spinning completion check. - */ -static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, - long min) -{ - while (!list_empty(&ctx->iopoll_list) && !need_resched()) { - int ret; - - ret = io_do_iopoll(ctx, nr_events, min); - if (ret < 0) - return ret; - if (*nr_events >= min) - return 0; - } - - return 1; + return 0; } /* @@ -2508,7 +2592,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) static int io_iopoll_check(struct io_ring_ctx *ctx, long min) { unsigned int nr_events = 0; - int iters = 0, ret = 0; + int ret = 0; /* * We disallow the app entering submit/complete with polling, but we @@ -2516,17 +2600,16 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) * that got punted to a workqueue. */ mutex_lock(&ctx->uring_lock); + /* + * Don't enter poll loop if we already have events pending. + * If we do, we can potentially be spinning for commands that + * already triggered a CQE (eg in error). + */ + if (test_bit(0, &ctx->check_cq_overflow)) + __io_cqring_overflow_flush(ctx, false); + if (io_cqring_events(ctx)) + goto out; do { - /* - * Don't enter poll loop if we already have events pending. - * If we do, we can potentially be spinning for commands that - * already triggered a CQE (eg in error). - */ - if (test_bit(0, &ctx->cq_check_overflow)) - __io_cqring_overflow_flush(ctx, false, NULL, NULL); - if (io_cqring_events(ctx)) - break; - /* * If a submit got punted to a workqueue, we can have the * application entering polling for a command before it gets @@ -2537,18 +2620,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) * forever, while the workqueue is stuck trying to acquire the * very same mutex. 
*/ - if (!(++iters & 7)) { + if (list_empty(&ctx->iopoll_list)) { + u32 tail = ctx->cached_cq_tail; + mutex_unlock(&ctx->uring_lock); io_run_task_work(); mutex_lock(&ctx->uring_lock); - } - - ret = io_iopoll_getevents(ctx, &nr_events, min); - if (ret <= 0) - break; - ret = 0; - } while (min && !nr_events && !need_resched()); + /* some requests don't go through iopoll_list */ + if (tail != ctx->cached_cq_tail || + list_empty(&ctx->iopoll_list)) + break; + } + ret = io_do_iopoll(ctx, &nr_events, min); + } while (!ret && nr_events < min && !need_resched()); +out: mutex_unlock(&ctx->uring_lock); return ret; } @@ -2560,79 +2646,142 @@ static void kiocb_end_write(struct io_kiocb *req) * thread. */ if (req->flags & REQ_F_ISREG) { - struct inode *inode = file_inode(req->file); + struct super_block *sb = file_inode(req->file)->i_sb; - __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); + __sb_writers_acquired(sb, SB_FREEZE_WRITE); + sb_end_write(sb); } - file_end_write(req->file); -} - -static void io_complete_rw_common(struct kiocb *kiocb, long res, - struct io_comp_state *cs) -{ - struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); - int cflags = 0; - - if (kiocb->ki_flags & IOCB_WRITE) - kiocb_end_write(req); - - if (res != req->result) - req_set_fail_links(req); - if (req->flags & REQ_F_BUFFER_SELECTED) - cflags = io_put_rw_kbuf(req); - __io_req_complete(req, res, cflags, cs); } #ifdef CONFIG_BLOCK -static bool io_resubmit_prep(struct io_kiocb *req, int error) +static bool io_resubmit_prep(struct io_kiocb *req) { - req_set_fail_links(req); - return false; + struct io_async_rw *rw = req->async_data; + + if (!rw) + return !io_req_prep_async(req); + iov_iter_restore(&rw->iter, &rw->iter_state); + return true; } -#endif -static bool io_rw_reissue(struct io_kiocb *req, long res) +static bool io_rw_should_reissue(struct io_kiocb *req) { -#ifdef CONFIG_BLOCK umode_t mode = file_inode(req->file)->i_mode; - int ret; + struct io_ring_ctx *ctx = req->ctx; if (!S_ISBLK(mode) && !S_ISREG(mode)) return false; - if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker()) + if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && + !(ctx->flags & IORING_SETUP_IOPOLL))) return false; /* * If ref is dying, we might be running poll reap from the exit work. * Don't attempt to reissue from that path, just let it fail with * -EAGAIN. */ - if (percpu_ref_is_dying(&req->ctx->refs)) + if (percpu_ref_is_dying(&ctx->refs)) + return false; + /* + * Play it safe and assume not safe to re-import and reissue if we're + * not in the original thread group (or in task context). + */ + if (!same_thread_group(req->task, current) || !in_task()) return false; + return true; +} +#else +static bool io_resubmit_prep(struct io_kiocb *req) +{ + return false; +} +static bool io_rw_should_reissue(struct io_kiocb *req) +{ + return false; +} +#endif - ret = io_sq_thread_acquire_mm(req->ctx, req); +/* + * Trigger the notifications after having done some IO, and finish the write + * accounting, if any. 
+ */ +static void io_req_io_end(struct io_kiocb *req) +{ + struct io_rw *rw = &req->rw; - if (io_resubmit_prep(req, ret)) { - refcount_inc(&req->refs); - io_queue_async_work(req); - return true; + if (rw->kiocb.ki_flags & IOCB_WRITE) { + kiocb_end_write(req); + fsnotify_modify(req->file); + } else { + fsnotify_access(req->file); } +} -#endif +static bool __io_complete_rw_common(struct io_kiocb *req, long res) +{ + if (res != req->result) { + if ((res == -EAGAIN || res == -EOPNOTSUPP) && + io_rw_should_reissue(req)) { + /* + * Reissue will start accounting again, finish the + * current cycle. + */ + io_req_io_end(req); + req->flags |= REQ_F_REISSUE; + return true; + } + req_set_fail(req); + req->result = res; + } return false; } -static void __io_complete_rw(struct io_kiocb *req, long res, long res2, - struct io_comp_state *cs) +static inline int io_fixup_rw_res(struct io_kiocb *req, long res) +{ + struct io_async_rw *io = req->async_data; + + /* add previously done IO, if any */ + if (io && io->bytes_done > 0) { + if (res < 0) + res = io->bytes_done; + else + res += io->bytes_done; + } + return res; +} + +static void io_req_task_complete(struct io_kiocb *req, bool *locked) +{ + unsigned int cflags = io_put_rw_kbuf(req); + int res = req->result; + + if (*locked) { + struct io_ring_ctx *ctx = req->ctx; + struct io_submit_state *state = &ctx->submit_state; + + io_req_complete_state(req, res, cflags); + state->compl_reqs[state->compl_nr++] = req; + if (state->compl_nr == ARRAY_SIZE(state->compl_reqs)) + io_submit_flush_completions(ctx); + } else { + io_req_complete_post(req, res, cflags); + } +} + +static void io_req_rw_complete(struct io_kiocb *req, bool *locked) { - if (!io_rw_reissue(req, res)) - io_complete_rw_common(&req->rw.kiocb, res, cs); + io_req_io_end(req); + io_req_task_complete(req, locked); } static void io_complete_rw(struct kiocb *kiocb, long res, long res2) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); - __io_complete_rw(req, res, res2, NULL); + if (__io_complete_rw_common(req, res)) + return; + req->result = io_fixup_rw_res(req, res); + req->io_task_work.func = io_req_rw_complete; + io_req_task_work_add(req); } static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) @@ -2641,12 +2790,15 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) if (kiocb->ki_flags & IOCB_WRITE) kiocb_end_write(req); - - if (res != -EAGAIN && res != req->result) - req_set_fail_links(req); + if (unlikely(res != req->result)) { + if (res == -EAGAIN && io_rw_should_reissue(req)) { + req->flags |= REQ_F_REISSUE; + return; + } + } WRITE_ONCE(req->result, res); - /* order with io_poll_complete() checking ->result */ + /* order with io_iopoll_complete() checking ->result */ smp_wmb(); WRITE_ONCE(req->iopoll_completed, 1); } @@ -2654,12 +2806,17 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) /* * After the iocb has been issued, it's safe to be found on the poll list. * Adding the kiocb to the list AFTER submission ensures that we don't - * find it from a io_iopoll_getevents() thread before the issuer is done + * find it from an io_do_iopoll() thread before the issuer is done * accessing the kiocb cookie.
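+ * For io-wq workers the iopoll list is additionally guarded by
+ * uring_lock, which io_iopoll_req_issued() takes below for the
+ * async case.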
*/ static void io_iopoll_req_issued(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; + const bool in_async = io_wq_current_is_worker(); + + /* workqueue context doesn't hold uring_lock, grab it now */ + if (unlikely(in_async)) + mutex_lock(&ctx->uring_lock); /* * Track whether we have multiple files in our lists. This will impact @@ -2667,14 +2824,22 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * different devices. */ if (list_empty(&ctx->iopoll_list)) { - ctx->poll_multi_file = false; - } else if (!ctx->poll_multi_file) { + ctx->poll_multi_queue = false; + } else if (!ctx->poll_multi_queue) { struct io_kiocb *list_req; + unsigned int queue_num0, queue_num1; list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, - iopoll_entry); - if (list_req->file != req->file) - ctx->poll_multi_file = true; + inflight_entry); + + if (list_req->file != req->file) { + ctx->poll_multi_queue = true; + } else { + queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie); + queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); + if (queue_num0 != queue_num1) + ctx->poll_multi_queue = true; + } } /* @@ -2682,61 +2847,28 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * it to the front so we find it first. */ if (READ_ONCE(req->iopoll_completed)) - list_add(&req->iopoll_entry, &ctx->iopoll_list); + list_add(&req->inflight_entry, &ctx->iopoll_list); else - list_add_tail(&req->iopoll_entry, &ctx->iopoll_list); - - if ((ctx->flags & IORING_SETUP_SQPOLL) && - wq_has_sleeper(&ctx->sq_data->wait)) - wake_up(&ctx->sq_data->wait); -} + list_add_tail(&req->inflight_entry, &ctx->iopoll_list); -static void __io_state_file_put(struct io_submit_state *state) -{ - if (state->has_refs) - fput_many(state->file, state->has_refs); - state->file = NULL; -} - -static inline void io_state_file_put(struct io_submit_state *state) -{ - if (state->file) - __io_state_file_put(state); -} - -/* - * Get as many references to a file as we have IOs left in this submission, - * assuming most submissions are for one file, or at least that each file - * has more than one submission. - */ -static struct file *__io_file_get(struct io_submit_state *state, int fd) -{ - if (!state) - return fget(fd); + if (unlikely(in_async)) { + /* + * If IORING_SETUP_SQPOLL is enabled, sqes are either handled + * in sq thread task context or in io worker task context. If + * current task context is sq thread, we don't need to check + * whether we should wake up the sq thread. + */ + if ((ctx->flags & IORING_SETUP_SQPOLL) && + wq_has_sleeper(&ctx->sq_data->wait)) + wake_up(&ctx->sq_data->wait); - if (state->file) { - if (state->fd == fd) { - state->has_refs--; - return state->file; - } - __io_state_file_put(state); + mutex_unlock(&ctx->uring_lock); } - state->file = fget_many(fd, state->ios_left); - if (!state->file) - return NULL; - - state->fd = fd; - state->has_refs = state->ios_left - 1; - return state->file; } static bool io_bdev_nowait(struct block_device *bdev) { -#ifdef CONFIG_BLOCK return !bdev || blk_queue_nowait(bdev_get_queue(bdev)); -#else - return true; -#endif } /* @@ -2744,19 +2876,21 @@ static bool io_bdev_nowait(struct block_device *bdev) * any file. For now, just ensure that anything potentially problematic is done * inline.
-static bool io_file_supports_async(struct file *file, int rw) +static bool __io_file_supports_nowait(struct file *file, int rw) { umode_t mode = file_inode(file)->i_mode; if (S_ISBLK(mode)) { - if (io_bdev_nowait(file->f_inode->i_bdev)) + if (IS_ENABLED(CONFIG_BLOCK) && + io_bdev_nowait(I_BDEV(file->f_mapping->host))) return true; return false; } if (S_ISSOCK(mode)) return true; if (S_ISREG(mode)) { - if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) && + if (IS_ENABLED(CONFIG_BLOCK) && + io_bdev_nowait(file->f_inode->i_sb->s_bdev) && file->f_op != &io_uring_fops) return true; return false; @@ -2775,27 +2909,44 @@ static bool io_bdev_nowait(struct block_device *bdev) return file->f_op->write_iter != NULL; } -static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static bool io_file_supports_nowait(struct io_kiocb *req, int rw) +{ + if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) + return true; + else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) + return true; + + return __io_file_supports_nowait(req->file, rw); +} + +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, + int rw) { struct io_ring_ctx *ctx = req->ctx; struct kiocb *kiocb = &req->rw.kiocb; + struct file *file = req->file; unsigned ioprio; int ret; - if (S_ISREG(file_inode(req->file)->i_mode)) + if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) req->flags |= REQ_F_ISREG; kiocb->ki_pos = READ_ONCE(sqe->off); - if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) { - req->flags |= REQ_F_CUR_POS; - kiocb->ki_pos = req->file->f_pos; - } kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); kiocb->ki_flags = iocb_flags(kiocb->ki_filp); ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); if (unlikely(ret)) return ret; + /* + * If the file is marked O_NONBLOCK, still allow retry for it if it + * supports async. Otherwise it's impossible to use O_NONBLOCK files + * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
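+ * Net effect: REQ_F_NOWAIT ends up set when the submitter asked for
+ * IOCB_NOWAIT (e.g. via RWF_NOWAIT), or when the fd is O_NONBLOCK but
+ * the file can't serve nowait IO anyway.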
+ */ + if ((kiocb->ki_flags & IOCB_NOWAIT) || + ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) + req->flags |= REQ_F_NOWAIT; + ioprio = READ_ONCE(sqe->ioprio); if (ioprio) { ret = ioprio_check_cap(ioprio); @@ -2806,10 +2957,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) } else kiocb->ki_ioprio = get_current_ioprio(); - /* don't allow async punt if RWF_NOWAIT was requested */ - if (kiocb->ki_flags & IOCB_NOWAIT) - req->flags |= REQ_F_NOWAIT; - if (ctx->flags & IORING_SETUP_IOPOLL) { if (!(kiocb->ki_flags & IOCB_DIRECT) || !kiocb->ki_filp->f_op->iopoll) @@ -2824,9 +2971,24 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) kiocb->ki_complete = io_complete_rw; } + /* used for fixed read/write too - just read unconditionally */ + req->buf_index = READ_ONCE(sqe->buf_index); + req->imu = NULL; + + if (req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED) { + struct io_ring_ctx *ctx = req->ctx; + u16 index; + + if (unlikely(req->buf_index >= ctx->nr_user_bufs)) + return -EFAULT; + index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); + req->imu = ctx->user_bufs[index]; + io_req_set_rsrc_node(req); + } + req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); - req->buf_index = READ_ONCE(sqe->buf_index); return 0; } @@ -2851,49 +3013,77 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) } } -static void kiocb_done(struct kiocb *kiocb, ssize_t ret, - struct io_comp_state *cs) +static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) { - struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); - struct io_async_rw *io = req->async_data; + struct kiocb *kiocb = &req->rw.kiocb; - /* add previously done IO, if any */ - if (io && io->bytes_done > 0) { - if (ret < 0) - ret = io->bytes_done; - else - ret += io->bytes_done; + if (kiocb->ki_pos != -1) + return &kiocb->ki_pos; + + if (!(req->file->f_mode & FMODE_STREAM)) { + req->flags |= REQ_F_CUR_POS; + kiocb->ki_pos = req->file->f_pos; + return &kiocb->ki_pos; } + kiocb->ki_pos = 0; + return NULL; +} + +static void kiocb_done(struct kiocb *kiocb, ssize_t ret, + unsigned int issue_flags) +{ + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); + if (req->flags & REQ_F_CUR_POS) req->file->f_pos = kiocb->ki_pos; - if (ret >= 0 && kiocb->ki_complete == io_complete_rw) - __io_complete_rw(req, ret, 0, cs); - else + if (ret >= 0 && (kiocb->ki_complete == io_complete_rw)) { + if (!__io_complete_rw_common(req, ret)) { + /* + * Safe to call io_end from here as we're inline + * from the submission path. 
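+ * That way io_req_io_end() runs fsnotify and the write-end accounting
+ * in the submitting task instead of bouncing through task_work.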
+ */ + io_req_io_end(req); + __io_req_complete(req, issue_flags, + io_fixup_rw_res(req, ret), + io_put_rw_kbuf(req)); + } + } else { io_rw_done(kiocb, ret); + } + + if (req->flags & REQ_F_REISSUE) { + req->flags &= ~REQ_F_REISSUE; + if (io_resubmit_prep(req)) { + io_req_task_queue_reissue(req); + } else { + unsigned int cflags = io_put_rw_kbuf(req); + struct io_ring_ctx *ctx = req->ctx; + + ret = io_fixup_rw_res(req, ret); + req_set_fail(req); + if (!(issue_flags & IO_URING_F_NONBLOCK)) { + mutex_lock(&ctx->uring_lock); + __io_req_complete(req, issue_flags, ret, cflags); + mutex_unlock(&ctx->uring_lock); + } else { + __io_req_complete(req, issue_flags, ret, cflags); + } + } + } } -static ssize_t io_import_fixed(struct io_kiocb *req, int rw, - struct iov_iter *iter) +static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, + struct io_mapped_ubuf *imu) { - struct io_ring_ctx *ctx = req->ctx; size_t len = req->rw.len; - struct io_mapped_ubuf *imu; - u16 index, buf_index = req->buf_index; + u64 buf_end, buf_addr = req->rw.addr; size_t offset; - u64 buf_addr; - if (unlikely(buf_index >= ctx->nr_user_bufs)) - return -EFAULT; - index = array_index_nospec(buf_index, ctx->nr_user_bufs); - imu = &ctx->user_bufs[index]; - buf_addr = req->rw.addr; - - /* overflow */ - if (buf_addr + len < buf_addr) + if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end))) return -EFAULT; /* not inside the mapped region */ - if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len) + if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end)) return -EFAULT; /* @@ -2938,7 +3128,14 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw, } } - return len; + return 0; +} + +static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) +{ + if (WARN_ON_ONCE(!req->imu)) + return -EFAULT; + return __io_import_fixed(req, rw, iter, req->imu); } static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock) @@ -3079,16 +3276,14 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, needs_lock); } -static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock) +static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, + struct iov_iter *iter, bool needs_lock) { void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; + u8 opcode = req->opcode; ssize_t ret; - u8 opcode; - opcode = req->opcode; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { *iovec = NULL; return io_import_fixed(req, rw, iter); @@ -3113,10 +3308,8 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, if (req->flags & REQ_F_BUFFER_SELECT) { ret = io_iov_buffer_select(req, *iovec, needs_lock); - if (!ret) { - ret = (*iovec)->iov_len; - iov_iter_init(iter, rw, *iovec, 1, ret); - } + if (!ret) + iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len); *iovec = NULL; return ret; } @@ -3125,18 +3318,6 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, req->ctx->compat); } -static ssize_t io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock) -{ - struct io_async_rw *iorw = req->async_data; - - if (!iorw) - return __io_import_iovec(rw, req, iovec, iter, needs_lock); - *iovec = NULL; - return 0; -} - static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) { return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? 
NULL : &kiocb->ki_pos; @@ -3151,6 +3332,7 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) struct kiocb *kiocb = &req->rw.kiocb; struct file *file = req->file; ssize_t ret = 0; + loff_t *ppos; /* * Don't support polled IO through this interface, and we can't @@ -3162,6 +3344,8 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) if (kiocb->ki_flags & IOCB_NOWAIT) return -EAGAIN; + ppos = io_kiocb_ppos(kiocb); + while (iov_iter_count(iter)) { struct iovec iovec; ssize_t nr; @@ -3175,10 +3359,10 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, - iovec.iov_len, io_kiocb_ppos(kiocb)); + iovec.iov_len, ppos); } else { nr = file->f_op->write(file, iovec.iov_base, - iovec.iov_len, io_kiocb_ppos(kiocb)); + iovec.iov_len, ppos); } if (nr < 0) { @@ -3229,32 +3413,31 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, } } -static inline int __io_alloc_async_data(struct io_kiocb *req) +static inline int io_alloc_async_data(struct io_kiocb *req) { WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); return req->async_data == NULL; } -static int io_alloc_async_data(struct io_kiocb *req) -{ - if (!io_op_defs[req->opcode].needs_async_data) - return 0; - - return __io_alloc_async_data(req); -} - static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, const struct iovec *fast_iov, struct iov_iter *iter, bool force) { - if (!force && !io_op_defs[req->opcode].needs_async_data) + if (!force && !io_op_defs[req->opcode].needs_async_setup) return 0; if (!req->async_data) { - if (__io_alloc_async_data(req)) + struct io_async_rw *iorw; + + if (io_alloc_async_data(req)) { + kfree(iovec); return -ENOMEM; + } io_req_map_rw(req, iovec, fast_iov, iter); + iorw = req->async_data; + /* we've copied and mapped the iter, ensure state is saved */ + iov_iter_save_state(&iorw->iter, &iorw->iter_state); } return 0; } @@ -3263,9 +3446,9 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) { struct io_async_rw *iorw = req->async_data; struct iovec *iov = iorw->fast_iov; - ssize_t ret; + int ret; - ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false); + ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); if (unlikely(ret < 0)) return ret; @@ -3273,24 +3456,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) iorw->free_iovec = iov; if (iov) req->flags |= REQ_F_NEED_CLEANUP; + iov_iter_save_state(&iorw->iter, &iorw->iter_state); return 0; } static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - ssize_t ret; - - ret = io_prep_rw(req, sqe); - if (ret) - return ret; - if (unlikely(!(req->file->f_mode & FMODE_READ))) return -EBADF; - - /* either don't need iovec imported or already have it */ - if (!req->async_data) - return 0; - return io_rw_prep_async(req, READ); + return io_prep_rw(req, sqe, READ); } /* @@ -3309,7 +3483,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, struct wait_page_queue *wpq; struct io_kiocb *req = wait->private; struct wait_page_key *key = arg; - int ret; wpq = container_of(wait, struct wait_page_queue, wait); @@ -3318,22 +3491,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; list_del_init(&wait->entry); - - init_task_work(&req->task_work, io_req_task_submit); - 
percpu_ref_get(&req->ctx->refs); - - /* submit ref gets dropped, acquire a new one */ - refcount_inc(&req->refs); - ret = io_req_task_work_add(req, true); - if (unlikely(ret)) { - struct task_struct *tsk; - - /* queue just for cancelation */ - init_task_work(&req->task_work, io_req_task_cancel); - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); - } + io_req_task_queue(req); return 1; } @@ -3380,7 +3538,7 @@ static bool io_rw_should_retry(struct io_kiocb *req) return true; } -static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) +static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) { if (req->file->f_op->read_iter) return call_read_iter(req->file, &req->rw.kiocb, iter); @@ -3390,27 +3548,41 @@ static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) return -EINVAL; } -static int io_read(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static bool need_read_all(struct io_kiocb *req) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; - struct kiocb *kiocb = &req->rw.kiocb; + return req->flags & REQ_F_ISREG || + S_ISBLK(file_inode(req->file)->i_mode); +} + +static int io_read(struct io_kiocb *req, unsigned int issue_flags) +{ + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; - struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; - ssize_t io_size, ret, ret2; - bool no_async; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + struct iov_iter_state __state, *state; + ssize_t ret, ret2; + loff_t *ppos; - if (rw) + if (rw) { iter = &rw->iter; - - ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); - if (ret < 0) - return ret; - iter_cp = *iter; - io_size = iov_iter_count(iter); - req->result = io_size; - ret = 0; + state = &rw->iter_state; + /* + * We come here from an earlier attempt; restore our state to + * match in case it no longer matches. It's cheap enough that + * we don't need to make this conditional.
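+ *
+ * The save/restore contract used in this function, in short:
+ *
+ *	iov_iter_save_state(iter, state);	(before the first attempt)
+ *	ret = io_iter_do_read(req, iter);
+ *	iov_iter_restore(iter, state);	(before any retry or async punt)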
+ */ + iov_iter_restore(iter, state); + iovec = NULL; + } else { + ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); + if (ret < 0) + return ret; + state = &__state; + iov_iter_save_state(iter, state); + } + req->result = iov_iter_count(iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3418,127 +3590,133 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, else kiocb->ki_flags |= IOCB_NOWAIT; - /* If the file doesn't support async, just async punt */ - no_async = force_nonblock && !io_file_supports_async(req->file, READ); - if (no_async) - goto copy_iov; + if (force_nonblock && !io_file_supports_nowait(req, READ)) { + ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + return ret ?: -EAGAIN; + } - ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size); - if (unlikely(ret)) - goto out_free; + ppos = io_kiocb_update_pos(req); + + ret = rw_verify_area(READ, req->file, ppos, req->result); + if (unlikely(ret)) { + kfree(iovec); + return ret; + } ret = io_iter_do_read(req, iter); - if (!ret) { - goto done; - } else if (ret == -EIOCBQUEUED) { - ret = 0; - goto out_free; - } else if (ret == -EAGAIN) { + if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { + req->flags &= ~REQ_F_REISSUE; /* IOPOLL retry should happen for io-wq threads */ if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) goto done; - /* no retry on NONBLOCK marked file */ - if (req->file->f_flags & O_NONBLOCK) + /* no retry on NONBLOCK nor RWF_NOWAIT */ + if (req->flags & REQ_F_NOWAIT) goto done; - /* some cases will consume bytes even on error returns */ - *iter = iter_cp; ret = 0; - goto copy_iov; - } else if (ret < 0) { - /* make sure -ERESTARTSYS -> -EINTR is done */ + } else if (ret == -EIOCBQUEUED) { + goto out_free; + } else if (ret <= 0 || ret == req->result || !force_nonblock || + (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { + /* read all, failed, already did sync or don't want to retry */ goto done; } - /* read it all, or we did blocking attempt. no retry. */ - if (!iov_iter_count(iter) || !force_nonblock || - (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG)) - goto done; + /* + * Don't depend on the iter state matching what was consumed, or being + * untouched in case of error. Restore it and we'll advance it + * manually if we need to. + */ + iov_iter_restore(iter, state); - io_size -= ret; -copy_iov: ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); - if (ret2) { - ret = ret2; - goto out_free; - } - if (no_async) - return -EAGAIN; - rw = req->async_data; - /* it's copied and will be cleaned with ->io */ - iovec = NULL; - /* now use our persistent iterator, if we aren't already */ - iter = &rw->iter; -retry: - rw->bytes_done += ret; - /* if we can retry, do so with the callbacks armed */ - if (!io_rw_should_retry(req)) { - kiocb->ki_flags &= ~IOCB_WAITQ; - return -EAGAIN; - } + if (ret2) + return ret2; + iovec = NULL; + rw = req->async_data; /* - * Now retry read with the IOCB_WAITQ parts set in the iocb. If we - * get -EIOCBQUEUED, then we'll get a notification when the desired - * page gets unlocked. We can also get a partial read here, and if we - * do, then just retry at the new offset. + * Now use our persistent iterator and state, if we aren't already. + * We've restored and mapped the iter to match. 
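+ * From here on each partial read advances the iter by hand via
+ * iov_iter_advance() and re-saves the state before the next attempt.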
*/ - ret = io_iter_do_read(req, iter); - if (ret == -EIOCBQUEUED) { - ret = 0; - goto out_free; - } else if (ret > 0 && ret < io_size) { + if (iter != &rw->iter) { + iter = &rw->iter; + state = &rw->iter_state; + } + + do { + /* + * We end up here because of a partial read, either from + * above or inside this loop. Advance the iter by the bytes + * that were consumed. + */ + iov_iter_advance(iter, ret); + if (!iov_iter_count(iter)) + break; + rw->bytes_done += ret; + iov_iter_save_state(iter, state); + + /* if we can retry, do so with the callbacks armed */ + if (!io_rw_should_retry(req)) { + kiocb->ki_flags &= ~IOCB_WAITQ; + return -EAGAIN; + } + + req->result = iov_iter_count(iter); + /* + * Now retry read with the IOCB_WAITQ parts set in the iocb. If + * we get -EIOCBQUEUED, then we'll get a notification when the + * desired page gets unlocked. We can also get a partial read + * here, and if we do, then just retry at the new offset. + */ + ret = io_iter_do_read(req, iter); + if (ret == -EIOCBQUEUED) + return 0; /* we got some bytes, but not all. retry. */ kiocb->ki_flags &= ~IOCB_WAITQ; - goto retry; - } + iov_iter_restore(iter, state); + } while (ret > 0); done: - kiocb_done(kiocb, ret, cs); - ret = 0; + kiocb_done(kiocb, ret, issue_flags); out_free: - /* it's reportedly faster than delegating the null check to kfree() */ + /* it's faster to check here than to delegate to kfree() */ if (iovec) kfree(iovec); - return ret; + return 0; } static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - ssize_t ret; - - ret = io_prep_rw(req, sqe); - if (ret) - return ret; - if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - - /* either don't need iovec imported or already have it */ - if (!req->async_data) - return 0; - return io_rw_prep_async(req, WRITE); + return io_prep_rw(req, sqe, WRITE); } -static int io_write(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_write(struct io_kiocb *req, unsigned int issue_flags) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; - struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; - ssize_t ret, ret2, io_size; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + struct iov_iter_state __state, *state; + ssize_t ret, ret2; + loff_t *ppos; - if (rw) + if (rw) { iter = &rw->iter; - - ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); - if (ret < 0) - return ret; - iter_cp = *iter; - io_size = iov_iter_count(iter); - req->result = io_size; + state = &rw->iter_state; + iov_iter_restore(iter, state); + iovec = NULL; + } else { + ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); + if (ret < 0) + return ret; + state = &__state; + iov_iter_save_state(iter, state); + } + req->result = iov_iter_count(iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3547,7 +3725,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, kiocb->ki_flags |= IOCB_NOWAIT; /* If the file doesn't support async, just async punt */ - if (force_nonblock && !io_file_supports_async(req->file, WRITE)) + if (force_nonblock && !io_file_supports_nowait(req, WRITE)) goto copy_iov; /* file path doesn't support NOWAIT for non-direct_IO */ @@ -3555,7 +3733,9 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, (req->flags & REQ_F_ISREG)) goto copy_iov; - ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size); + ppos =
io_kiocb_update_pos(req); + + ret = rw_verify_area(WRITE, req->file, ppos, req->result); if (unlikely(ret)) goto out_free; @@ -3580,28 +3760,36 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, else ret2 = -EINVAL; + if (req->flags & REQ_F_REISSUE) { + req->flags &= ~REQ_F_REISSUE; + ret2 = -EAGAIN; + } + /* * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just * retry them without IOCB_NOWAIT. */ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) ret2 = -EAGAIN; - /* no retry on NONBLOCK marked file */ - if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) + /* no retry on NONBLOCK nor RWF_NOWAIT */ + if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) goto done; if (!force_nonblock || ret2 != -EAGAIN) { /* IOPOLL retry should happen for io-wq threads */ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) goto copy_iov; done: - kiocb_done(kiocb, ret2, cs); + kiocb_done(kiocb, ret2, issue_flags); } else { copy_iov: - /* some cases will consume bytes even on error returns */ - *iter = iter_cp; + iov_iter_restore(iter, state); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); - if (!ret) + if (!ret) { + if (kiocb->ki_flags & IOCB_WRITE) + kiocb_end_write(req); return -EAGAIN; + } + return ret; } out_free: /* it's reportedly faster than delegating the null check to kfree() */ @@ -3610,37 +3798,160 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, return ret; } -static int __io_splice_prep(struct io_kiocb *req, +static int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - struct io_splice* sp = &req->splice; - unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL; + struct io_rename *ren = &req->rename; + const char __user *oldf, *newf; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; + if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) + return -EINVAL; + if (unlikely(req->flags & REQ_F_FIXED_FILE)) + return -EBADF; - sp->file_in = NULL; - sp->len = READ_ONCE(sqe->len); - sp->flags = READ_ONCE(sqe->splice_flags); + ren->old_dfd = READ_ONCE(sqe->fd); + oldf = u64_to_user_ptr(READ_ONCE(sqe->addr)); + newf = u64_to_user_ptr(READ_ONCE(sqe->addr2)); + ren->new_dfd = READ_ONCE(sqe->len); + ren->flags = READ_ONCE(sqe->rename_flags); - if (unlikely(sp->flags & ~valid_flags)) - return -EINVAL; + ren->oldpath = getname(oldf); + if (IS_ERR(ren->oldpath)) + return PTR_ERR(ren->oldpath); - sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), - (sp->flags & SPLICE_F_FD_IN_FIXED)); - if (!sp->file_in) + ren->newpath = getname(newf); + if (IS_ERR(ren->newpath)) { + putname(ren->oldpath); + return PTR_ERR(ren->newpath); + } + + req->flags |= REQ_F_NEED_CLEANUP; + return 0; +} + +static int io_renameat(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_rename *ren = &req->rename; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd, + ren->newpath, ren->flags); + + req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret < 0) + req_set_fail(req); + io_req_complete(req, ret); + return 0; +} + +static int io_unlinkat_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_unlink *un = &req->unlink; + const char __user *fname; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || + sqe->splice_fd_in) + return -EINVAL; + if (unlikely(req->flags & REQ_F_FIXED_FILE)) return -EBADF; 
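+	/* mirrors unlinkat(2): dfd plus filename; AT_REMOVEDIR means rmdir */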
+ + un->dfd = READ_ONCE(sqe->fd); + + un->flags = READ_ONCE(sqe->unlink_flags); + if (un->flags & ~AT_REMOVEDIR) + return -EINVAL; + + fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); + un->filename = getname(fname); + if (IS_ERR(un->filename)) + return PTR_ERR(un->filename); + req->flags |= REQ_F_NEED_CLEANUP; + return 0; +} - if (!S_ISREG(file_inode(sp->file_in)->i_mode)) { - /* - * Splice operation will be punted aync, and here need to - * modify io_wq_work.flags, so initialize io_wq_work firstly. - */ - io_req_init_async(req); - req->work.flags |= IO_WQ_WORK_UNBOUND; - } +static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_unlink *un = &req->unlink; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + if (un->flags & AT_REMOVEDIR) + ret = do_rmdir(un->dfd, un->filename); + else + ret = do_unlinkat(un->dfd, un->filename); + + req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret < 0) + req_set_fail(req); + io_req_complete(req, ret); + return 0; +} + +static int io_shutdown_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ +#if defined(CONFIG_NET) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags || + sqe->buf_index || sqe->splice_fd_in)) + return -EINVAL; + + req->shutdown.how = READ_ONCE(sqe->len); + return 0; +#else + return -EOPNOTSUPP; +#endif +} + +static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) +{ +#if defined(CONFIG_NET) + struct socket *sock; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + sock = sock_from_file(req->file, &ret); + if (unlikely(!sock)) + return ret; + + ret = __sys_shutdown_sock(sock, req->shutdown.how); + if (ret < 0) + req_set_fail(req); + io_req_complete(req, ret); + return 0; +#else + return -EOPNOTSUPP; +#endif +} + +static int __io_splice_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_splice *sp = &req->splice; + unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + sp->len = READ_ONCE(sqe->len); + sp->flags = READ_ONCE(sqe->splice_flags); + if (unlikely(sp->flags & ~valid_flags)) + return -EINVAL; + sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in); return 0; } @@ -3652,60 +3963,75 @@ static int io_tee_prep(struct io_kiocb *req, return __io_splice_prep(req, sqe); } -static int io_tee(struct io_kiocb *req, bool force_nonblock) +static int io_tee(struct io_kiocb *req, unsigned int issue_flags) { struct io_splice *sp = &req->splice; - struct file *in = sp->file_in; struct file *out = sp->file_out; unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; + struct file *in; long ret = 0; - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; + + in = io_file_get(req->ctx, req, sp->splice_fd_in, + (sp->flags & SPLICE_F_FD_IN_FIXED)); + if (!in) { + ret = -EBADF; + goto done; + } + if (sp->len) ret = do_tee(in, out, sp->len, flags); - io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); - req->flags &= ~REQ_F_NEED_CLEANUP; - + if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) + io_put_file(in); +done: if (ret != sp->len) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - struct io_splice* sp = &req->splice; + struct io_splice *sp = &req->splice; sp->off_in = READ_ONCE(sqe->splice_off_in); sp->off_out = 
READ_ONCE(sqe->off); return __io_splice_prep(req, sqe); } -static int io_splice(struct io_kiocb *req, bool force_nonblock) +static int io_splice(struct io_kiocb *req, unsigned int issue_flags) { struct io_splice *sp = &req->splice; - struct file *in = sp->file_in; struct file *out = sp->file_out; unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; loff_t *poff_in, *poff_out; + struct file *in; long ret = 0; - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; + in = io_file_get(req->ctx, req, sp->splice_fd_in, + (sp->flags & SPLICE_F_FD_IN_FIXED)); + if (!in) { + ret = -EBADF; + goto done; + } + poff_in = (sp->off_in == -1) ? NULL : &sp->off_in; poff_out = (sp->off_out == -1) ? NULL : &sp->off_out; if (sp->len) ret = do_splice(in, poff_in, out, poff_out, sp->len, flags); - io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); - req->flags &= ~REQ_F_NEED_CLEANUP; - + if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) + io_put_file(in); +done: if (ret != sp->len) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } @@ -3713,24 +4039,21 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock) /* * IORING_OP_NOP just posts a completion event, nothing else. */ -static int io_nop(struct io_kiocb *req, struct io_comp_state *cs) +static int io_nop(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - __io_req_complete(req, 0, 0, cs); + __io_req_complete(req, issue_flags, 0, 0); return 0; } -static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; - if (!req->file) - return -EBADF; - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index || @@ -3746,20 +4069,20 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_fsync(struct io_kiocb *req, bool force_nonblock) +static int io_fsync(struct io_kiocb *req, unsigned int issue_flags) { loff_t end = req->sync.off + req->sync.len; int ret; /* fsync always requires a blocking context */ - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = vfs_fsync_range(req->file, req->sync.off, end > 0 ? 
end : LLONG_MAX, req->sync.flags & IORING_FSYNC_DATASYNC); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } @@ -3779,17 +4102,19 @@ static int io_fallocate_prep(struct io_kiocb *req, return 0; } -static int io_fallocate(struct io_kiocb *req, bool force_nonblock) +static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) { int ret; /* fallocate always requiring blocking context */ - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, req->sync.len); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); + else + fsnotify_modify(req->file); io_req_complete(req, ret); return 0; } @@ -3799,7 +4124,9 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe const char __user *fname; int ret; - if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->ioprio || sqe->buf_index)) return -EINVAL; if (unlikely(req->flags & REQ_F_FIXED_FILE)) return -EBADF; @@ -3816,20 +4143,21 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe req->open.filename = NULL; return ret; } + + req->open.file_slot = READ_ONCE(sqe->file_index); + if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) + return -EINVAL; + req->open.nofile = rlimit(RLIMIT_NOFILE); - req->open.ignore_nonblock = false; req->flags |= REQ_F_NEED_CLEANUP; return 0; } static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - u64 flags, mode; + u64 mode = READ_ONCE(sqe->len); + u64 flags = READ_ONCE(sqe->open_flags); - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) - return -EINVAL; - mode = READ_ONCE(sqe->len); - flags = READ_ONCE(sqe->open_flags); req->open.how = build_open_how(flags, mode); return __io_openat_prep(req, sqe); } @@ -3840,8 +4168,6 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) size_t len; int ret; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) - return -EINVAL; how = u64_to_user_ptr(READ_ONCE(sqe->addr2)); len = READ_ONCE(sqe->len); if (len < OPEN_HOW_SIZE_VER0) @@ -3855,58 +4181,75 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return __io_openat_prep(req, sqe); } -static int io_openat2(struct io_kiocb *req, bool force_nonblock) +static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) { struct open_flags op; struct file *file; + bool resolve_nonblock, nonblock_set; + bool fixed = !!req->open.file_slot; int ret; - if (force_nonblock && !req->open.ignore_nonblock) - return -EAGAIN; - ret = build_open_flags(&req->open.how, &op); if (ret) goto err; + nonblock_set = op.open_flag & O_NONBLOCK; + resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED; + if (issue_flags & IO_URING_F_NONBLOCK) { + /* + * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open, + * it'll always return -EAGAIN + */ + if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + return -EAGAIN; + op.lookup_flags |= LOOKUP_CACHED; + op.open_flag |= O_NONBLOCK; + } - ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); - if (ret < 0) - goto err; + if (!fixed) { + ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); + if (ret < 0) + goto err; + } file = do_filp_open(req->open.dfd, req->open.filename, &op); if (IS_ERR(file)) { - put_unused_fd(ret);
- ret = PTR_ERR(file); /* - * A work-around to ensure that /proc/self works that way - * that it should - if we get -EOPNOTSUPP back, then assume - * that proc_self_get_link() failed us because we're in async - * context. We should be safe to retry this from the task - * itself with force_nonblock == false set, as it should not - * block on lookup. Would be nice to know this upfront and - * avoid the async dance, but doesn't seem feasible. + * We could hang on to this 'fd' on retrying, but seems like + * marginal gain for something that is now known to be a slower + * path. So just put it, and we'll get a new one when we retry. */ - if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) { - req->open.ignore_nonblock = true; - refcount_inc(&req->refs); - io_req_task_queue(req); - return 0; - } - } else { - fsnotify_open(file); - fd_install(ret, file); + if (!fixed) + put_unused_fd(ret); + + ret = PTR_ERR(file); + /* only retry if RESOLVE_CACHED wasn't already set by application */ + if (ret == -EAGAIN && + (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK))) + return -EAGAIN; + goto err; } + + if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set) + file->f_flags &= ~O_NONBLOCK; + fsnotify_open(file); + + if (!fixed) + fd_install(ret, file); + else + ret = io_install_fixed_file(req, file, issue_flags, + req->open.file_slot - 1); err: putname(req->open.filename); req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < 0) - req_set_fail_links(req); - io_req_complete(req, ret); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } -static int io_openat(struct io_kiocb *req, bool force_nonblock) +static int io_openat(struct io_kiocb *req, unsigned int issue_flags) { - return io_openat2(req, force_nonblock); + return io_openat2(req, issue_flags); } static int io_remove_buffers_prep(struct io_kiocb *req, @@ -3947,6 +4290,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, kfree(nxt); if (++i == nbufs) return i; + cond_resched(); } i++; kfree(buf); @@ -3955,13 +4299,13 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, return i; } -static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) { struct io_provide_buf *p = &req->pbuf; struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head; int ret = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; io_ring_submit_lock(ctx, !force_nonblock); @@ -3972,16 +4316,11 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, if (head) ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); - /* need to hold the lock to complete IOPOLL requests */ - if (ctx->flags & IORING_SETUP_IOPOLL) { - __io_req_complete(req, ret, 0, cs); - io_ring_submit_unlock(ctx, !force_nonblock); - } else { - io_ring_submit_unlock(ctx, !force_nonblock); - __io_req_complete(req, ret, 0, cs); - } + /* complete before unlock, IOPOLL may need the lock */ + __io_req_complete(req, issue_flags, ret, 0); + io_ring_submit_unlock(ctx, !force_nonblock); return 0; } @@ -4048,13 +4387,13 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) return i ? 
i : -ENOMEM; } -static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) { struct io_provide_buf *p = &req->pbuf; struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head, *list; int ret = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; io_ring_submit_lock(ctx, !force_nonblock); @@ -4064,21 +4403,16 @@ static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, ret = io_add_buffers(p, &head); if (ret >= 0 && !list) { - ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL); + ret = xa_insert(&ctx->io_buffers, p->bgid, head, + GFP_KERNEL_ACCOUNT); if (ret < 0) __io_remove_buffers(ctx, head, p->bgid, -1U); } if (ret < 0) - req_set_fail_links(req); - - /* need to hold the lock to complete IOPOLL requests */ - if (ctx->flags & IORING_SETUP_IOPOLL) { - __io_req_complete(req, ret, 0, cs); - io_ring_submit_unlock(ctx, !force_nonblock); - } else { - io_ring_submit_unlock(ctx, !force_nonblock); - __io_req_complete(req, ret, 0, cs); - } + req_set_fail(req); + /* complete before unlock, IOPOLL may need the lock */ + __io_req_complete(req, issue_flags, ret, 0); + io_ring_submit_unlock(ctx, !force_nonblock); return 0; } @@ -4088,7 +4422,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req, #if defined(CONFIG_EPOLL) if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) return -EINVAL; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; req->epoll.epfd = READ_ONCE(sqe->fd); @@ -4109,20 +4443,20 @@ static int io_epoll_ctl_prep(struct io_kiocb *req, #endif } -static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) { #if defined(CONFIG_EPOLL) struct io_epoll *ie = &req->epoll; int ret; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock); if (force_nonblock && ret == -EAGAIN) return -EAGAIN; if (ret < 0) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; #else return -EOPNOTSUPP; @@ -4146,18 +4480,18 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) #endif } -static int io_madvise(struct io_kiocb *req, bool force_nonblock) +static int io_madvise(struct io_kiocb *req, unsigned int issue_flags) { #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU) struct io_madvise *ma = &req->madvise; int ret; - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; #else @@ -4178,12 +4512,12 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_fadvise(struct io_kiocb *req, bool force_nonblock) +static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) { struct io_fadvise *fa = &req->fadvise; int ret; - if (force_nonblock) { + if (issue_flags & IO_URING_F_NONBLOCK) { switch (fa->advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_RANDOM: @@ -4196,14 +4530,14 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); if (ret < 0) - 
req_set_fail_links(req); - io_req_complete(req, ret); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) return -EINVAL; @@ -4219,89 +4553,96 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_statx(struct io_kiocb *req, bool force_nonblock) +static int io_statx(struct io_kiocb *req, unsigned int issue_flags) { struct io_statx *ctx = &req->statx; int ret; - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask, ctx->buffer); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - /* - * If we queue this for async, it must not be cancellable. That would - * leave the 'file' in an undeterminate state, and here need to modify - * io_wq_work.flags, so initialize io_wq_work firstly. - */ - io_req_init_async(req); - - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->off || sqe->addr || sqe->len || - sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in) + sqe->rw_flags || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); - if ((req->file && req->file->f_op == &io_uring_fops)) - return -EBADF; + req->close.file_slot = READ_ONCE(sqe->file_index); + if (req->close.file_slot && req->close.fd) + return -EINVAL; - req->close.put_file = NULL; return 0; } -static int io_close(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_close(struct io_kiocb *req, unsigned int issue_flags) { + struct files_struct *files = current->files; struct io_close *close = &req->close; - int ret; + struct fdtable *fdt; + struct file *file = NULL; + int ret = -EBADF; - /* might be already done during nonblock submission */ - if (!close->put_file) { - ret = __close_fd_get_file(close->fd, &close->put_file); - if (ret < 0) - return (ret == -ENOENT) ? 
-EBADF : ret; + if (req->close.file_slot) { + ret = io_close_fixed(req, issue_flags); + goto err; + } + + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + if (close->fd >= fdt->max_fds) { + spin_unlock(&files->file_lock); + goto err; + } + file = fdt->fd[close->fd]; + if (!file || file->f_op == &io_uring_fops) { + spin_unlock(&files->file_lock); + file = NULL; + goto err; } /* if the file has a flush method, be safe and punt to async */ - if (close->put_file->f_op->flush && force_nonblock) { - /* not safe to cancel at this point */ - req->work.flags |= IO_WQ_WORK_NO_CANCEL; - /* was never set, but play safe */ - req->flags &= ~REQ_F_NOWAIT; - /* avoid grabbing files - we don't need the files */ - req->flags |= REQ_F_NO_FILE_TABLE; + if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) { + spin_unlock(&files->file_lock); return -EAGAIN; } + ret = __close_fd_get_file(close->fd, &file); + spin_unlock(&files->file_lock); + if (ret < 0) { + if (ret == -ENOENT) + ret = -EBADF; + goto err; + } + /* No ->flush() or already async, safely close from here */ - ret = filp_close(close->put_file, req->work.identity->files); + ret = filp_close(file, current->files); +err: if (ret < 0) - req_set_fail_links(req); - fput(close->put_file); - close->put_file = NULL; - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + if (file) + fput(file); + __io_req_complete(req, issue_flags, ret, 0); return 0; } -static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; - if (!req->file) - return -EBADF; - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index || @@ -4314,23 +4655,30 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) +static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) { int ret; /* sync_file_range always requires a blocking context */ - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = sync_file_range(req->file, req->sync.off, req->sync.len, req->sync.flags); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } #if defined(CONFIG_NET) +static bool io_net_retry(struct socket *sock, int flags) +{ + if (!(flags & MSG_WAITALL)) + return false; + return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET; +} + static int io_setup_async_msg(struct io_kiocb *req, struct io_async_msghdr *kmsg) { @@ -4339,57 +4687,71 @@ static int io_setup_async_msg(struct io_kiocb *req, if (async_msg) return -EAGAIN; if (io_alloc_async_data(req)) { - if (kmsg->iov != kmsg->fast_iov) - kfree(kmsg->iov); + kfree(kmsg->free_iov); return -ENOMEM; } async_msg = req->async_data; req->flags |= REQ_F_NEED_CLEANUP; memcpy(async_msg, kmsg, sizeof(*kmsg)); + if (async_msg->msg.msg_name) + async_msg->msg.msg_name = &async_msg->addr; + /* if we're using fast_iov, set it to the new one */ + if (!kmsg->free_iov) { + size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov; + async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx]; + } + return -EAGAIN; } static int io_sendmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { - iomsg->iov = iomsg->fast_iov; iomsg->msg.msg_name = &iomsg->addr; + iomsg->free_iov = iomsg->fast_iov; return sendmsg_copy_msghdr(&iomsg->msg,
req->sr_msg.umsg, - req->sr_msg.msg_flags, &iomsg->iov); + req->sr_msg.msg_flags, &iomsg->free_iov); +} + +static int io_sendmsg_prep_async(struct io_kiocb *req) +{ + int ret; + + ret = io_sendmsg_copy_hdr(req, req->async_data); + if (!ret) + req->flags |= REQ_F_NEED_CLEANUP; + return ret; } static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - struct io_async_msghdr *async_msg = req->async_data; struct io_sr_msg *sr = &req->sr_msg; - int ret; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (unlikely(sqe->addr2 || sqe->splice_fd_in || sqe->ioprio)) + if (unlikely(sqe->addr2 || sqe->file_index)) + return -EINVAL; + if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio)) return -EINVAL; - sr->msg_flags = READ_ONCE(sqe->msg_flags); sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); + sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; + if (sr->msg_flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; #ifdef CONFIG_COMPAT if (req->ctx->compat) sr->msg_flags |= MSG_CMSG_COMPAT; #endif - - if (!async_msg || !io_op_defs[req->opcode].needs_async_data) - return 0; - ret = io_sendmsg_copy_hdr(req, async_msg); - if (!ret) - req->flags |= REQ_F_NEED_CLEANUP; - return ret; + sr->done_io = 0; + return 0; } -static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) { struct io_async_msghdr iomsg, *kmsg; + struct io_sr_msg *sr = &req->sr_msg; struct socket *sock; unsigned flags; int min_ret = 0; @@ -4399,46 +4761,47 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, if (unlikely(!sock)) return ret; - if (req->async_data) { - kmsg = req->async_data; - kmsg->msg.msg_name = &kmsg->addr; - /* if iov is set, it's allocated already */ - if (!kmsg->iov) - kmsg->iov = kmsg->fast_iov; - kmsg->msg.msg_iter.iov = kmsg->iov; - } else { + kmsg = req->async_data; + if (!kmsg) { ret = io_sendmsg_copy_hdr(req, &iomsg); if (ret) return ret; kmsg = &iomsg; } - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (issue_flags & IO_URING_F_NONBLOCK) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&kmsg->msg.msg_iter); ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); - if (force_nonblock && ret == -EAGAIN) - return io_setup_async_msg(req, kmsg); - if (ret == -ERESTARTSYS) - ret = -EINTR; - if (kmsg->iov != kmsg->fast_iov) - kfree(kmsg->iov); + if (ret < min_ret) { + if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) + return io_setup_async_msg(req, kmsg); + if (ret == -ERESTARTSYS) + ret = -EINTR; + if (ret > 0 && io_net_retry(sock, flags)) { + sr->done_io += ret; + req->flags |= REQ_F_PARTIAL_IO; + return io_setup_async_msg(req, kmsg); + } + req_set_fail(req); + } + /* fast path, check for non-NULL to avoid function call */ + if (kmsg->free_iov) + kfree(kmsg->free_iov); req->flags &= ~REQ_F_NEED_CLEANUP; - if (ret < min_ret) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + if (ret >= 0) + ret += sr->done_io; + else if (sr->done_io) + ret = sr->done_io; + __io_req_complete(req, issue_flags, ret, 0); return 0; } -static int io_send(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_send(struct io_kiocb *req, unsigned int issue_flags) { struct io_sr_msg *sr = &req->sr_msg; struct msghdr msg; @@ -4461,25 
+4824,33 @@ static int io_send(struct io_kiocb *req, bool force_nonblock, msg.msg_controllen = 0; msg.msg_namelen = 0; - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (issue_flags & IO_URING_F_NONBLOCK) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&msg.msg_iter); msg.msg_flags = flags; ret = sock_sendmsg(sock, &msg); - if (force_nonblock && ret == -EAGAIN) - return -EAGAIN; - if (ret == -ERESTARTSYS) - ret = -EINTR; - - if (ret < min_ret) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + if (ret < min_ret) { + if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) + return -EAGAIN; + if (ret == -ERESTARTSYS) + ret = -EINTR; + if (ret > 0 && io_net_retry(sock, flags)) { + sr->len -= ret; + sr->buf += ret; + sr->done_io += ret; + req->flags |= REQ_F_PARTIAL_IO; + return -EAGAIN; + } + req_set_fail(req); + } + if (ret >= 0) + ret += sr->done_io; + else if (sr->done_io) + ret = sr->done_io; + __io_req_complete(req, issue_flags, ret, 0); return 0; } @@ -4499,15 +4870,14 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req, if (req->flags & REQ_F_BUFFER_SELECT) { if (iov_len > 1) return -EINVAL; - if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov))) + if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov))) return -EFAULT; - sr->len = iomsg->iov[0].iov_len; - iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1, - sr->len); - iomsg->iov = NULL; + sr->len = iomsg->fast_iov[0].iov_len; + iomsg->free_iov = NULL; } else { + iomsg->free_iov = iomsg->fast_iov; ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV, - &iomsg->iov, &iomsg->msg.msg_iter, + &iomsg->free_iov, &iomsg->msg.msg_iter, false); if (ret > 0) ret = 0; @@ -4520,16 +4890,14 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req, static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { - struct compat_msghdr __user *msg_compat; struct io_sr_msg *sr = &req->sr_msg; struct compat_iovec __user *uiov; compat_uptr_t ptr; compat_size_t len; int ret; - msg_compat = (struct compat_msghdr __user *) sr->umsg; - ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr, - &ptr, &len); + ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr, + &ptr, &len); if (ret) return ret; @@ -4546,11 +4914,11 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, if (clen < 0) return -EINVAL; sr->len = clen; - iomsg->iov[0].iov_len = clen; - iomsg->iov = NULL; + iomsg->free_iov = NULL; } else { + iomsg->free_iov = iomsg->fast_iov; ret = __import_iovec(READ, (struct iovec __user *)uiov, len, - UIO_FASTIOV, &iomsg->iov, + UIO_FASTIOV, &iomsg->free_iov, &iomsg->msg.msg_iter, true); if (ret < 0) return ret; @@ -4564,7 +4932,6 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { iomsg->msg.msg_name = &iomsg->addr; - iomsg->iov = iomsg->fast_iov; #ifdef CONFIG_COMPAT if (req->ctx->compat) @@ -4594,58 +4961,59 @@ static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) return io_put_kbuf(req, req->sr_msg.kbuf); } -static int io_recvmsg_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static int io_recvmsg_prep_async(struct io_kiocb *req) { - struct io_async_msghdr *async_msg = req->async_data; - struct io_sr_msg *sr = &req->sr_msg; int ret; + ret = io_recvmsg_copy_hdr(req, req->async_data); + if (!ret) + req->flags |= REQ_F_NEED_CLEANUP; + return ret; +} + 
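The send and recv paths patched above and below share one retry pattern, gated by io_net_retry(): on a short transfer over a stream-like socket with MSG_WAITALL set, progress is banked in sr->done_io, REQ_F_PARTIAL_IO is flagged, and the request is re-queued; the final completion folds the banked bytes back into the result. A minimal sketch of that accounting in plain C (struct xfer_state and do_xfer are illustrative stand-ins, not kernel API):

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>

struct xfer_state {
	char	*buf;		/* current window into the user buffer */
	size_t	len;		/* bytes still wanted */
	ssize_t	done_io;	/* progress banked across retries */
};

static ssize_t issue_once(struct xfer_state *st,
			  ssize_t (*do_xfer)(char *, size_t))
{
	ssize_t ret = do_xfer(st->buf, st->len);

	if (ret > 0 && (size_t)ret < st->len) {
		/* short transfer: bank progress and ask to be re-issued */
		st->buf += ret;
		st->len -= (size_t)ret;
		st->done_io += ret;
		return -EAGAIN;
	}
	/* final completion: fold the banked bytes back into the result */
	if (ret >= 0)
		ret += st->done_io;
	else if (st->done_io)
		ret = st->done_io;
	return ret;
}

A caller simply re-issues while this returns -EAGAIN, which is roughly what re-queueing through io_setup_async_msg() amounts to for the msghdr variants.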
+static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_sr_msg *sr = &req->sr_msg; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (unlikely(sqe->addr2 || sqe->splice_fd_in || sqe->ioprio)) + if (unlikely(sqe->addr2 || sqe->file_index)) + return -EINVAL; + if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio)) return -EINVAL; - sr->msg_flags = READ_ONCE(sqe->msg_flags); sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); sr->bgid = READ_ONCE(sqe->buf_group); + sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; + if (sr->msg_flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; #ifdef CONFIG_COMPAT if (req->ctx->compat) sr->msg_flags |= MSG_CMSG_COMPAT; #endif - - if (!async_msg || !io_op_defs[req->opcode].needs_async_data) - return 0; - ret = io_recvmsg_copy_hdr(req, async_msg); - if (!ret) - req->flags |= REQ_F_NEED_CLEANUP; - return ret; + sr->done_io = 0; + return 0; } -static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) { struct io_async_msghdr iomsg, *kmsg; + struct io_sr_msg *sr = &req->sr_msg; struct socket *sock; struct io_buffer *kbuf; unsigned flags; int min_ret = 0; int ret, cflags = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; sock = sock_from_file(req->file, &ret); if (unlikely(!sock)) return ret; - if (req->async_data) { - kmsg = req->async_data; - kmsg->msg.msg_name = &kmsg->addr; - /* if iov is set, it's allocated already */ - if (!kmsg->iov) - kmsg->iov = kmsg->fast_iov; - kmsg->msg.msg_iter.iov = kmsg->iov; - } else { + kmsg = req->async_data; + if (!kmsg) { ret = io_recvmsg_copy_hdr(req, &iomsg); if (ret) return ret; @@ -4657,39 +5025,49 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, if (IS_ERR(kbuf)) return PTR_ERR(kbuf); kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr); - iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov, + kmsg->fast_iov[0].iov_len = req->sr_msg.len; + iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1, req->sr_msg.len); } - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (force_nonblock) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&kmsg->msg.msg_iter); ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, kmsg->uaddr, flags); - if (force_nonblock && ret == -EAGAIN) - return io_setup_async_msg(req, kmsg); - if (ret == -ERESTARTSYS) - ret = -EINTR; + if (ret < min_ret) { + if (ret == -EAGAIN && force_nonblock) + return io_setup_async_msg(req, kmsg); + if (ret == -ERESTARTSYS) + ret = -EINTR; + if (ret > 0 && io_net_retry(sock, flags)) { + sr->done_io += ret; + req->flags |= REQ_F_PARTIAL_IO; + return io_setup_async_msg(req, kmsg); + } + req_set_fail(req); + } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { + req_set_fail(req); + } if (req->flags & REQ_F_BUFFER_SELECTED) cflags = io_put_recv_kbuf(req); - if (kmsg->iov != kmsg->fast_iov) - kfree(kmsg->iov); + /* fast path, check for non-NULL to avoid function call */ + if (kmsg->free_iov) + kfree(kmsg->free_iov); req->flags &= ~REQ_F_NEED_CLEANUP; - if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)))) - req_set_fail_links(req); - __io_req_complete(req, ret, cflags, cs); + if (ret >= 0) + ret += 
sr->done_io; + else if (sr->done_io) + ret = sr->done_io; + __io_req_complete(req, issue_flags, ret, cflags); return 0; } -static int io_recv(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_recv(struct io_kiocb *req, unsigned int issue_flags) { struct io_buffer *kbuf; struct io_sr_msg *sr = &req->sr_msg; @@ -4700,6 +5078,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock, unsigned flags; int min_ret = 0; int ret, cflags = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; sock = sock_from_file(req->file, &ret); if (unlikely(!sock)) @@ -4723,26 +5102,37 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock, msg.msg_iocb = NULL; msg.msg_flags = 0; - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (force_nonblock) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&msg.msg_iter); ret = sock_recvmsg(sock, &msg, flags); - if (force_nonblock && ret == -EAGAIN) - return -EAGAIN; - if (ret == -ERESTARTSYS) - ret = -EINTR; + if (ret < min_ret) { + if (ret == -EAGAIN && force_nonblock) + return -EAGAIN; + if (ret == -ERESTARTSYS) + ret = -EINTR; + if (ret > 0 && io_net_retry(sock, flags)) { + sr->len -= ret; + sr->buf += ret; + sr->done_io += ret; + req->flags |= REQ_F_PARTIAL_IO; + return -EAGAIN; + } + req_set_fail(req); + } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { out_free: + req_set_fail(req); + } if (req->flags & REQ_F_BUFFER_SELECTED) cflags = io_put_recv_kbuf(req); - if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)))) - req_set_fail_links(req); - __io_req_complete(req, ret, cflags, cs); + if (ret >= 0) + ret += sr->done_io; + else if (sr->done_io) + ret = sr->done_io; + __io_req_complete(req, issue_flags, ret, cflags); return 0; } @@ -4750,48 +5140,78 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_accept *accept = &req->accept; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in) + if (sqe->ioprio || sqe->len || sqe->buf_index) return -EINVAL; accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2)); accept->flags = READ_ONCE(sqe->accept_flags); accept->nofile = rlimit(RLIMIT_NOFILE); + + accept->file_slot = READ_ONCE(sqe->file_index); + if (accept->file_slot && (accept->flags & SOCK_CLOEXEC)) + return -EINVAL; + if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) + return -EINVAL; + if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK)) + accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK; return 0; } -static int io_accept(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_accept(struct io_kiocb *req, unsigned int issue_flags) { struct io_accept *accept = &req->accept; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; unsigned int file_flags = force_nonblock ? 
O_NONBLOCK : 0; - int ret; + bool fixed = !!accept->file_slot; + struct file *file; + int ret, fd; - if (req->file->f_flags & O_NONBLOCK) - req->flags |= REQ_F_NOWAIT; + if (!fixed) { + fd = __get_unused_fd_flags(accept->flags, accept->nofile); + if (unlikely(fd < 0)) + return fd; + } + file = do_accept(req->file, file_flags, accept->addr, accept->addr_len, + accept->flags); - ret = __sys_accept4_file(req->file, file_flags, accept->addr, - accept->addr_len, accept->flags, - accept->nofile); - if (ret == -EAGAIN && force_nonblock) - return -EAGAIN; - if (ret < 0) { + if (IS_ERR(file)) { + if (!fixed) + put_unused_fd(fd); + ret = PTR_ERR(file); + /* safe to retry */ + req->flags |= REQ_F_PARTIAL_IO; + if (ret == -EAGAIN && force_nonblock) + return -EAGAIN; if (ret == -ERESTARTSYS) ret = -EINTR; - req_set_fail_links(req); + req_set_fail(req); + } else if (!fixed) { + fd_install(fd, file); + ret = fd; + } else { + ret = io_install_fixed_file(req, file, issue_flags, + accept->file_slot - 1); } - __io_req_complete(req, ret, 0, cs); + __io_req_complete(req, issue_flags, ret, 0); return 0; } +static int io_connect_prep_async(struct io_kiocb *req) +{ + struct io_async_connect *io = req->async_data; + struct io_connect *conn = &req->connect; + + return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address); +} + static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_connect *conn = &req->connect; - struct io_async_connect *io = req->async_data; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in) @@ -4799,20 +5219,15 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); conn->addr_len = READ_ONCE(sqe->addr2); - - if (!io) - return 0; - - return move_addr_to_kernel(conn->addr, conn->addr_len, - &io->address); + return 0; } -static int io_connect(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_connect(struct io_kiocb *req, unsigned int issue_flags) { struct io_async_connect __io, *io; unsigned file_flags; int ret; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; if (req->async_data) { io = req->async_data; @@ -4836,7 +5251,6 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock, ret = -ENOMEM; goto out; } - io = req->async_data; memcpy(req->async_data, &__io, sizeof(__io)); return -EAGAIN; } @@ -4844,67 +5258,37 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock, ret = -EINTR; out: if (ret < 0) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } #else /* !CONFIG_NET */ -static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; -} - -static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_send(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_recvmsg_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; -} - -static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_recv(struct io_kiocb *req, bool force_nonblock, - struct 
io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; -} - -static int io_accept(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; -} - -static int io_connect(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} +#define IO_NETOP_FN(op) \ +static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \ +{ \ + return -EOPNOTSUPP; \ +} + +#define IO_NETOP_PREP(op) \ +IO_NETOP_FN(op) \ +static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \ +{ \ + return -EOPNOTSUPP; \ +} \ + +#define IO_NETOP_PREP_ASYNC(op) \ +IO_NETOP_PREP(op) \ +static int io_##op##_prep_async(struct io_kiocb *req) \ +{ \ + return -EOPNOTSUPP; \ +} + +IO_NETOP_PREP_ASYNC(sendmsg); +IO_NETOP_PREP_ASYNC(recvmsg); +IO_NETOP_PREP_ASYNC(connect); +IO_NETOP_PREP(accept); +IO_NETOP_FN(send); +IO_NETOP_FN(recv); #endif /* CONFIG_NET */ struct io_poll_table { @@ -4914,68 +5298,47 @@ struct io_poll_table { int error; }; -static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, - __poll_t mask, task_work_func_t func) -{ - bool twa_signal_ok; - int ret; - - /* for instances that support it check for an event match first: */ - if (mask && !(mask & poll->events)) - return 0; - - trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); - - list_del_init(&poll->wait.entry); +#define IO_POLL_CANCEL_FLAG BIT(31) +#define IO_POLL_RETRY_FLAG BIT(30) +#define IO_POLL_REF_MASK GENMASK(29, 0) - req->result = mask; - init_task_work(&req->task_work, func); - percpu_ref_get(&req->ctx->refs); +/* + * We usually have 1-2 refs taken, 128 is more than enough and we want to + * maximise the margin between this amount and the moment when it overflows. + */ +#define IO_POLL_REF_BIAS 128 - /* - * If we using the signalfd wait_queue_head for this wakeup, then - * it's not safe to use TWA_SIGNAL as we could be recursing on the - * tsk->sighand->siglock on doing the wakeup. Should not be needed - * either, as the normal wakeup will suffice. - */ - twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); +static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) +{ + int v; /* - * If this fails, then the task is exiting. When a task exits, the - * work gets canceled, so just cancel this request as well instead - * of executing it. We can't safely execute it anyway, as we may not - * have the needed state needed for it anyway. + * poll_refs are already elevated and we don't have much hope for + * grabbing the ownership. Instead of incrementing set a retry flag + * to notify the loop that there might have been some change. */ - ret = io_req_task_work_add(req, twa_signal_ok); - if (unlikely(ret)) { - struct task_struct *tsk; - - WRITE_ONCE(poll->canceled, true); - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); - } - return 1; + v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); + if (v & IO_POLL_REF_MASK) + return false; + return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); } -static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) - __acquires(&req->ctx->completion_lock) +/* + * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. 
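For orientation, the defines above carve req->poll_refs into three fields (an editorial summary of the patch's scheme, not added mechanism):

	bit 31       IO_POLL_CANCEL_FLAG   the request has been marked cancelled
	bit 30       IO_POLL_RETRY_FLAG    state changed while the request was owned;
	                                   the owner must run one more pass
	bits 0..29   reference count       the first task to raise it from zero owns
	                                   the request; values at or above
	                                   IO_POLL_REF_BIAS (128) send new acquisition
	                                   attempts to the slowpath above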
We can + * bump it and acquire ownership. It's disallowed to modify requests while not + * owning it, which prevents races when enqueueing task_works and between + * arming poll and wakeups. + */ +static inline bool io_poll_get_ownership(struct io_kiocb *req) { - struct io_ring_ctx *ctx = req->ctx; - - if (!req->result && !READ_ONCE(poll->canceled)) { - struct poll_table_struct pt = { ._key = poll->events }; - - req->result = vfs_poll(req->file, &pt) & poll->events; - } - - spin_lock_irq(&ctx->completion_lock); - if (!req->result && !READ_ONCE(poll->canceled)) { - add_wait_queue(poll->head, &poll->wait); - return true; - } + if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) + return io_poll_get_ownership_slowpath(req); + return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); +} - return false; +static void io_poll_mark_cancelled(struct io_kiocb *req) +{ + atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); } static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) @@ -4993,99 +5356,263 @@ static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) return &req->apoll->poll; } -static void io_poll_remove_double(struct io_kiocb *req) +static void io_poll_req_insert(struct io_kiocb *req) { - struct io_poll_iocb *poll = io_poll_get_double(req); + struct io_ring_ctx *ctx = req->ctx; + struct hlist_head *list; + + list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; + hlist_add_head(&req->hash_node, list); +} - lockdep_assert_held(&req->ctx->completion_lock); +static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, + wait_queue_func_t wake_func) +{ + poll->head = NULL; +#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP) + /* mask in events that we always want/need */ + poll->events = events | IO_POLL_UNMASK; + INIT_LIST_HEAD(&poll->wait.entry); + init_waitqueue_func_entry(&poll->wait, wake_func); +} - if (poll && poll->head) { - struct wait_queue_head *head = poll->head; +static inline void io_poll_remove_entry(struct io_poll_iocb *poll) +{ + struct wait_queue_head *head = smp_load_acquire(&poll->head); - spin_lock(&head->lock); + if (head) { + spin_lock_irq(&head->lock); list_del_init(&poll->wait.entry); - if (poll->wait.private) - refcount_dec(&req->refs); poll->head = NULL; - spin_unlock(&head->lock); + spin_unlock_irq(&head->lock); } } -static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) +static void io_poll_remove_entries(struct io_kiocb *req) { - struct io_ring_ctx *ctx = req->ctx; - - io_poll_remove_double(req); - req->poll.done = true; - io_cqring_fill_event(req, error ? error : mangle_poll(mask)); - io_commit_cqring(ctx); -} + struct io_poll_iocb *poll = io_poll_get_single(req); + struct io_poll_iocb *poll_double = io_poll_get_double(req); -static void io_poll_task_func(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + /* + * While we hold the waitqueue lock and the waitqueue is nonempty, + * wake_up_pollfree() will wait for us. However, taking the waitqueue + * lock in the first place can race with the waitqueue being freed. + * + * We solve this as eventpoll does: by taking advantage of the fact that + * all users of wake_up_pollfree() will RCU-delay the actual free. If + * we enter rcu_read_lock() and see that the pointer to the queue is + * non-NULL, we can then lock it without the memory being freed out from + * under us.
+ * + * Keep holding rcu_read_lock() as long as we hold the queue lock, in + * case the caller deletes the entry from the queue, leaving it empty. + * In that case, only RCU prevents the queue memory from being freed. + */ + rcu_read_lock(); + io_poll_remove_entry(poll); + if (poll_double) + io_poll_remove_entry(poll_double); + rcu_read_unlock(); +} + +/* + * All poll tw should go through this. Checks for poll events, manages + * references, does rewait, etc. + * + * Returns a negative error on failure. >0 when no action is required, which + * means either a spurious wakeup or a multishot CQE was served. 0 when it's + * done with the request, then the mask is stored in req->result. + */ +static int io_poll_check_events(struct io_kiocb *req) +{ struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *nxt; + struct io_poll_iocb *poll = io_poll_get_single(req); + int v; - if (io_poll_rewait(req, &req->poll)) { - spin_unlock_irq(&ctx->completion_lock); - } else { - hash_del(&req->hash_node); - io_poll_complete(req, req->result, 0); - spin_unlock_irq(&ctx->completion_lock); + /* req->task == current here, checking PF_EXITING is safe */ + if (unlikely(req->task->flags & PF_EXITING)) + io_poll_mark_cancelled(req); - nxt = io_put_req_find_next(req); - io_cqring_ev_posted(ctx); - if (nxt) - __io_req_task_submit(nxt); + do { + v = atomic_read(&req->poll_refs); + + /* tw handler should be the owner, and so have some references */ + if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK))) + return 0; + if (v & IO_POLL_CANCEL_FLAG) + return -ECANCELED; + /* + * cqe.res contains only events of the first wake up + * and all others will be lost. Redo vfs_poll() to get + * up-to-date state. + */ + if ((v & IO_POLL_REF_MASK) != 1) + req->result = 0; + if (v & IO_POLL_RETRY_FLAG) { + req->result = 0; + /* + * We won't find new events that came in between + * vfs_poll and the ref put unless we clear the + * flag in advance. + */ + atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); + v &= ~IO_POLL_RETRY_FLAG; + } + + if (!req->result) { + struct poll_table_struct pt = { ._key = poll->events }; + + req->result = vfs_poll(req->file, &pt) & poll->events; + } + + /* multishot, just fill a CQE and proceed */ + if (req->result && !(poll->events & EPOLLONESHOT)) { + __poll_t mask = mangle_poll(req->result & poll->events); + bool filled; + + spin_lock(&ctx->completion_lock); + filled = io_fill_cqe_aux(ctx, req->user_data, mask, + IORING_CQE_F_MORE); + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + if (unlikely(!filled)) + return -ECANCELED; + io_cqring_ev_posted(ctx); + } else if (req->result) { + return 0; + } + + /* force the next iteration to vfs_poll() */ + req->result = 0; + + /* + * Release all references, retry if someone tried to restart + * task_work while we were executing it.
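As a rough model of the release step described just above: the owner drops every reference it observed, and a nonzero remainder means another wakeup arrived mid-run, so it loops again. The same protocol in C11 atomics (an editorial sketch with simplified names, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

#define REF_MASK ((1u << 30) - 1)	/* mirrors IO_POLL_REF_MASK */

/* whoever bumps the ref part from zero becomes the owner */
static bool poll_get_ownership(atomic_uint *refs)
{
	return !(atomic_fetch_add(refs, 1) & REF_MASK);
}

/* owner's exit: drop every reference observed (v) at the top of the pass */
static bool poll_needs_another_pass(atomic_uint *refs, unsigned int v)
{
	unsigned int old = atomic_fetch_sub(refs, v & REF_MASK);

	/* nonzero remainder: a wakeup raced in, run one more pass */
	return ((old - (v & REF_MASK)) & REF_MASK) != 0;
}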
+ */ } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & + IO_POLL_REF_MASK); + + return 1; +} + +static void io_poll_task_func(struct io_kiocb *req, bool *locked) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + ret = io_poll_check_events(req); + if (ret > 0) + return; + + if (!ret) { + req->result = mangle_poll(req->result & req->poll.events); + } else { + req->result = ret; + req_set_fail(req); } - percpu_ref_put(&ctx->refs); + io_poll_remove_entries(req); + spin_lock(&ctx->completion_lock); + hash_del(&req->hash_node); + spin_unlock(&ctx->completion_lock); + io_req_complete_post(req, req->result, 0); +} + +static void io_apoll_task_func(struct io_kiocb *req, bool *locked) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + ret = io_poll_check_events(req); + if (ret > 0) + return; + + io_poll_remove_entries(req); + spin_lock(&ctx->completion_lock); + hash_del(&req->hash_node); + spin_unlock(&ctx->completion_lock); + + if (!ret) + io_req_task_submit(req, locked); + else + io_req_complete_failed(req, ret); } -static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, - int sync, void *key) +static void __io_poll_execute(struct io_kiocb *req, int mask) +{ + req->result = mask; + if (req->opcode == IORING_OP_POLL_ADD) + req->io_task_work.func = io_poll_task_func; + else + req->io_task_work.func = io_apoll_task_func; + + trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); + io_req_task_work_add(req); +} + +static inline void io_poll_execute(struct io_kiocb *req, int res) +{ + if (io_poll_get_ownership(req)) + __io_poll_execute(req, res); +} + +static void io_poll_cancel_req(struct io_kiocb *req) +{ + io_poll_mark_cancelled(req); + /* kick tw, which should complete the request */ + io_poll_execute(req, 0); +} + +static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, + void *key) { struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = io_poll_get_single(req); + struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb, + wait); __poll_t mask = key_to_poll(key); - /* for instances that support it check for an event match first: */ + if (unlikely(mask & POLLFREE)) { + io_poll_mark_cancelled(req); + /* we have to kick tw in case it's not already */ + io_poll_execute(req, 0); + + /* + * If the waitqueue is being freed early but someone already + * holds ownership over it, we have to tear down the request as + * best we can. That means immediately removing the request from + * its waitqueue and preventing all further accesses to the + * waitqueue via the request. + */ + list_del_init(&poll->wait.entry); + + /* + * Careful: this *must* be the last step, since as soon + * as req->head is NULL'ed out, the request can be + * completed and freed, since aio_poll_complete_work() + * will no longer need to take the waitqueue lock. + */ + smp_store_release(&poll->head, NULL); + return 1; + } + + /* for instances that support it check for an event match first */ if (mask && !(mask & poll->events)) return 0; - list_del_init(&wait->entry); + if (io_poll_get_ownership(req)) { + /* + * If we trigger a multishot poll off our own wakeup path, + * disable multishot as there is a circular dependency between + * CQ posting and triggering the event.
+ */ + if (mask & EPOLL_URING_WAKE) + poll->events |= EPOLLONESHOT; - if (poll && poll->head) { - bool done; - - spin_lock(&poll->head->lock); - done = list_empty(&poll->wait.entry); - if (!done) - list_del_init(&poll->wait.entry); - /* make sure double remove sees this as being gone */ - wait->private = NULL; - spin_unlock(&poll->head->lock); - if (!done) { - /* use wait func handler, so it matches the rq type */ - poll->wait.func(&poll->wait, mode, sync, key); - } + __io_poll_execute(req, mask); } - refcount_dec(&req->refs); return 1; } -static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, - wait_queue_func_t wake_func) -{ - poll->head = NULL; - poll->done = false; - poll->canceled = false; - poll->events = events; - INIT_LIST_HEAD(&poll->wait.entry); - init_waitqueue_func_entry(&poll->wait, wake_func); -} - static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, struct wait_queue_head *head, struct io_poll_iocb **poll_ptr) @@ -5098,29 +5625,31 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, * if this happens. */ if (unlikely(pt->nr_entries)) { - struct io_poll_iocb *poll_one = poll; + struct io_poll_iocb *first = poll; + /* double add on the same waitqueue head, ignore */ + if (first->head == head) + return; /* already have a 2nd entry, fail a third attempt */ if (*poll_ptr) { + if ((*poll_ptr)->head == head) + return; pt->error = -EINVAL; return; } - /* double add on the same waitqueue head, ignore */ - if (poll->head == head) - return; + poll = kmalloc(sizeof(*poll), GFP_ATOMIC); if (!poll) { pt->error = -ENOMEM; return; } - io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); - refcount_inc(&req->refs); - poll->wait.private = req; + io_init_poll_iocb(poll, first->events, first->wait.func); *poll_ptr = poll; } pt->nr_entries++; poll->head = head; + poll->wait.private = req; if (poll->events & EPOLLEXCLUSIVE) add_wait_queue_exclusive(head, &poll->wait); @@ -5128,83 +5657,23 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, add_wait_queue(head, &poll->wait); } -static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, +static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, struct poll_table_struct *p) { struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); - struct async_poll *apoll = pt->req->apoll; - - __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); -} - -static void io_async_task_func(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct async_poll *apoll = req->apoll; - struct io_ring_ctx *ctx = req->ctx; - - trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); - - if (io_poll_rewait(req, &apoll->poll)) { - spin_unlock_irq(&ctx->completion_lock); - percpu_ref_put(&ctx->refs); - return; - } - - /* If req is still hashed, it cannot have been canceled. Don't check. 
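For files whose ->poll() method registers on more than one waitqueue, the reworked __io_queue_proc() above decides per wakeup head (an editorial summary of the hunk, not added behavior):

	first waitqueue head     use the io_poll_iocb embedded in the request
	same head seen again     ignore the duplicate registration
	second distinct head     kmalloc(GFP_ATOMIC) a second io_poll_iocb into *poll_ptr
	third distinct head      fail with -EINVAL; two entries are the supported maximum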
*/ - if (hash_hashed(&req->hash_node)) - hash_del(&req->hash_node); - - io_poll_remove_double(req); - spin_unlock_irq(&ctx->completion_lock); - - if (!READ_ONCE(apoll->poll.canceled)) - __io_req_task_submit(req); - else - __io_req_task_cancel(req, -ECANCELED); - - percpu_ref_put(&ctx->refs); - kfree(apoll->double_poll); - kfree(apoll); -} - -static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, - void *key) -{ - struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = &req->apoll->poll; - - trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data, - key_to_poll(key)); - - return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func); -} - -static void io_poll_req_insert(struct io_kiocb *req) -{ - struct io_ring_ctx *ctx = req->ctx; - struct hlist_head *list; - list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; - hlist_add_head(&req->hash_node, list); + __io_queue_proc(&pt->req->poll, pt, head, + (struct io_poll_iocb **) &pt->req->async_data); } -static __poll_t __io_arm_poll_handler(struct io_kiocb *req, - struct io_poll_iocb *poll, - struct io_poll_table *ipt, __poll_t mask, - wait_queue_func_t wake_func) - __acquires(&ctx->completion_lock) +static int __io_arm_poll_handler(struct io_kiocb *req, + struct io_poll_iocb *poll, + struct io_poll_table *ipt, __poll_t mask) { struct io_ring_ctx *ctx = req->ctx; - bool cancel = false; - - if (req->file->f_op->may_pollfree) { - spin_lock_irq(&ctx->completion_lock); - return -EOPNOTSUPP; - } INIT_HLIST_NODE(&req->hash_node); - io_init_poll_iocb(poll, mask, wake_func); + io_init_poll_iocb(poll, mask, io_poll_wake); poll->file = req->file; poll->wait.private = req; @@ -5213,169 +5682,143 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req, ipt->error = 0; ipt->nr_entries = 0; + /* + * Take the ownership to delay any tw execution up until we're done + * with poll arming. see io_poll_get_ownership(). + */ + atomic_set(&req->poll_refs, 1); mask = vfs_poll(req->file, &ipt->pt) & poll->events; - if (unlikely(!ipt->nr_entries) && !ipt->error) - ipt->error = -EINVAL; - - spin_lock_irq(&ctx->completion_lock); - if (ipt->error) - io_poll_remove_double(req); - if (likely(poll->head)) { - spin_lock(&poll->head->lock); - if (unlikely(list_empty(&poll->wait.entry))) { - if (ipt->error) - cancel = true; + + if (mask && (poll->events & EPOLLONESHOT)) { + io_poll_remove_entries(req); + /* no one else has access to the req, forget about the ref */ + return mask; + } + if (!mask && unlikely(ipt->error || !ipt->nr_entries)) { + io_poll_remove_entries(req); + if (!ipt->error) + ipt->error = -EINVAL; + return 0; + } + + spin_lock(&ctx->completion_lock); + io_poll_req_insert(req); + spin_unlock(&ctx->completion_lock); + + if (mask) { + /* can't multishot if failed, just queue the event we've got */ + if (unlikely(ipt->error || !ipt->nr_entries)) { + poll->events |= EPOLLONESHOT; ipt->error = 0; - mask = 0; } - if (mask || ipt->error) - list_del_init(&poll->wait.entry); - else if (cancel) - WRITE_ONCE(poll->canceled, true); - else if (!poll->done) /* actually waiting for an event */ - io_poll_req_insert(req); - spin_unlock(&poll->head->lock); + __io_poll_execute(req, mask); + return 0; } - return mask; + /* + * Try to release ownership. If we see a change of state, e.g. + * poll was waken up, queue up a tw, it'll deal with it. 
+ */ + if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) + __io_poll_execute(req, 0); + return 0; +} + +static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, + struct poll_table_struct *p) +{ + struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); + struct async_poll *apoll = pt->req->apoll; + + __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); } -static bool io_arm_poll_handler(struct io_kiocb *req) +enum { + IO_APOLL_OK, + IO_APOLL_ABORTED, + IO_APOLL_READY +}; + +static int io_arm_poll_handler(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; struct io_ring_ctx *ctx = req->ctx; struct async_poll *apoll; struct io_poll_table ipt; - __poll_t mask, ret; - int rw; + __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI; + int ret; if (!req->file || !file_can_poll(req->file)) - return false; - if (req->flags & REQ_F_POLLED) - return false; - if (def->pollin) - rw = READ; - else if (def->pollout) - rw = WRITE; - else - return false; - /* if we can't nonblock try, then no point in arming a poll handler */ - if (!io_file_supports_async(req->file, rw)) - return false; - - apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); - if (unlikely(!apoll)) - return false; - apoll->double_poll = NULL; - - req->flags |= REQ_F_POLLED; - req->apoll = apoll; + return IO_APOLL_ABORTED; + if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED) + return IO_APOLL_ABORTED; + if (!def->pollin && !def->pollout) + return IO_APOLL_ABORTED; - mask = 0; - if (def->pollin) + if (def->pollin) { mask |= POLLIN | POLLRDNORM; - if (def->pollout) - mask |= POLLOUT | POLLWRNORM; - - /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ - if ((req->opcode == IORING_OP_RECVMSG) && - (req->sr_msg.msg_flags & MSG_ERRQUEUE)) - mask &= ~POLLIN; - - mask |= POLLERR | POLLPRI; - - ipt.pt._qproc = io_async_queue_proc; - - ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, - io_async_wake); - if (ret || ipt.error) { - io_poll_remove_double(req); - spin_unlock_irq(&ctx->completion_lock); - kfree(apoll->double_poll); - kfree(apoll); - return false; - } - spin_unlock_irq(&ctx->completion_lock); - trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask, - apoll->poll.events); - return true; -} - -static bool __io_poll_remove_one(struct io_kiocb *req, - struct io_poll_iocb *poll) -{ - bool do_complete = false; - spin_lock(&poll->head->lock); - WRITE_ONCE(poll->canceled, true); - if (!list_empty(&poll->wait.entry)) { - list_del_init(&poll->wait.entry); - do_complete = true; + /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ + if ((req->opcode == IORING_OP_RECVMSG) && + (req->sr_msg.msg_flags & MSG_ERRQUEUE)) + mask &= ~POLLIN; + } else { + mask |= POLLOUT | POLLWRNORM; } - spin_unlock(&poll->head->lock); - hash_del(&req->hash_node); - return do_complete; -} -static bool io_poll_remove_one(struct io_kiocb *req) -{ - bool do_complete; - - io_poll_remove_double(req); - - if (req->opcode == IORING_OP_POLL_ADD) { - do_complete = __io_poll_remove_one(req, &req->poll); + if (req->flags & REQ_F_POLLED) { + apoll = req->apoll; + kfree(apoll->double_poll); } else { - struct async_poll *apoll = req->apoll; - - /* non-poll requests have submit ref still */ - do_complete = __io_poll_remove_one(req, &apoll->poll); - if (do_complete) { - io_put_req(req); - kfree(apoll->double_poll); - kfree(apoll); - } + apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); } + if (unlikely(!apoll)) + return IO_APOLL_ABORTED; + apoll->double_poll = NULL; + 
req->apoll = apoll; + req->flags |= REQ_F_POLLED; + ipt.pt._qproc = io_async_queue_proc; - if (do_complete) { - io_cqring_fill_event(req, -ECANCELED); - io_commit_cqring(req->ctx); - req_set_fail_links(req); - io_put_req_deferred(req, 1); - } + ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask); + if (ret || ipt.error) + return ret ? IO_APOLL_READY : IO_APOLL_ABORTED; - return do_complete; + trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, + mask, apoll->poll.events); + return IO_APOLL_OK; } /* * Returns true if we found and killed one or more poll requests */ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, - struct files_struct *files) + bool cancel_all) { struct hlist_node *tmp; struct io_kiocb *req; - int posted = 0, i; + bool found = false; + int i; - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { struct hlist_head *list; list = &ctx->cancel_hash[i]; hlist_for_each_entry_safe(req, tmp, list, hash_node) { - if (io_match_task(req, tsk, files)) - posted += io_poll_remove_one(req); + if (io_match_task_safe(req, tsk, cancel_all)) { + hlist_del_init(&req->hash_node); + io_poll_cancel_req(req); + found = true; + } } } - spin_unlock_irq(&ctx->completion_lock); - - if (posted) - io_cqring_ev_posted(ctx); - - return posted != 0; + spin_unlock(&ctx->completion_lock); + return found; } -static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) +static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr, + bool poll_only) + __must_hold(&ctx->completion_lock) { struct hlist_head *list; struct io_kiocb *req; @@ -5384,107 +5827,161 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) hlist_for_each_entry(req, list, hash_node) { if (sqe_addr != req->user_data) continue; - if (io_poll_remove_one(req)) - return 0; - return -EALREADY; + if (poll_only && req->opcode != IORING_OP_POLL_ADD) + continue; + return req; } - - return -ENOENT; + return NULL; } -static int io_poll_remove_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static bool io_poll_disarm(struct io_kiocb *req) + __must_hold(&ctx->completion_lock) { - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || - sqe->poll_events) - return -EINVAL; - - req->poll.addr = READ_ONCE(sqe->addr); - return 0; + if (!io_poll_get_ownership(req)) + return false; + io_poll_remove_entries(req); + hash_del(&req->hash_node); + return true; } -/* - * Find a running poll command that matches one specified in sqe->addr, - * and remove it if found. 
- */ -static int io_poll_remove(struct io_kiocb *req) +static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr, + bool poll_only) + __must_hold(&ctx->completion_lock) { - struct io_ring_ctx *ctx = req->ctx; - u64 addr; - int ret; - - addr = req->poll.addr; - spin_lock_irq(&ctx->completion_lock); - ret = io_poll_cancel(ctx, addr); - spin_unlock_irq(&ctx->completion_lock); + struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only); - if (ret < 0) - req_set_fail_links(req); - io_req_complete(req, ret); + if (!req) + return -ENOENT; + io_poll_cancel_req(req); return 0; } -static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, - void *key) +static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe, + unsigned int flags) { - struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = &req->poll; + u32 events; - return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func); + events = READ_ONCE(sqe->poll32_events); +#ifdef __BIG_ENDIAN + events = swahw32(events); +#endif + if (!(flags & IORING_POLL_ADD_MULTI)) + events |= EPOLLONESHOT; + return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT)); } -static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, - struct poll_table_struct *p) +static int io_poll_update_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) { - struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); + struct io_poll_update *upd = &req->poll_update; + u32 flags; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) + return -EINVAL; + flags = READ_ONCE(sqe->len); + if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA | + IORING_POLL_ADD_MULTI)) + return -EINVAL; + /* meaningless without update */ + if (flags == IORING_POLL_ADD_MULTI) + return -EINVAL; - __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data); + upd->old_user_data = READ_ONCE(sqe->addr); + upd->update_events = flags & IORING_POLL_UPDATE_EVENTS; + upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA; + + upd->new_user_data = READ_ONCE(sqe->off); + if (!upd->update_user_data && upd->new_user_data) + return -EINVAL; + if (upd->update_events) + upd->events = io_poll_parse_events(sqe, flags); + else if (sqe->poll32_events) + return -EINVAL; + + return 0; } static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_poll_iocb *poll = &req->poll; - u32 events; + u32 flags; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) + if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr) + return -EINVAL; + flags = READ_ONCE(sqe->len); + if (flags & ~IORING_POLL_ADD_MULTI) return -EINVAL; - events = READ_ONCE(sqe->poll32_events); -#ifdef __BIG_ENDIAN - events = swahw32(events); -#endif - poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP | - (events & EPOLLEXCLUSIVE); + io_req_set_refcount(req); + poll->events = io_poll_parse_events(sqe, flags); return 0; } -static int io_poll_add(struct io_kiocb *req) +static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) { struct io_poll_iocb *poll = &req->poll; - struct io_ring_ctx *ctx = req->ctx; struct io_poll_table ipt; - __poll_t mask; + int ret; ipt.pt._qproc = io_poll_queue_proc; - mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, - io_poll_wake); + ret 
= __io_arm_poll_handler(req, &req->poll, &ipt, poll->events); + if (!ret && ipt.error) + req_set_fail(req); + ret = ret ?: ipt.error; + if (ret) + __io_req_complete(req, issue_flags, ret, 0); + return 0; +} - if (mask) { /* no async, we'd stolen it */ - ipt.error = 0; - io_poll_complete(req, mask, 0); +static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *preq; + int ret2, ret = 0; + + spin_lock(&ctx->completion_lock); + preq = io_poll_find(ctx, req->poll_update.old_user_data, true); + if (!preq || !io_poll_disarm(preq)) { + spin_unlock(&ctx->completion_lock); + ret = preq ? -EALREADY : -ENOENT; + goto out; } - spin_unlock_irq(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); - if (mask) { - io_cqring_ev_posted(ctx); - io_put_req(req); + if (req->poll_update.update_events || req->poll_update.update_user_data) { + /* only mask one event flags, keep behavior flags */ + if (req->poll_update.update_events) { + preq->poll.events &= ~0xffff; + preq->poll.events |= req->poll_update.events & 0xffff; + preq->poll.events |= IO_POLL_UNMASK; + } + if (req->poll_update.update_user_data) + preq->user_data = req->poll_update.new_user_data; + + ret2 = io_poll_add(preq, issue_flags); + /* successfully updated, don't complete poll request */ + if (!ret2) + goto out; } - return ipt.error; + req_set_fail(preq); + io_req_complete(preq, -ECANCELED); +out: + if (ret < 0) + req_set_fail(req); + /* complete update request, we're done with it */ + io_req_complete(req, ret); + return 0; +} + +static void io_req_task_timeout(struct io_kiocb *req, bool *locked) +{ + req_set_fail(req); + io_req_complete_post(req, -ETIME, 0); } static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) @@ -5495,88 +5992,182 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) struct io_ring_ctx *ctx = req->ctx; unsigned long flags; - spin_lock_irqsave(&ctx->completion_lock, flags); + spin_lock_irqsave(&ctx->timeout_lock, flags); list_del_init(&req->timeout.list); atomic_set(&req->ctx->cq_timeouts, atomic_read(&req->ctx->cq_timeouts) + 1); + spin_unlock_irqrestore(&ctx->timeout_lock, flags); - io_cqring_fill_event(req, -ETIME); - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - - io_cqring_ev_posted(ctx); - req_set_fail_links(req); - io_put_req(req); + req->io_task_work.func = io_req_task_timeout; + io_req_task_work_add(req); return HRTIMER_NORESTART; } -static int __io_timeout_cancel(struct io_kiocb *req) +static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx, + __u64 user_data) + __must_hold(&ctx->timeout_lock) { - struct io_timeout_data *io = req->async_data; - int ret; + struct io_timeout_data *io; + struct io_kiocb *req; + bool found = false; - ret = hrtimer_try_to_cancel(&io->timer); - if (ret == -1) - return -EALREADY; + list_for_each_entry(req, &ctx->timeout_list, timeout.list) { + found = user_data == req->user_data; + if (found) + break; + } + if (!found) + return ERR_PTR(-ENOENT); + + io = req->async_data; + if (hrtimer_try_to_cancel(&io->timer) == -1) + return ERR_PTR(-EALREADY); list_del_init(&req->timeout.list); + return req; +} - req_set_fail_links(req); - io_cqring_fill_event(req, -ECANCELED); - io_put_req_deferred(req, 1); +static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) + __must_hold(&ctx->completion_lock) + __must_hold(&ctx->timeout_lock) +{ + struct io_kiocb *req = io_timeout_extract(ctx, user_data); + + if (IS_ERR(req)) + return 
PTR_ERR(req); + + req_set_fail(req); + io_fill_cqe_req(req, -ECANCELED, 0); + io_put_req_deferred(req); return 0; } -static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) +static clockid_t io_timeout_get_clock(struct io_timeout_data *data) +{ + switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) { + case IORING_TIMEOUT_BOOTTIME: + return CLOCK_BOOTTIME; + case IORING_TIMEOUT_REALTIME: + return CLOCK_REALTIME; + default: + /* can't happen, vetted at prep time */ + WARN_ON_ONCE(1); + fallthrough; + case 0: + return CLOCK_MONOTONIC; + } +} + +static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data, + struct timespec64 *ts, enum hrtimer_mode mode) + __must_hold(&ctx->timeout_lock) { + struct io_timeout_data *io; struct io_kiocb *req; - int ret = -ENOENT; + bool found = false; - list_for_each_entry(req, &ctx->timeout_list, timeout.list) { - if (user_data == req->user_data) { - ret = 0; + list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) { + found = user_data == req->user_data; + if (found) break; - } } + if (!found) + return -ENOENT; - if (ret == -ENOENT) - return ret; + io = req->async_data; + if (hrtimer_try_to_cancel(&io->timer) == -1) + return -EALREADY; + hrtimer_init(&io->timer, io_timeout_get_clock(io), mode); + io->timer.function = io_link_timeout_fn; + hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode); + return 0; +} + +static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data, + struct timespec64 *ts, enum hrtimer_mode mode) + __must_hold(&ctx->timeout_lock) +{ + struct io_kiocb *req = io_timeout_extract(ctx, user_data); + struct io_timeout_data *data; + + if (IS_ERR(req)) + return PTR_ERR(req); - return __io_timeout_cancel(req); + req->timeout.off = 0; /* noseq */ + data = req->async_data; + list_add_tail(&req->timeout.list, &ctx->timeout_list); + hrtimer_init(&data->timer, io_timeout_get_clock(data), mode); + data->timer.function = io_timeout_fn; + hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode); + return 0; } static int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { + struct io_timeout_rem *tr = &req->timeout_rem; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) return -EINVAL; - if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags || - sqe->splice_fd_in) + if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in) return -EINVAL; - req->timeout_rem.addr = READ_ONCE(sqe->addr); + tr->ltimeout = false; + tr->addr = READ_ONCE(sqe->addr); + tr->flags = READ_ONCE(sqe->timeout_flags); + if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) { + if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1) + return -EINVAL; + if (tr->flags & IORING_LINK_TIMEOUT_UPDATE) + tr->ltimeout = true; + if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS)) + return -EINVAL; + if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2))) + return -EFAULT; + } else if (tr->flags) { + /* timeout removal doesn't support flags */ + return -EINVAL; + } + return 0; } +static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags) +{ + return (flags & IORING_TIMEOUT_ABS) ? 
HRTIMER_MODE_ABS + : HRTIMER_MODE_REL; +} + /* * Remove or update an existing timeout command */ -static int io_timeout_remove(struct io_kiocb *req) +static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) { + struct io_timeout_rem *tr = &req->timeout_rem; struct io_ring_ctx *ctx = req->ctx; int ret; - spin_lock_irq(&ctx->completion_lock); - ret = io_timeout_cancel(ctx, req->timeout_rem.addr); + if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { + spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); + ret = io_timeout_cancel(ctx, tr->addr); + spin_unlock_irq(&ctx->timeout_lock); + spin_unlock(&ctx->completion_lock); + } else { + enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags); + + spin_lock_irq(&ctx->timeout_lock); + if (tr->ltimeout) + ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode); + else + ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode); + spin_unlock_irq(&ctx->timeout_lock); + } - io_cqring_fill_event(req, ret); - io_commit_cqring(ctx); - spin_unlock_irq(&ctx->completion_lock); - io_cqring_ev_posted(ctx); if (ret < 0) - req_set_fail_links(req); - io_put_req(req); + req_set_fail(req); + io_req_complete_post(req, ret, 0); return 0; } @@ -5595,38 +6186,52 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (off && is_timeout_link) return -EINVAL; flags = READ_ONCE(sqe->timeout_flags); - if (flags & ~IORING_TIMEOUT_ABS) + if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK)) + return -EINVAL; + /* more than one clock specified is invalid, obviously */ + if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1) return -EINVAL; + INIT_LIST_HEAD(&req->timeout.list); req->timeout.off = off; + if (unlikely(off && !req->ctx->off_timeout_used)) + req->ctx->off_timeout_used = true; if (!req->async_data && io_alloc_async_data(req)) return -ENOMEM; data = req->async_data; data->req = req; + data->flags = flags; if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) return -EFAULT; - if (flags & IORING_TIMEOUT_ABS) - data->mode = HRTIMER_MODE_ABS; - else - data->mode = HRTIMER_MODE_REL; - INIT_LIST_HEAD(&req->timeout.list); - hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); + data->mode = io_translate_timeout_mode(flags); + hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode); + + if (is_timeout_link) { + struct io_submit_link *link = &req->ctx->submit_state.link; + + if (!link->head) + return -EINVAL; + if (link->last->opcode == IORING_OP_LINK_TIMEOUT) + return -EINVAL; + req->timeout.head = link->last; + link->last->flags |= REQ_F_ARM_LTIMEOUT; + } return 0; } -static int io_timeout(struct io_kiocb *req) +static int io_timeout(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_timeout_data *data = req->async_data; struct list_head *entry; u32 tail, off = req->timeout.off; - spin_lock_irq(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); /* * sqe->off holds how many events that need to occur for this @@ -5665,23 +6270,34 @@ static int io_timeout(struct io_kiocb *req) list_add(&req->timeout.list, entry); data->timer.function = io_timeout_fn; hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); - spin_unlock_irq(&ctx->completion_lock); + spin_unlock_irq(&ctx->timeout_lock); return 0; } +struct io_cancel_data { + struct io_ring_ctx *ctx; + u64 user_data; +}; + static bool io_cancel_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, 
work); + struct io_cancel_data *cd = data; - return req->user_data == (unsigned long) data; + return req->ctx == cd->ctx && req->user_data == cd->user_data; } -static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) +static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data, + struct io_ring_ctx *ctx) { + struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, }; enum io_wq_cancel cancel_ret; int ret = 0; - cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false); + if (!tctx || !tctx->io_wq) + return -ENOENT; + + cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false); switch (cancel_ret) { case IO_WQ_CANCEL_OK: ret = 0; @@ -5697,35 +6313,27 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) return ret; } -static void io_async_find_and_cancel(struct io_ring_ctx *ctx, - struct io_kiocb *req, __u64 sqe_addr, - int success_ret) +static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr) { - unsigned long flags; + struct io_ring_ctx *ctx = req->ctx; int ret; - ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr); - if (ret != -ENOENT) { - spin_lock_irqsave(&ctx->completion_lock, flags); - goto done; - } + WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current); - spin_lock_irqsave(&ctx->completion_lock, flags); - ret = io_timeout_cancel(ctx, sqe_addr); + ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx); if (ret != -ENOENT) - goto done; - ret = io_poll_cancel(ctx, sqe_addr); -done: - if (!ret) - ret = success_ret; - io_cqring_fill_event(req, ret); - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - io_cqring_ev_posted(ctx); + return ret; - if (ret < 0) - req_set_fail_links(req); - io_put_req(req); + spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); + ret = io_timeout_cancel(ctx, sqe_addr); + spin_unlock_irq(&ctx->timeout_lock); + if (ret != -ENOENT) + goto out; + ret = io_poll_cancel(ctx, sqe_addr, false); +out: + spin_unlock(&ctx->completion_lock); + return ret; } static int io_async_cancel_prep(struct io_kiocb *req, @@ -5743,52 +6351,72 @@ static int io_async_cancel_prep(struct io_kiocb *req, return 0; } -static int io_async_cancel(struct io_kiocb *req) +static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; + u64 sqe_addr = req->cancel.addr; + struct io_tctx_node *node; + int ret; + + ret = io_try_cancel_userdata(req, sqe_addr); + if (ret != -ENOENT) + goto done; + + /* slow path, try all io-wq's */ + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + ret = -ENOENT; + list_for_each_entry(node, &ctx->tctx_list, ctx_node) { + struct io_uring_task *tctx = node->task->io_uring; - io_async_find_and_cancel(ctx, req, req->cancel.addr, 0); + ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); + if (ret != -ENOENT) + break; + } + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); +done: + if (ret < 0) + req_set_fail(req); + io_req_complete_post(req, ret, 0); return 0; } -static int io_files_update_prep(struct io_kiocb *req, +static int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL)) - return -EINVAL; if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) return -EINVAL; - if (sqe->ioprio || sqe->rw_flags) + if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in) return -EINVAL; - req->files_update.offset = 
READ_ONCE(sqe->off); - req->files_update.nr_args = READ_ONCE(sqe->len); - if (!req->files_update.nr_args) + req->rsrc_update.offset = READ_ONCE(sqe->off); + req->rsrc_update.nr_args = READ_ONCE(sqe->len); + if (!req->rsrc_update.nr_args) return -EINVAL; - req->files_update.arg = READ_ONCE(sqe->addr); + req->rsrc_update.arg = READ_ONCE(sqe->addr); return 0; } -static int io_files_update(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; - struct io_uring_files_update up; + struct io_uring_rsrc_update2 up; int ret; - if (force_nonblock) - return -EAGAIN; - - up.offset = req->files_update.offset; - up.fds = req->files_update.arg; + up.offset = req->rsrc_update.offset; + up.data = req->rsrc_update.arg; + up.nr = 0; + up.tags = 0; + up.resv = 0; + up.resv2 = 0; - mutex_lock(&ctx->uring_lock); - ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args); - mutex_unlock(&ctx->uring_lock); + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE, + &up, req->rsrc_update.nr_args); + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); if (ret < 0) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } @@ -5808,11 +6436,11 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) case IORING_OP_POLL_ADD: return io_poll_add_prep(req, sqe); case IORING_OP_POLL_REMOVE: - return io_poll_remove_prep(req, sqe); + return io_poll_update_prep(req, sqe); case IORING_OP_FSYNC: - return io_prep_fsync(req, sqe); + return io_fsync_prep(req, sqe); case IORING_OP_SYNC_FILE_RANGE: - return io_prep_sfr(req, sqe); + return io_sfr_prep(req, sqe); case IORING_OP_SENDMSG: case IORING_OP_SEND: return io_sendmsg_prep(req, sqe); @@ -5838,7 +6466,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) case IORING_OP_CLOSE: return io_close_prep(req, sqe); case IORING_OP_FILES_UPDATE: - return io_files_update_prep(req, sqe); + return io_rsrc_update_prep(req, sqe); case IORING_OP_STATX: return io_statx_prep(req, sqe); case IORING_OP_FADVISE: @@ -5857,100 +6485,131 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_remove_buffers_prep(req, sqe); case IORING_OP_TEE: return io_tee_prep(req, sqe); + case IORING_OP_SHUTDOWN: + return io_shutdown_prep(req, sqe); + case IORING_OP_RENAMEAT: + return io_renameat_prep(req, sqe); + case IORING_OP_UNLINKAT: + return io_unlinkat_prep(req, sqe); } printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", req->opcode); - return-EINVAL; + return -EINVAL; } -static int io_req_defer_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static int io_req_prep_async(struct io_kiocb *req) { - if (!sqe) + if (!io_op_defs[req->opcode].needs_async_setup) return 0; + if (WARN_ON_ONCE(req->async_data)) + return -EFAULT; if (io_alloc_async_data(req)) return -EAGAIN; - return io_req_prep(req, sqe); + + switch (req->opcode) { + case IORING_OP_READV: + return io_rw_prep_async(req, READ); + case IORING_OP_WRITEV: + return io_rw_prep_async(req, WRITE); + case IORING_OP_SENDMSG: + return io_sendmsg_prep_async(req); + case IORING_OP_RECVMSG: + return io_recvmsg_prep_async(req); + case IORING_OP_CONNECT: + return io_connect_prep_async(req); + } + printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n", + 
req->opcode); + return -EFAULT; } static u32 io_get_sequence(struct io_kiocb *req) { - struct io_kiocb *pos; - struct io_ring_ctx *ctx = req->ctx; - u32 total_submitted, nr_reqs = 1; - - if (req->flags & REQ_F_LINK_HEAD) - list_for_each_entry(pos, &req->link_list, link_list) - nr_reqs++; + u32 seq = req->ctx->cached_sq_head; - total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped; - return total_submitted - nr_reqs; + /* need original cached_sq_head, but it was increased for each req */ + io_for_each_link(req, req) + seq--; + return seq; } -static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static bool io_drain_req(struct io_kiocb *req) { + struct io_kiocb *pos; struct io_ring_ctx *ctx = req->ctx; struct io_defer_entry *de; int ret; u32 seq; + if (req->flags & REQ_F_FAIL) { + io_req_complete_fail_submit(req); + return true; + } + + /* + * If we need to drain a request in the middle of a link, drain the + * head request and the next request/link after the current link. + * Considering sequential execution of links, IOSQE_IO_DRAIN will be + * maintained for every request of our link. + */ + if (ctx->drain_next) { + req->flags |= REQ_F_IO_DRAIN; + ctx->drain_next = false; + } + /* not interested in head, start from the first linked */ + io_for_each_link(pos, req->link) { + if (pos->flags & REQ_F_IO_DRAIN) { + ctx->drain_next = true; + req->flags |= REQ_F_IO_DRAIN; + break; + } + } + /* Still need defer if there is pending req in defer list. */ + spin_lock(&ctx->completion_lock); if (likely(list_empty_careful(&ctx->defer_list) && - !(req->flags & REQ_F_IO_DRAIN))) - return 0; + !(req->flags & REQ_F_IO_DRAIN))) { + spin_unlock(&ctx->completion_lock); + ctx->drain_active = false; + return false; + } + spin_unlock(&ctx->completion_lock); seq = io_get_sequence(req); /* Still a chance to pass the sequence check */ if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) - return 0; + return false; - if (!req->async_data) { - ret = io_req_defer_prep(req, sqe); - if (ret) - return ret; - } + ret = io_req_prep_async(req); + if (ret) + goto fail; io_prep_async_link(req); de = kmalloc(sizeof(*de), GFP_KERNEL); - if (!de) - return -ENOMEM; + if (!de) { + ret = -ENOMEM; +fail: + io_req_complete_failed(req, ret); + return true; + } - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { - spin_unlock_irq(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); kfree(de); - io_queue_async_work(req); - return -EIOCBQUEUED; + io_queue_async_work(req, NULL); + return true; } trace_io_uring_defer(ctx, req, req->user_data); de->req = req; de->seq = seq; list_add_tail(&de->list, &ctx->defer_list); - spin_unlock_irq(&ctx->completion_lock); - return -EIOCBQUEUED; -} - -static void io_req_drop_files(struct io_kiocb *req) -{ - struct io_ring_ctx *ctx = req->ctx; - struct io_uring_task *tctx = req->task->io_uring; - unsigned long flags; - - if (req->work.flags & IO_WQ_WORK_FILES) { - put_files_struct(req->work.identity->files); - put_nsproxy(req->work.identity->nsproxy); - } - spin_lock_irqsave(&ctx->inflight_lock, flags); - list_del(&req->inflight_entry); - spin_unlock_irqrestore(&ctx->inflight_lock, flags); - req->flags &= ~REQ_F_INFLIGHT; - req->work.flags &= ~IO_WQ_WORK_FILES; - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); + spin_unlock(&ctx->completion_lock); + return true; } -static void __io_clean_op(struct io_kiocb *req) +static void io_clean_op(struct 
io_kiocb *req) { if (req->flags & REQ_F_BUFFER_SELECTED) { switch (req->opcode) { @@ -5964,7 +6623,6 @@ static void __io_clean_op(struct io_kiocb *req) kfree(req->sr_msg.kbuf); break; } - req->flags &= ~REQ_F_BUFFER_SELECTED; } if (req->flags & REQ_F_NEED_CLEANUP) { @@ -5976,458 +6634,556 @@ static void __io_clean_op(struct io_kiocb *req) case IORING_OP_WRITE_FIXED: case IORING_OP_WRITE: { struct io_async_rw *io = req->async_data; - if (io->free_iovec) - kfree(io->free_iovec); + + kfree(io->free_iovec); break; } case IORING_OP_RECVMSG: case IORING_OP_SENDMSG: { struct io_async_msghdr *io = req->async_data; - if (io->iov != io->fast_iov) - kfree(io->iov); + + kfree(io->free_iov); break; } - case IORING_OP_SPLICE: - case IORING_OP_TEE: - io_put_file(req, req->splice.file_in, - (req->splice.flags & SPLICE_F_FD_IN_FIXED)); - break; case IORING_OP_OPENAT: case IORING_OP_OPENAT2: if (req->open.filename) putname(req->open.filename); break; + case IORING_OP_RENAMEAT: + putname(req->rename.oldpath); + putname(req->rename.newpath); + break; + case IORING_OP_UNLINKAT: + putname(req->unlink.filename); + break; } - req->flags &= ~REQ_F_NEED_CLEANUP; } + if ((req->flags & REQ_F_POLLED) && req->apoll) { + kfree(req->apoll->double_poll); + kfree(req->apoll); + req->apoll = NULL; + } + if (req->flags & REQ_F_INFLIGHT) { + struct io_uring_task *tctx = req->task->io_uring; + + atomic_dec(&tctx->inflight_tracked); + } + if (req->flags & REQ_F_CREDS) + put_cred(req->creds); + + req->flags &= ~IO_REQ_CLEAN_FLAGS; } -static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; + const struct cred *creds = NULL; int ret; + if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) + creds = override_creds(req->creds); + switch (req->opcode) { case IORING_OP_NOP: - ret = io_nop(req, cs); + ret = io_nop(req, issue_flags); break; case IORING_OP_READV: case IORING_OP_READ_FIXED: case IORING_OP_READ: - ret = io_read(req, force_nonblock, cs); + ret = io_read(req, issue_flags); break; case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: case IORING_OP_WRITE: - ret = io_write(req, force_nonblock, cs); + ret = io_write(req, issue_flags); break; case IORING_OP_FSYNC: - ret = io_fsync(req, force_nonblock); + ret = io_fsync(req, issue_flags); break; case IORING_OP_POLL_ADD: - ret = io_poll_add(req); + ret = io_poll_add(req, issue_flags); break; case IORING_OP_POLL_REMOVE: - ret = io_poll_remove(req); + ret = io_poll_update(req, issue_flags); break; case IORING_OP_SYNC_FILE_RANGE: - ret = io_sync_file_range(req, force_nonblock); + ret = io_sync_file_range(req, issue_flags); break; case IORING_OP_SENDMSG: - ret = io_sendmsg(req, force_nonblock, cs); + ret = io_sendmsg(req, issue_flags); break; case IORING_OP_SEND: - ret = io_send(req, force_nonblock, cs); + ret = io_send(req, issue_flags); break; case IORING_OP_RECVMSG: - ret = io_recvmsg(req, force_nonblock, cs); + ret = io_recvmsg(req, issue_flags); break; case IORING_OP_RECV: - ret = io_recv(req, force_nonblock, cs); + ret = io_recv(req, issue_flags); break; case IORING_OP_TIMEOUT: - ret = io_timeout(req); + ret = io_timeout(req, issue_flags); break; case IORING_OP_TIMEOUT_REMOVE: - ret = io_timeout_remove(req); + ret = io_timeout_remove(req, issue_flags); break; case IORING_OP_ACCEPT: - ret = io_accept(req, force_nonblock, cs); + ret = io_accept(req, issue_flags); break; case IORING_OP_CONNECT: - ret = io_connect(req, 
force_nonblock, cs); + ret = io_connect(req, issue_flags); break; case IORING_OP_ASYNC_CANCEL: - ret = io_async_cancel(req); + ret = io_async_cancel(req, issue_flags); break; case IORING_OP_FALLOCATE: - ret = io_fallocate(req, force_nonblock); + ret = io_fallocate(req, issue_flags); break; case IORING_OP_OPENAT: - ret = io_openat(req, force_nonblock); + ret = io_openat(req, issue_flags); break; case IORING_OP_CLOSE: - ret = io_close(req, force_nonblock, cs); + ret = io_close(req, issue_flags); break; case IORING_OP_FILES_UPDATE: - ret = io_files_update(req, force_nonblock, cs); + ret = io_files_update(req, issue_flags); break; case IORING_OP_STATX: - ret = io_statx(req, force_nonblock); + ret = io_statx(req, issue_flags); break; case IORING_OP_FADVISE: - ret = io_fadvise(req, force_nonblock); + ret = io_fadvise(req, issue_flags); break; case IORING_OP_MADVISE: - ret = io_madvise(req, force_nonblock); + ret = io_madvise(req, issue_flags); break; case IORING_OP_OPENAT2: - ret = io_openat2(req, force_nonblock); + ret = io_openat2(req, issue_flags); break; case IORING_OP_EPOLL_CTL: - ret = io_epoll_ctl(req, force_nonblock, cs); + ret = io_epoll_ctl(req, issue_flags); break; case IORING_OP_SPLICE: - ret = io_splice(req, force_nonblock); + ret = io_splice(req, issue_flags); break; case IORING_OP_PROVIDE_BUFFERS: - ret = io_provide_buffers(req, force_nonblock, cs); + ret = io_provide_buffers(req, issue_flags); break; case IORING_OP_REMOVE_BUFFERS: - ret = io_remove_buffers(req, force_nonblock, cs); + ret = io_remove_buffers(req, issue_flags); break; case IORING_OP_TEE: - ret = io_tee(req, force_nonblock); + ret = io_tee(req, issue_flags); + break; + case IORING_OP_SHUTDOWN: + ret = io_shutdown(req, issue_flags); + break; + case IORING_OP_RENAMEAT: + ret = io_renameat(req, issue_flags); + break; + case IORING_OP_UNLINKAT: + ret = io_unlinkat(req, issue_flags); break; default: ret = -EINVAL; break; } + if (creds) + revert_creds(creds); if (ret) return ret; - /* If the op doesn't have a file, we're not polling for it */ - if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) { - const bool in_async = io_wq_current_is_worker(); - - /* workqueue context doesn't hold uring_lock, grab it now */ - if (in_async) - mutex_lock(&ctx->uring_lock); - + if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) io_iopoll_req_issued(req); - if (in_async) - mutex_unlock(&ctx->uring_lock); - } - return 0; } -static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work) +static struct io_wq_work *io_wq_free_work(struct io_wq_work *work) +{ + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + + req = io_put_req_find_next(req); + return req ? 
&req->work : NULL; +} + +static void io_wq_submit_work(struct io_wq_work *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_kiocb *timeout; int ret = 0; + /* one will be dropped by ->io_free_work() after returning to io-wq */ + if (!(req->flags & REQ_F_REFCOUNT)) + __io_req_set_refcount(req, 2); + else + req_ref_get(req); + timeout = io_prep_linked_timeout(req); if (timeout) io_queue_linked_timeout(timeout); - /* if NO_CANCEL is set, we must still run the work */ - if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) == - IO_WQ_WORK_CANCEL) { + /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ + if (work->flags & IO_WQ_WORK_CANCEL) ret = -ECANCELED; - } if (!ret) { do { - ret = io_issue_sqe(req, false, NULL); + ret = io_issue_sqe(req, 0); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't * wait for request slots on the block side. */ - if (ret != -EAGAIN) + if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) break; cond_resched(); } while (1); } - if (ret) { - struct io_ring_ctx *lock_ctx = NULL; - - if (req->ctx->flags & IORING_SETUP_IOPOLL) - lock_ctx = req->ctx; - - /* - * io_iopoll_complete() does not hold completion_lock to - * complete polled io, so here for polled io, we can not call - * io_req_complete() directly, otherwise there maybe concurrent - * access to cqring, defer_list, etc, which is not safe. Given - * that io_iopoll_complete() is always called under uring_lock, - * so here for polled io, we also get uring_lock to complete - * it. - */ - if (lock_ctx) - mutex_lock(&lock_ctx->uring_lock); - - req_set_fail_links(req); - io_req_complete(req, ret); - - if (lock_ctx) - mutex_unlock(&lock_ctx->uring_lock); - } + /* avoid locking problems by failing it from a clean context */ + if (ret) + io_req_task_queue_fail(req, ret); +} - return io_steal_work(req); +static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table, + unsigned i) +{ + return &table->files[i]; } static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, int index) { - struct fixed_file_table *table; + struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index); - table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT]; - return table->files[index & IORING_FILE_TABLE_MASK]; + return (struct file *) (slot->file_ptr & FFS_MASK); } -static struct file *io_file_get(struct io_submit_state *state, - struct io_kiocb *req, int fd, bool fixed) +static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file) +{ + unsigned long file_ptr = (unsigned long) file; + + if (__io_file_supports_nowait(file, READ)) + file_ptr |= FFS_ASYNC_READ; + if (__io_file_supports_nowait(file, WRITE)) + file_ptr |= FFS_ASYNC_WRITE; + if (S_ISREG(file_inode(file)->i_mode)) + file_ptr |= FFS_ISREG; + file_slot->file_ptr = file_ptr; +} + +static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx, + struct io_kiocb *req, int fd) { - struct io_ring_ctx *ctx = req->ctx; struct file *file; + unsigned long file_ptr; - if (fixed) { - if (unlikely((unsigned int)fd >= ctx->nr_user_files)) - return NULL; - fd = array_index_nospec(fd, ctx->nr_user_files); - file = io_file_from_index(ctx, fd); - if (file) { - req->fixed_file_refs = &ctx->file_data->node->refs; - percpu_ref_get(req->fixed_file_refs); - } - } else { - trace_io_uring_file_get(ctx, fd); - file = __io_file_get(state, fd); - } + if (unlikely((unsigned int)fd >= ctx->nr_user_files)) + 
return NULL; + fd = array_index_nospec(fd, ctx->nr_user_files); + file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr; + file = (struct file *) (file_ptr & FFS_MASK); + file_ptr &= ~FFS_MASK; + /* mask in overlapping REQ_F and FFS bits */ + req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); + io_req_set_rsrc_node(req); + return file; +} - if (file && file->f_op == &io_uring_fops && - !(req->flags & REQ_F_INFLIGHT)) { - io_req_init_async(req); - req->flags |= REQ_F_INFLIGHT; +static struct file *io_file_get_normal(struct io_ring_ctx *ctx, + struct io_kiocb *req, int fd) +{ + struct file *file = fget(fd); - spin_lock_irq(&ctx->inflight_lock); - list_add(&req->inflight_entry, &ctx->inflight_list); - spin_unlock_irq(&ctx->inflight_lock); - } + trace_io_uring_file_get(ctx, fd); + /* we don't allow fixed io_uring files */ + if (file && unlikely(file->f_op == &io_uring_fops)) + io_req_track_inflight(req); return file; } -static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, - int fd) +static inline struct file *io_file_get(struct io_ring_ctx *ctx, + struct io_kiocb *req, int fd, bool fixed) { - bool fixed; + if (fixed) + return io_file_get_fixed(ctx, req, fd); + else + return io_file_get_normal(ctx, req, fd); +} - fixed = (req->flags & REQ_F_FIXED_FILE) != 0; - if (unlikely(!fixed && io_async_submit(req->ctx))) - return -EBADF; +static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) +{ + struct io_kiocb *prev = req->timeout.prev; + int ret = -ENOENT; - req->file = io_file_get(state, req, fd, fixed); - if (req->file || io_op_defs[req->opcode].needs_file_no_error) - return 0; - return -EBADF; + if (prev) { + if (!(req->task->flags & PF_EXITING)) + ret = io_try_cancel_userdata(req, prev->user_data); + io_req_complete_post(req, ret ?: -ETIME, 0); + io_put_req(prev); + } else { + io_req_complete_post(req, -ETIME, 0); + } } static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) { struct io_timeout_data *data = container_of(timer, struct io_timeout_data, timer); - struct io_kiocb *req = data->req; + struct io_kiocb *prev, *req = data->req; struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *prev = NULL; unsigned long flags; - spin_lock_irqsave(&ctx->completion_lock, flags); + spin_lock_irqsave(&ctx->timeout_lock, flags); + prev = req->timeout.head; + req->timeout.head = NULL; /* * We don't expect the list to be empty, that will only happen if we * race with the completion of the linked work. 
*/ - if (!list_empty(&req->link_list)) { - prev = list_entry(req->link_list.prev, struct io_kiocb, - link_list); - list_del_init(&req->link_list); - if (!refcount_inc_not_zero(&prev->refs)) + if (prev) { + io_remove_next_linked(prev); + if (!req_ref_inc_not_zero(prev)) prev = NULL; } - list_del(&req->timeout.list); - spin_unlock_irqrestore(&ctx->completion_lock, flags); + req->timeout.prev = prev; + spin_unlock_irqrestore(&ctx->timeout_lock, flags); - if (prev) { - io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); - io_put_req_deferred(prev, 1); - } else { - io_cqring_add_event(req, -ETIME, 0); - io_put_req_deferred(req, 1); - } + req->io_task_work.func = io_req_task_link_timeout; + io_req_task_work_add(req); return HRTIMER_NORESTART; } -static void __io_queue_linked_timeout(struct io_kiocb *req) +static void io_queue_linked_timeout(struct io_kiocb *req) { + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->timeout_lock); /* - * If the list is now empty, then our linked request finished before - * we got a chance to setup the timer + * If the back reference is NULL, then our linked request finished + * before we got a chance to setup the timer */ - if (!list_empty(&req->link_list)) { + if (req->timeout.head) { struct io_timeout_data *data = req->async_data; data->timer.function = io_link_timeout_fn; hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); + list_add_tail(&req->timeout.list, &ctx->ltimeout_list); } -} - -static void io_queue_linked_timeout(struct io_kiocb *req) -{ - struct io_ring_ctx *ctx = req->ctx; - - spin_lock_irq(&ctx->completion_lock); - __io_queue_linked_timeout(req); - spin_unlock_irq(&ctx->completion_lock); - + spin_unlock_irq(&ctx->timeout_lock); /* drop submission reference */ io_put_req(req); } -static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) -{ - struct io_kiocb *nxt; - - if (!(req->flags & REQ_F_LINK_HEAD)) - return NULL; - if (req->flags & REQ_F_LINK_TIMEOUT) - return NULL; - - nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, - link_list); - if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT) - return NULL; - - nxt->flags |= REQ_F_LTIMEOUT_ACTIVE; - req->flags |= REQ_F_LINK_TIMEOUT; - return nxt; -} - -static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs) +static void __io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) { struct io_kiocb *linked_timeout; - const struct cred *old_creds = NULL; int ret; -again: - linked_timeout = io_prep_linked_timeout(req); - - if ((req->flags & REQ_F_WORK_INITIALIZED) && - (req->work.flags & IO_WQ_WORK_CREDS) && - req->work.identity->creds != current_cred()) { - if (old_creds) - revert_creds(old_creds); - if (old_creds == req->work.identity->creds) - old_creds = NULL; /* restored original creds */ - else - old_creds = override_creds(req->work.identity->creds); - } - - ret = io_issue_sqe(req, true, cs); +issue_sqe: + ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ - if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { - if (!io_arm_poll_handler(req)) { + if (likely(!ret)) { + if (req->flags & REQ_F_COMPLETE_INLINE) { + struct io_ring_ctx *ctx = req->ctx; + struct io_submit_state *state = &ctx->submit_state; + + state->compl_reqs[state->compl_nr++] = req; + if (state->compl_nr == ARRAY_SIZE(state->compl_reqs)) + io_submit_flush_completions(ctx); + return; + } + + 
linked_timeout = io_prep_linked_timeout(req); + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); + } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { + linked_timeout = io_prep_linked_timeout(req); + + switch (io_arm_poll_handler(req)) { + case IO_APOLL_READY: + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); + goto issue_sqe; + case IO_APOLL_ABORTED: /* * Queued up for async execution, worker will release * submit reference when the iocb is actually submitted. */ - io_queue_async_work(req); + io_queue_async_work(req, NULL); + break; } if (linked_timeout) io_queue_linked_timeout(linked_timeout); - } else if (likely(!ret)) { - /* drop submission reference */ - req = io_put_req_find_next(req); - if (linked_timeout) - io_queue_linked_timeout(linked_timeout); - - if (req) { - if (!(req->flags & REQ_F_FORCE_ASYNC)) - goto again; - io_queue_async_work(req); - } } else { - /* un-prep timeout, so it'll be killed as any other linked */ - req->flags &= ~REQ_F_LINK_TIMEOUT; - req_set_fail_links(req); - io_put_req(req); - io_req_complete(req, ret); + io_req_complete_failed(req, ret); } - - if (old_creds) - revert_creds(old_creds); } -static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_comp_state *cs) +static inline void io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) { - int ret; + if (unlikely(req->ctx->drain_active) && io_drain_req(req)) + return; - ret = io_req_defer(req, sqe); - if (ret) { - if (ret != -EIOCBQUEUED) { -fail_req: - req_set_fail_links(req); - io_put_req(req); - io_req_complete(req, ret); - } - } else if (req->flags & REQ_F_FORCE_ASYNC) { - if (!req->async_data) { - ret = io_req_defer_prep(req, sqe); - if (unlikely(ret)) - goto fail_req; - } - io_queue_async_work(req); + if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { + __io_queue_sqe(req); + } else if (req->flags & REQ_F_FAIL) { + io_req_complete_fail_submit(req); } else { - if (sqe) { - ret = io_req_prep(req, sqe); - if (unlikely(ret)) - goto fail_req; - } - __io_queue_sqe(req, cs); + int ret = io_req_prep_async(req); + + if (unlikely(ret)) + io_req_complete_failed(req, ret); + else + io_queue_async_work(req, NULL); } } -static inline void io_queue_link_head(struct io_kiocb *req, - struct io_comp_state *cs) +/* + * Check SQE restrictions (opcode and flags). + * + * Returns 'true' if SQE is allowed, 'false' otherwise. 
+ */
+static inline bool io_check_restriction(struct io_ring_ctx *ctx,
+					struct io_kiocb *req,
+					unsigned int sqe_flags)
 {
-	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
-		io_put_req(req);
-		io_req_complete(req, -ECANCELED);
-	} else
-		io_queue_sqe(req, NULL, cs);
+	if (likely(!ctx->restricted))
+		return true;
+
+	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
+		return false;
+
+	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
+	    ctx->restrictions.sqe_flags_required)
+		return false;
+
+	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
+			  ctx->restrictions.sqe_flags_required))
+		return false;
+
+	return true;
 }

-static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			 struct io_kiocb **link, struct io_comp_state *cs)
+static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+		       const struct io_uring_sqe *sqe)
+	__must_hold(&ctx->uring_lock)
 {
-	struct io_ring_ctx *ctx = req->ctx;
+	struct io_submit_state *state;
+	unsigned int sqe_flags;
+	int personality, ret = 0;
+
+	/* req is partially pre-initialised, see io_preinit_req() */
+	req->opcode = READ_ONCE(sqe->opcode);
+	/* same numerical values with corresponding REQ_F_*, safe to copy */
+	req->flags = sqe_flags = READ_ONCE(sqe->flags);
+	req->user_data = READ_ONCE(sqe->user_data);
+	req->file = NULL;
+	req->fixed_rsrc_refs = NULL;
+	req->task = current;
+
+	/* enforce forwards compatibility on users */
+	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
+		return -EINVAL;
+	if (unlikely(req->opcode >= IORING_OP_LAST))
+		return -EINVAL;
+	if (!io_check_restriction(ctx, req, sqe_flags))
+		return -EACCES;
+
+	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
+	    !io_op_defs[req->opcode].buffer_select)
+		return -EOPNOTSUPP;
+	if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
+		ctx->drain_active = true;
+
+	personality = READ_ONCE(sqe->personality);
+	if (personality) {
+		req->creds = xa_load(&ctx->personalities, personality);
+		if (!req->creds)
+			return -EINVAL;
+		get_cred(req->creds);
+		req->flags |= REQ_F_CREDS;
+	}
+	state = &ctx->submit_state;
+
+	/*
+	 * Plug now if we have more than 1 IO left after this, and the target
+	 * is potentially a read/write to block based storage.
+	 */
+	if (!state->plug_started && state->ios_left > 1 &&
+	    io_op_defs[req->opcode].plug) {
+		blk_start_plug(&state->plug);
+		state->plug_started = true;
+	}
+
+	if (io_op_defs[req->opcode].needs_file) {
+		req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
+					(sqe_flags & IOSQE_FIXED_FILE));
+		if (unlikely(!req->file))
+			ret = -EBADF;
+	}
+
+	state->ios_left--;
+	return ret;
+}
+
+static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			 const struct io_uring_sqe *sqe)
+	__must_hold(&ctx->uring_lock)
+{
+	struct io_submit_link *link = &ctx->submit_state.link;
 	int ret;

+	ret = io_init_req(ctx, req, sqe);
+	if (unlikely(ret)) {
+fail_req:
+		/* fail even hard links since we don't submit */
+		if (link->head) {
+			/*
+			 * We can tell whether a link req failed or was
+			 * cancelled by checking REQ_F_FAIL, but the head is
+			 * an exception: it may have REQ_F_FAIL set only
+			 * because another req in its chain failed. Use
+			 * req->result to distinguish a head that failed
+			 * itself from one failed because of another req, so
+			 * that the correct ret code can be set for it. Init
+			 * result here to avoid affecting the normal path.
+			 */
+			if (!(link->head->flags & REQ_F_FAIL))
+				req_fail_link_node(link->head, -ECANCELED);
+		} else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
+			/*
+			 * the current req is a normal req, we should return
+			 * error and thus break the submission loop.
+			 */
+			io_req_complete_failed(req, ret);
+			return ret;
+		}
+		req_fail_link_node(req, ret);
+	} else {
+		ret = io_req_prep(req, sqe);
+		if (unlikely(ret))
+			goto fail_req;
+	}
+
+	/* don't need @sqe from now on */
+	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
+				  req->flags, true,
+				  ctx->flags & IORING_SETUP_SQPOLL);
+
 	/*
 	 * If we already have a head request, queue this one for async
 	 * submittal once the head completes. If we don't have a head but
@@ -6435,49 +7191,32 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	 * submitted sync once the chain is complete. If none of those
 	 * conditions are true (normal request), then just queue it.
 	 */
-	if (*link) {
-		struct io_kiocb *head = *link;
-
-		/*
-		 * Taking sequential execution of a link, draining both sides
-		 * of the link also fullfils IOSQE_IO_DRAIN semantics for all
-		 * requests in the link. So, it drains the head and the
-		 * next after the link request. The last one is done via
-		 * drain_next flag to persist the effect across calls.
-		 */
-		if (req->flags & REQ_F_IO_DRAIN) {
-			head->flags |= REQ_F_IO_DRAIN;
-			ctx->drain_next = 1;
-		}
-		ret = io_req_defer_prep(req, sqe);
-		if (unlikely(ret)) {
-			/* fail even hard links since we don't submit */
-			head->flags |= REQ_F_FAIL_LINK;
-			return ret;
+	if (link->head) {
+		struct io_kiocb *head = link->head;
+
+		if (!(req->flags & REQ_F_FAIL)) {
+			ret = io_req_prep_async(req);
+			if (unlikely(ret)) {
+				req_fail_link_node(req, ret);
+				if (!(head->flags & REQ_F_FAIL))
+					req_fail_link_node(head, -ECANCELED);
+			}
 		}
 		trace_io_uring_link(ctx, req, head);
-		list_add_tail(&req->link_list, &head->link_list);
+		link->last->link = req;
+		link->last = req;

 		/* last request of a link, enqueue the link */
 		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
-			io_queue_link_head(head, cs);
-			*link = NULL;
+			link->head = NULL;
+			io_queue_sqe(head);
 		}
 	} else {
-		if (unlikely(ctx->drain_next)) {
-			req->flags |= REQ_F_IO_DRAIN;
-			ctx->drain_next = 0;
-		}
 		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
-			req->flags |= REQ_F_LINK_HEAD;
-			INIT_LIST_HEAD(&req->link_list);
-
-			ret = io_req_defer_prep(req, sqe);
-			if (unlikely(ret))
-				req->flags |= REQ_F_FAIL_LINK;
-			*link = req;
+			link->head = req;
+			link->last = req;
 		} else {
-			io_queue_sqe(req, sqe, cs);
+			io_queue_sqe(req);
 		}
 	}
@@ -6487,29 +7226,27 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 /*
  * Batched submission is done, ensure local IO is flushed out.
  */
-static void io_submit_state_end(struct io_submit_state *state)
+static void io_submit_state_end(struct io_submit_state *state,
+				struct io_ring_ctx *ctx)
 {
-	if (!list_empty(&state->comp.list))
-		io_submit_flush_completions(&state->comp);
-	blk_finish_plug(&state->plug);
-	io_state_file_put(state);
-	if (state->free_reqs)
-		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+	if (state->link.head)
+		io_queue_sqe(state->link.head);
+	if (state->compl_nr)
+		io_submit_flush_completions(ctx);
+	if (state->plug_started)
+		blk_finish_plug(&state->plug);
 }

 /*
  * Start submission side cache.
*/ static void io_submit_state_start(struct io_submit_state *state, - struct io_ring_ctx *ctx, unsigned int max_ios) -{ - blk_start_plug(&state->plug); - state->comp.nr = 0; - INIT_LIST_HEAD(&state->comp.list); - state->comp.ctx = ctx; - state->free_reqs = 0; - state->file = NULL; + unsigned int max_ios) +{ + state->plug_started = false; state->ios_left = max_ios; + /* set only head, no need to init link_last in advance */ + state->link.head = NULL; } static void io_commit_sqring(struct io_ring_ctx *ctx) @@ -6525,443 +7262,276 @@ static void io_commit_sqring(struct io_ring_ctx *ctx) } /* - * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory - * that is mapped by userspace. This means that care needs to be taken to - * ensure that reads are stable, as we cannot rely on userspace always - * being a good citizen. If members of the sqe are validated and then later - * used, it's important that those reads are done through READ_ONCE() to - * prevent a re-load down the line. - */ -static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) -{ - u32 *sq_array = ctx->sq_array; - unsigned head; - - /* - * The cached sq head (or cq tail) serves two purposes: - * - * 1) allows us to batch the cost of updating the user visible - * head updates. - * 2) allows the kernel side to track the head on its own, even - * though the application is the one updating it. - */ - head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]); - if (likely(head < ctx->sq_entries)) - return &ctx->sq_sqes[head]; - - /* drop invalid entries */ - ctx->cached_sq_dropped++; - WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped); - return NULL; -} - -static inline void io_consume_sqe(struct io_ring_ctx *ctx) -{ - ctx->cached_sq_head++; -} - -/* - * Check SQE restrictions (opcode and flags). - * - * Returns 'true' if SQE is allowed, 'false' otherwise. 
- */ -static inline bool io_check_restriction(struct io_ring_ctx *ctx, - struct io_kiocb *req, - unsigned int sqe_flags) -{ - if (!ctx->restricted) - return true; - - if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) - return false; - - if ((sqe_flags & ctx->restrictions.sqe_flags_required) != - ctx->restrictions.sqe_flags_required) - return false; - - if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | - ctx->restrictions.sqe_flags_required)) - return false; - - return true; -} - -#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ - IOSQE_IO_HARDLINK | IOSQE_ASYNC | \ - IOSQE_BUFFER_SELECT) - -static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, - const struct io_uring_sqe *sqe, - struct io_submit_state *state) -{ - unsigned int sqe_flags; - int id, ret; - - req->opcode = READ_ONCE(sqe->opcode); - req->user_data = READ_ONCE(sqe->user_data); - req->async_data = NULL; - req->file = NULL; - req->ctx = ctx; - req->flags = 0; - /* one is dropped after submission, the other at completion */ - refcount_set(&req->refs, 2); - req->task = current; - req->result = 0; - - if (unlikely(req->opcode >= IORING_OP_LAST)) - return -EINVAL; - - if (unlikely(io_sq_thread_acquire_mm(ctx, req))) - return -EFAULT; - - sqe_flags = READ_ONCE(sqe->flags); - /* enforce forwards compatibility on users */ - if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) - return -EINVAL; - - if (unlikely(!io_check_restriction(ctx, req, sqe_flags))) - return -EACCES; - - if ((sqe_flags & IOSQE_BUFFER_SELECT) && - !io_op_defs[req->opcode].buffer_select) - return -EOPNOTSUPP; - - id = READ_ONCE(sqe->personality); - if (id) { - struct io_identity *iod; - - iod = xa_load(&ctx->personalities, id); - if (unlikely(!iod)) - return -EINVAL; - refcount_inc(&iod->count); - - __io_req_init_async(req); - get_cred(iod->creds); - req->work.identity = iod; - req->work.flags |= IO_WQ_WORK_CREDS; - } - - /* same numerical values with corresponding REQ_F_*, safe to copy */ - req->flags |= sqe_flags; + * Fetch an sqe, if one is available. Note this returns a pointer to memory + * that is mapped by userspace. This means that care needs to be taken to + * ensure that reads are stable, as we cannot rely on userspace always + * being a good citizen. If members of the sqe are validated and then later + * used, it's important that those reads are done through READ_ONCE() to + * prevent a re-load down the line. + */ +static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) +{ + unsigned head, mask = ctx->sq_entries - 1; + unsigned sq_idx = ctx->cached_sq_head++ & mask; - if (!io_op_defs[req->opcode].needs_file) - return 0; + /* + * The cached sq head (or cq tail) serves two purposes: + * + * 1) allows us to batch the cost of updating the user visible + * head updates. + * 2) allows the kernel side to track the head on its own, even + * though the application is the one updating it. 
+	 */
+	head = READ_ONCE(ctx->sq_array[sq_idx]);
+	if (likely(head < ctx->sq_entries))
+		return &ctx->sq_sqes[head];

-	ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
-	state->ios_left--;
-	return ret;
+	/* drop invalid entries */
+	ctx->cq_extra--;
+	WRITE_ONCE(ctx->rings->sq_dropped,
+		   READ_ONCE(ctx->rings->sq_dropped) + 1);
+	return NULL;
 }

 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
+	__must_hold(&ctx->uring_lock)
 {
-	struct io_submit_state state;
-	struct io_kiocb *link = NULL;
-	int i, submitted = 0;
-
-	/* if we have a backlog and couldn't flush it all, return BUSY */
-	if (test_bit(0, &ctx->sq_check_overflow)) {
-		if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
-			return -EBUSY;
-	}
+	int submitted = 0;

 	/* make sure SQ entry isn't read before tail */
 	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
-
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
+	io_get_task_refs(nr);

-	percpu_counter_add(&current->io_uring->inflight, nr);
-	refcount_add(nr, &current->usage);
-
-	io_submit_state_start(&state, ctx, nr);
-
-	for (i = 0; i < nr; i++) {
+	io_submit_state_start(&ctx->submit_state, nr);
+	while (submitted < nr) {
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
-		int err;

-		sqe = io_get_sqe(ctx);
-		if (unlikely(!sqe)) {
-			io_consume_sqe(ctx);
-			break;
-		}
-		req = io_alloc_req(ctx, &state);
+		req = io_alloc_req(ctx);
 		if (unlikely(!req)) {
 			if (!submitted)
 				submitted = -EAGAIN;
 			break;
 		}
-		io_consume_sqe(ctx);
+		sqe = io_get_sqe(ctx);
+		if (unlikely(!sqe)) {
+			list_add(&req->inflight_entry, &ctx->submit_state.free_list);
+			break;
+		}
 		/* will complete beyond this point, count as submitted */
 		submitted++;
-
-		err = io_init_req(ctx, req, sqe, &state);
-		if (unlikely(err)) {
-fail_req:
-			io_put_req(req);
-			io_req_complete(req, err);
+		if (io_submit_sqe(ctx, req, sqe))
 			break;
-		}
-
-		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
-					  true, io_async_submit(ctx));
-		err = io_submit_sqe(req, sqe, &link, &state.comp);
-		if (err)
-			goto fail_req;
 	}

 	if (unlikely(submitted != nr)) {
 		int ref_used = (submitted == -EAGAIN) ?
0 : submitted; - struct io_uring_task *tctx = current->io_uring; int unused = nr - ref_used; + current->io_uring->cached_refs += unused; percpu_ref_put_many(&ctx->refs, unused); - percpu_counter_sub(&tctx->inflight, unused); - put_task_struct_many(current, unused); } - if (link) - io_queue_link_head(link, &state.comp); - io_submit_state_end(&state); + io_submit_state_end(&ctx->submit_state, ctx); /* Commit SQ ring head once we've consumed and submitted all SQEs */ io_commit_sqring(ctx); return submitted; } -static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx) +static inline bool io_sqd_events_pending(struct io_sq_data *sqd) { - /* Tell userspace we may need a wakeup call */ - spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; - spin_unlock_irq(&ctx->completion_lock); + return READ_ONCE(sqd->state); } -static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx) +static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx) { - spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; - spin_unlock_irq(&ctx->completion_lock); + /* Tell userspace we may need a wakeup call */ + spin_lock(&ctx->completion_lock); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP); + spin_unlock(&ctx->completion_lock); } -static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode, - int sync, void *key) +static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx) { - struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry); - int ret; - - ret = autoremove_wake_function(wqe, mode, sync, key); - if (ret) { - unsigned long flags; - - spin_lock_irqsave(&ctx->completion_lock, flags); - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; - spin_unlock_irqrestore(&ctx->completion_lock, flags); - } - return ret; + spin_lock(&ctx->completion_lock); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP); + spin_unlock(&ctx->completion_lock); } -enum sq_ret { - SQT_IDLE = 1, - SQT_SPIN = 2, - SQT_DID_WORK = 4, -}; - -static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx, - unsigned long start_jiffies, bool cap_entries) +static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) { - unsigned long timeout = start_jiffies + ctx->sq_thread_idle; - struct io_sq_data *sqd = ctx->sq_data; unsigned int to_submit; int ret = 0; -again: - if (!list_empty(&ctx->iopoll_list)) { - unsigned nr_events = 0; - - mutex_lock(&ctx->uring_lock); - if (!list_empty(&ctx->iopoll_list) && !need_resched()) - io_do_iopoll(ctx, &nr_events, 0); - mutex_unlock(&ctx->uring_lock); - } - to_submit = io_sqring_entries(ctx); + /* if we're handling multiple rings, cap submit size for fairness */ + if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE) + to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE; - /* - * If submit got -EBUSY, flag us as needing the application - * to enter the kernel to reap and flush events. - */ - if (!to_submit || ret == -EBUSY || need_resched()) { - /* - * Drop cur_mm before scheduling, we can't hold it for - * long periods (or over schedule()). Do this before - * adding ourselves to the waitqueue, as the unuse/drop - * may sleep. - */ - io_sq_thread_drop_mm(); + if (!list_empty(&ctx->iopoll_list) || to_submit) { + unsigned nr_events = 0; + const struct cred *creds = NULL; - /* - * We're polling. If we're within the defined idle - * period, then let us spin without work before going - * to sleep. 
The exception is if we got EBUSY doing - * more IO, we should wait for the application to - * reap events and wake us up. - */ - if (!list_empty(&ctx->iopoll_list) || need_resched() || - (!time_after(jiffies, timeout) && ret != -EBUSY && - !percpu_ref_is_dying(&ctx->refs))) - return SQT_SPIN; + if (ctx->sq_creds != current_cred()) + creds = override_creds(ctx->sq_creds); - prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry, - TASK_INTERRUPTIBLE); + mutex_lock(&ctx->uring_lock); + if (!list_empty(&ctx->iopoll_list)) + io_do_iopoll(ctx, &nr_events, 0); /* - * While doing polled IO, before going to sleep, we need - * to check if there are new reqs added to iopoll_list, - * it is because reqs may have been punted to io worker - * and will be added to iopoll_list later, hence check - * the iopoll_list again. + * Don't submit if refs are dying, good for io_uring_register(), + * but also it is relied upon by io_ring_exit_work() */ - if ((ctx->flags & IORING_SETUP_IOPOLL) && - !list_empty_careful(&ctx->iopoll_list)) { - finish_wait(&sqd->wait, &ctx->sqo_wait_entry); - goto again; - } + if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) && + !(ctx->flags & IORING_SETUP_R_DISABLED)) + ret = io_submit_sqes(ctx, to_submit); + mutex_unlock(&ctx->uring_lock); - to_submit = io_sqring_entries(ctx); - if (!to_submit || ret == -EBUSY) - return SQT_IDLE; + if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait)) + wake_up(&ctx->sqo_sq_wait); + if (creds) + revert_creds(creds); } - finish_wait(&sqd->wait, &ctx->sqo_wait_entry); - io_ring_clear_wakeup_flag(ctx); - - /* if we're handling multiple rings, cap submit size for fairness */ - if (cap_entries && to_submit > 8) - to_submit = 8; - - mutex_lock(&ctx->uring_lock); - if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead)) - ret = io_submit_sqes(ctx, to_submit); - mutex_unlock(&ctx->uring_lock); + return ret; +} - if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait)) - wake_up(&ctx->sqo_sq_wait); +static void io_sqd_update_thread_idle(struct io_sq_data *sqd) +{ + struct io_ring_ctx *ctx; + unsigned sq_thread_idle = 0; - return SQT_DID_WORK; + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle); + sqd->sq_thread_idle = sq_thread_idle; } -static void io_sqd_init_new(struct io_sq_data *sqd) +static bool io_sqd_handle_event(struct io_sq_data *sqd) { - struct io_ring_ctx *ctx; + bool did_sig = false; + struct ksignal ksig; - while (!list_empty(&sqd->ctx_new_list)) { - ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list); - init_wait(&ctx->sqo_wait_entry); - ctx->sqo_wait_entry.func = io_sq_wake_function; - list_move_tail(&ctx->sqd_list, &sqd->ctx_list); - complete(&ctx->sq_thread_comp); + if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || + signal_pending(current)) { + mutex_unlock(&sqd->lock); + if (signal_pending(current)) + did_sig = get_signal(&ksig); + cond_resched(); + mutex_lock(&sqd->lock); } + return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); } static int io_sq_thread(void *data) { - struct cgroup_subsys_state *cur_css = NULL; - const struct cred *old_cred = NULL; struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - unsigned long start_jiffies; + unsigned long timeout = 0; + char buf[TASK_COMM_LEN]; + DEFINE_WAIT(wait); - start_jiffies = jiffies; - while (!kthread_should_stop()) { - enum sq_ret ret = 0; - bool cap_entries; + snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); + set_task_comm(current, buf); - /* - * Any changes 
to the sqd lists are synchronized through the - * kthread parking. This synchronizes the thread vs users, - * the users are synchronized on the sqd->ctx_lock. - */ - if (kthread_should_park()) { - kthread_parkme(); - /* - * When sq thread is unparked, in case the previous park operation - * comes from io_put_sq_data(), which means that sq thread is going - * to be stopped, so here needs to have a check. - */ - if (kthread_should_stop()) + if (sqd->sq_cpu != -1) + set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); + else + set_cpus_allowed_ptr(current, cpu_online_mask); + current->flags |= PF_NO_SETAFFINITY; + + mutex_lock(&sqd->lock); + while (1) { + bool cap_entries, sqt_spin = false; + + if (io_sqd_events_pending(sqd) || signal_pending(current)) { + if (io_sqd_handle_event(sqd)) break; + timeout = jiffies + sqd->sq_thread_idle; } - if (unlikely(!list_empty(&sqd->ctx_new_list))) - io_sqd_init_new(sqd); - cap_entries = !list_is_singular(&sqd->ctx_list); - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { - if (current->cred != ctx->creds) { - if (old_cred) - revert_creds(old_cred); - old_cred = override_creds(ctx->creds); - } - io_sq_thread_associate_blkcg(ctx, &cur_css); -#ifdef CONFIG_AUDIT - current->loginuid = ctx->loginuid; - current->sessionid = ctx->sessionid; -#endif + int ret = __io_sq_thread(ctx, cap_entries); - ret |= __io_sq_thread(ctx, start_jiffies, cap_entries); - - io_sq_thread_drop_mm(); + if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list))) + sqt_spin = true; } + if (io_run_task_work()) + sqt_spin = true; - if (ret & SQT_SPIN) { - io_run_task_work(); - io_sq_thread_drop_mm(); + if (sqt_spin || !time_after(jiffies, timeout)) { cond_resched(); - } else if (ret == SQT_IDLE) { - if (kthread_should_park()) - continue; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + if (sqt_spin) + timeout = jiffies + sqd->sq_thread_idle; + continue; + } + + prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); + if (!io_sqd_events_pending(sqd) && !current->task_works) { + bool needs_sched = true; + + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { io_ring_set_wakeup_flag(ctx); - schedule(); - start_jiffies = jiffies; + + if ((ctx->flags & IORING_SETUP_IOPOLL) && + !list_empty_careful(&ctx->iopoll_list)) { + needs_sched = false; + break; + } + if (io_sqring_entries(ctx)) { + needs_sched = false; + break; + } + } + + if (needs_sched) { + mutex_unlock(&sqd->lock); + schedule(); + mutex_lock(&sqd->lock); + } list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) io_ring_clear_wakeup_flag(ctx); } + + finish_wait(&sqd->wait, &wait); + timeout = jiffies + sqd->sq_thread_idle; } + io_uring_cancel_generic(true, sqd); + sqd->thread = NULL; + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + io_ring_set_wakeup_flag(ctx); io_run_task_work(); - io_sq_thread_drop_mm(); - - if (cur_css) - io_sq_thread_unassociate_blkcg(); - if (old_cred) - revert_creds(old_cred); - - kthread_parkme(); + mutex_unlock(&sqd->lock); - return 0; + complete(&sqd->exited); + do_exit(0); } struct io_wait_queue { struct wait_queue_entry wq; struct io_ring_ctx *ctx; - unsigned to_wait; + unsigned cq_tail; unsigned nr_timeouts; }; static inline bool io_should_wake(struct io_wait_queue *iowq) { struct io_ring_ctx *ctx = iowq->ctx; + int dist = ctx->cached_cq_tail - (int) iowq->cq_tail; /* * Wake up if we have enough events, or if a timeout occurred since we * started waiting. For timeouts, we always want to return to userspace, * regardless of event count. 
*/ - return io_cqring_events(ctx) >= iowq->to_wait || - atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; + return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; } static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, @@ -6974,7 +7544,7 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, * Cannot safely flush overflowed CQEs from here, ensure we wake up * the task, and the next invocation will do it. */ - if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow)) + if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow)) return autoremove_wake_function(curr, mode, wake_flags, key); return -1; } @@ -6985,43 +7555,60 @@ static int io_run_task_work_sig(void) return 1; if (!signal_pending(current)) return 0; - if (current->jobctl & JOBCTL_TASK_WORK) { - spin_lock_irq(¤t->sighand->siglock); - current->jobctl &= ~JOBCTL_TASK_WORK; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - return 1; - } + if (test_thread_flag(TIF_NOTIFY_SIGNAL)) + return -ERESTARTSYS; return -EINTR; } +/* when returns >0, the caller should retry */ +static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, + struct io_wait_queue *iowq, + ktime_t *timeout) +{ + int ret; + + /* make sure we run task_work before checking for signals */ + ret = io_run_task_work_sig(); + if (ret || io_should_wake(iowq)) + return ret; + /* let the caller flush overflows, retry */ + if (test_bit(0, &ctx->check_cq_overflow)) + return 1; + + if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS)) + return -ETIME; + return 1; +} + /* * Wait until events become available, if we don't already have some. The * application must reap them itself, as they reside on the shared cq ring. */ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, - const sigset_t __user *sig, size_t sigsz) -{ - struct io_wait_queue iowq = { - .wq = { - .private = current, - .func = io_wake_function, - .entry = LIST_HEAD_INIT(iowq.wq.entry), - }, - .ctx = ctx, - .to_wait = min_events, - }; + const sigset_t __user *sig, size_t sigsz, + struct __kernel_timespec __user *uts) +{ + struct io_wait_queue iowq; struct io_rings *rings = ctx->rings; - int ret = 0; + ktime_t timeout = KTIME_MAX; + int ret; do { - io_cqring_overflow_flush(ctx, false, NULL, NULL); + io_cqring_overflow_flush(ctx); if (io_cqring_events(ctx) >= min_events) return 0; if (!io_run_task_work()) break; } while (1); + if (uts) { + struct timespec64 ts; + + if (get_timespec64(&ts, uts)) + return -EFAULT; + timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()); + } + if (sig) { #ifdef CONFIG_COMPAT if (in_compat_syscall()) @@ -7031,37 +7618,271 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, #endif ret = set_user_sigmask(sig, sigsz); - if (ret) - return ret; + if (ret) + return ret; + } + + init_waitqueue_func_entry(&iowq.wq, io_wake_function); + iowq.wq.private = current; + INIT_LIST_HEAD(&iowq.wq.entry); + iowq.ctx = ctx; + iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); + iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; + + trace_io_uring_cqring_wait(ctx, min_events); + do { + /* if we can't even flush overflow, don't wait for more */ + if (!io_cqring_overflow_flush(ctx)) { + ret = -EBUSY; + break; + } + prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, + TASK_INTERRUPTIBLE); + ret = io_cqring_wait_schedule(ctx, &iowq, &timeout); + finish_wait(&ctx->cq_wait, &iowq.wq); + cond_resched(); + } while (ret > 0); + + 
restore_saved_sigmask_unless(ret == -EINTR); + + return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; +} + +static void io_free_page_table(void **table, size_t size) +{ + unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); + + for (i = 0; i < nr_tables; i++) + kfree(table[i]); + kfree(table); +} + +static void **io_alloc_page_table(size_t size) +{ + unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); + size_t init_size = size; + void **table; + + table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT); + if (!table) + return NULL; + + for (i = 0; i < nr_tables; i++) { + unsigned int this_size = min_t(size_t, size, PAGE_SIZE); + + table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT); + if (!table[i]) { + io_free_page_table(table, init_size); + return NULL; + } + size -= this_size; + } + return table; +} + +static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node) +{ + percpu_ref_exit(&ref_node->refs); + kfree(ref_node); +} + +static void io_rsrc_node_ref_zero(struct percpu_ref *ref) +{ + struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs); + struct io_ring_ctx *ctx = node->rsrc_data->ctx; + unsigned long flags; + bool first_add = false; + unsigned long delay = HZ; + + spin_lock_irqsave(&ctx->rsrc_ref_lock, flags); + node->done = true; + + /* if we are mid-quiesce then do not delay */ + if (node->rsrc_data->quiesce) + delay = 0; + + while (!list_empty(&ctx->rsrc_ref_list)) { + node = list_first_entry(&ctx->rsrc_ref_list, + struct io_rsrc_node, node); + /* recycle ref nodes in order */ + if (!node->done) + break; + list_del(&node->node); + first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist); + } + spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags); + + if (first_add) + mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay); +} + +static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx) +{ + struct io_rsrc_node *ref_node; + + ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL); + if (!ref_node) + return NULL; + + if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero, + 0, GFP_KERNEL)) { + kfree(ref_node); + return NULL; + } + INIT_LIST_HEAD(&ref_node->node); + INIT_LIST_HEAD(&ref_node->rsrc_list); + ref_node->done = false; + return ref_node; +} + +static void io_rsrc_node_switch(struct io_ring_ctx *ctx, + struct io_rsrc_data *data_to_kill) +{ + WARN_ON_ONCE(!ctx->rsrc_backup_node); + WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node); + + if (data_to_kill) { + struct io_rsrc_node *rsrc_node = ctx->rsrc_node; + + rsrc_node->rsrc_data = data_to_kill; + spin_lock_irq(&ctx->rsrc_ref_lock); + list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list); + spin_unlock_irq(&ctx->rsrc_ref_lock); + + atomic_inc(&data_to_kill->refs); + percpu_ref_kill(&rsrc_node->refs); + ctx->rsrc_node = NULL; + } + + if (!ctx->rsrc_node) { + ctx->rsrc_node = ctx->rsrc_backup_node; + ctx->rsrc_backup_node = NULL; + } +} + +static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx) +{ + if (ctx->rsrc_backup_node) + return 0; + ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx); + return ctx->rsrc_backup_node ? 
0 : -ENOMEM; +} + +static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx) +{ + int ret; + + /* As we may drop ->uring_lock, other task may have started quiesce */ + if (data->quiesce) + return -ENXIO; + + data->quiesce = true; + do { + ret = io_rsrc_node_switch_start(ctx); + if (ret) + break; + io_rsrc_node_switch(ctx, data); + + /* kill initial ref, already quiesced if zero */ + if (atomic_dec_and_test(&data->refs)) + break; + mutex_unlock(&ctx->uring_lock); + flush_delayed_work(&ctx->rsrc_put_work); + ret = wait_for_completion_interruptible(&data->done); + if (!ret) { + mutex_lock(&ctx->uring_lock); + if (atomic_read(&data->refs) > 0) { + /* + * it has been revived by another thread while + * we were unlocked + */ + mutex_unlock(&ctx->uring_lock); + } else { + break; + } + } + + atomic_inc(&data->refs); + /* wait for all works potentially completing data->done */ + flush_delayed_work(&ctx->rsrc_put_work); + reinit_completion(&data->done); + + ret = io_run_task_work_sig(); + mutex_lock(&ctx->uring_lock); + } while (ret >= 0); + data->quiesce = false; + + return ret; +} + +static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx) +{ + unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK; + unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT; + + return &data->tags[table_idx][off]; +} + +static void io_rsrc_data_free(struct io_rsrc_data *data) +{ + size_t size = data->nr * sizeof(data->tags[0][0]); + + if (data->tags) + io_free_page_table((void **)data->tags, size); + kfree(data); +} + +static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put, + u64 __user *utags, unsigned nr, + struct io_rsrc_data **pdata) +{ + struct io_rsrc_data *data; + int ret = -ENOMEM; + unsigned i; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0])); + if (!data->tags) { + kfree(data); + return -ENOMEM; } - iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); - trace_io_uring_cqring_wait(ctx, min_events); - do { - io_cqring_overflow_flush(ctx, false, NULL, NULL); - prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, - TASK_INTERRUPTIBLE); - /* make sure we run task_work before checking for signals */ - ret = io_run_task_work_sig(); - if (ret > 0) { - finish_wait(&ctx->wait, &iowq.wq); - continue; - } - else if (ret < 0) - break; - if (io_should_wake(&iowq)) - break; - if (test_bit(0, &ctx->cq_check_overflow)) { - finish_wait(&ctx->wait, &iowq.wq); - continue; + data->nr = nr; + data->ctx = ctx; + data->do_put = do_put; + if (utags) { + ret = -EFAULT; + for (i = 0; i < nr; i++) { + u64 *tag_slot = io_get_tag_slot(data, i); + + if (copy_from_user(tag_slot, &utags[i], + sizeof(*tag_slot))) + goto fail; } - schedule(); - } while (1); - finish_wait(&ctx->wait, &iowq.wq); + } - restore_saved_sigmask_unless(ret == -EINTR); + atomic_set(&data->refs, 1); + init_completion(&data->done); + *pdata = data; + return 0; +fail: + io_rsrc_data_free(data); + return ret; +} - return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? 
ret : 0; +static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files) +{ + table->files = kvcalloc(nr_files, sizeof(table->files[0]), + GFP_KERNEL_ACCOUNT); + return !!table->files; +} + +static void io_free_file_tables(struct io_file_table *table) +{ + kvfree(table->files); + table->files = NULL; } static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) @@ -7085,92 +7906,97 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) fput(file); } #endif + io_free_file_tables(&ctx->file_table); + io_rsrc_data_free(ctx->file_data); + ctx->file_data = NULL; + ctx->nr_user_files = 0; } -static void io_file_ref_kill(struct percpu_ref *ref) +static int io_sqe_files_unregister(struct io_ring_ctx *ctx) { - struct fixed_file_data *data; + unsigned nr = ctx->nr_user_files; + int ret; - data = container_of(ref, struct fixed_file_data, refs); - complete(&data->done); -} + if (!ctx->file_data) + return -ENXIO; -static void io_sqe_files_set_node(struct fixed_file_data *file_data, - struct fixed_file_ref_node *ref_node) -{ - spin_lock_bh(&file_data->lock); - file_data->node = ref_node; - list_add_tail(&ref_node->node, &file_data->ref_list); - spin_unlock_bh(&file_data->lock); - percpu_ref_get(&file_data->refs); + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. + */ + ctx->nr_user_files = 0; + ret = io_rsrc_ref_quiesce(ctx->file_data, ctx); + ctx->nr_user_files = nr; + if (!ret) + __io_sqe_files_unregister(ctx); + return ret; } -static int io_sqe_files_unregister(struct io_ring_ctx *ctx) +static void io_sq_thread_unpark(struct io_sq_data *sqd) + __releases(&sqd->lock) { - struct fixed_file_data *data = ctx->file_data; - struct fixed_file_ref_node *backup_node, *ref_node = NULL; - unsigned nr_tables, i; - int ret; + WARN_ON_ONCE(sqd->thread == current); - if (!data) - return -ENXIO; - backup_node = alloc_fixed_file_ref_node(ctx); - if (!backup_node) - return -ENOMEM; + /* + * Do the dance but not conditional clear_bit() because it'd race with + * other threads incrementing park_pending and setting the bit. 
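 * Instead, the bit is cleared unconditionally and then re-set if the
 * decrement shows other parkers are still pending.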
+ */ + clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + if (atomic_dec_return(&sqd->park_pending)) + set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + mutex_unlock(&sqd->lock); +} - spin_lock_bh(&data->lock); - ref_node = data->node; - spin_unlock_bh(&data->lock); - if (ref_node) - percpu_ref_kill(&ref_node->refs); +static void io_sq_thread_park(struct io_sq_data *sqd) + __acquires(&sqd->lock) +{ + WARN_ON_ONCE(sqd->thread == current); - percpu_ref_kill(&data->refs); + atomic_inc(&sqd->park_pending); + set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + mutex_lock(&sqd->lock); + if (sqd->thread) + wake_up_process(sqd->thread); +} - /* wait for all refs nodes to complete */ - flush_delayed_work(&ctx->file_put_work); - do { - ret = wait_for_completion_interruptible(&data->done); - if (!ret) - break; - ret = io_run_task_work_sig(); - if (ret < 0) { - percpu_ref_resurrect(&data->refs); - reinit_completion(&data->done); - io_sqe_files_set_node(data, backup_node); - return ret; - } - } while (1); +static void io_sq_thread_stop(struct io_sq_data *sqd) +{ + WARN_ON_ONCE(sqd->thread == current); + WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); - __io_sqe_files_unregister(ctx); - nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE); - for (i = 0; i < nr_tables; i++) - kfree(data->table[i].files); - kfree(data->table); - percpu_ref_exit(&data->refs); - kfree(data); - ctx->file_data = NULL; - ctx->nr_user_files = 0; - destroy_fixed_file_ref_node(backup_node); - return 0; + set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); + mutex_lock(&sqd->lock); + if (sqd->thread) + wake_up_process(sqd->thread); + mutex_unlock(&sqd->lock); + wait_for_completion(&sqd->exited); } static void io_put_sq_data(struct io_sq_data *sqd) { if (refcount_dec_and_test(&sqd->refs)) { - /* - * The park is a bit of a work-around, without it we get - * warning spews on shutdown with SQPOLL set and affinity - * set to a single CPU. 
- */ - if (sqd->thread) { - kthread_park(sqd->thread); - kthread_stop(sqd->thread); - } + WARN_ON_ONCE(atomic_read(&sqd->park_pending)); + io_sq_thread_stop(sqd); kfree(sqd); } } +static void io_sq_thread_finish(struct io_ring_ctx *ctx) +{ + struct io_sq_data *sqd = ctx->sq_data; + + if (sqd) { + io_sq_thread_park(sqd); + list_del_init(&ctx->sqd_list); + io_sqd_update_thread_idle(sqd); + io_sq_thread_unpark(sqd); + + io_put_sq_data(sqd); + ctx->sq_data = NULL; + } +} + static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) { struct io_ring_ctx *ctx_attach; @@ -7191,92 +8017,46 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) fdput(f); return ERR_PTR(-EINVAL); } + if (sqd->task_tgid != current->tgid) { + fdput(f); + return ERR_PTR(-EPERM); + } refcount_inc(&sqd->refs); fdput(f); return sqd; } -static struct io_sq_data *io_get_sq_data(struct io_uring_params *p) +static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, + bool *attached) { struct io_sq_data *sqd; - if (p->flags & IORING_SETUP_ATTACH_WQ) - return io_attach_sq_data(p); + *attached = false; + if (p->flags & IORING_SETUP_ATTACH_WQ) { + sqd = io_attach_sq_data(p); + if (!IS_ERR(sqd)) { + *attached = true; + return sqd; + } + /* fall through for EPERM case, setup new sqd/task */ + if (PTR_ERR(sqd) != -EPERM) + return sqd; + } sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); if (!sqd) return ERR_PTR(-ENOMEM); + atomic_set(&sqd->park_pending, 0); refcount_set(&sqd->refs, 1); INIT_LIST_HEAD(&sqd->ctx_list); - INIT_LIST_HEAD(&sqd->ctx_new_list); - mutex_init(&sqd->ctx_lock); mutex_init(&sqd->lock); init_waitqueue_head(&sqd->wait); + init_completion(&sqd->exited); return sqd; } -static void io_sq_thread_unpark(struct io_sq_data *sqd) - __releases(&sqd->lock) -{ - if (!sqd->thread) - return; - kthread_unpark(sqd->thread); - mutex_unlock(&sqd->lock); -} - -static void io_sq_thread_park(struct io_sq_data *sqd) - __acquires(&sqd->lock) -{ - if (!sqd->thread) - return; - mutex_lock(&sqd->lock); - kthread_park(sqd->thread); -} - -static void io_sq_thread_stop(struct io_ring_ctx *ctx) -{ - struct io_sq_data *sqd = ctx->sq_data; - - if (sqd) { - if (sqd->thread) { - /* - * We may arrive here from the error branch in - * io_sq_offload_create() where the kthread is created - * without being waked up, thus wake it up now to make - * sure the wait will complete. 
- */ - wake_up_process(sqd->thread); - wait_for_completion(&ctx->sq_thread_comp); - - io_sq_thread_park(sqd); - } - - mutex_lock(&sqd->ctx_lock); - list_del(&ctx->sqd_list); - mutex_unlock(&sqd->ctx_lock); - - if (sqd->thread) { - finish_wait(&sqd->wait, &ctx->sqo_wait_entry); - io_sq_thread_unpark(sqd); - } - - io_put_sq_data(sqd); - ctx->sq_data = NULL; - } -} - -static void io_finish_async(struct io_ring_ctx *ctx) -{ - io_sq_thread_stop(ctx); - - if (ctx->io_wq) { - io_wq_destroy(ctx->io_wq); - ctx->io_wq = NULL; - } -} - #if defined(CONFIG_UNIX) /* * Ensure the UNIX gc is aware of our file set, so we are certain that @@ -7301,9 +8081,10 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) } skb->sk = sk; + skb->scm_io_uring = 1; nr_files = 0; - fpl->user = get_uid(ctx->user); + fpl->user = get_uid(current_user()); for (i = 0; i < nr; i++) { struct file *file = io_file_from_index(ctx, i + offset); @@ -7379,35 +8160,9 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx) } #endif -static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data, - unsigned nr_tables, unsigned nr_files) -{ - int i; - - for (i = 0; i < nr_tables; i++) { - struct fixed_file_table *table = &file_data->table[i]; - unsigned this_files; - - this_files = min(nr_files, IORING_MAX_FILES_TABLE); - table->files = kcalloc(this_files, sizeof(struct file *), - GFP_KERNEL_ACCOUNT); - if (!table->files) - break; - nr_files -= this_files; - } - - if (i == nr_tables) - return 0; - - for (i = 0; i < nr_tables; i++) { - struct fixed_file_table *table = &file_data->table[i]; - kfree(table->files); - } - return 1; -} - -static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file) +static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc) { + struct file *file = prsrc->file; #if defined(CONFIG_UNIX) struct sock *sock = ctx->ring_sock->sk; struct sk_buff_head list, *head = &sock->sk_receive_queue; @@ -7468,117 +8223,61 @@ static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file) #endif } -struct io_file_put { - struct list_head list; - struct file *file; -}; - -static void __io_file_put_work(struct fixed_file_ref_node *ref_node) +static void __io_rsrc_put_work(struct io_rsrc_node *ref_node) { - struct fixed_file_data *file_data = ref_node->file_data; - struct io_ring_ctx *ctx = file_data->ctx; - struct io_file_put *pfile, *tmp; + struct io_rsrc_data *rsrc_data = ref_node->rsrc_data; + struct io_ring_ctx *ctx = rsrc_data->ctx; + struct io_rsrc_put *prsrc, *tmp; - list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) { - list_del(&pfile->list); - io_ring_file_put(ctx, pfile->file); - kfree(pfile); + list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) { + list_del(&prsrc->list); + + if (prsrc->tag) { + bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL; + + io_ring_submit_lock(ctx, lock_ring); + spin_lock(&ctx->completion_lock); + io_fill_cqe_aux(ctx, prsrc->tag, 0, 0); + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + io_cqring_ev_posted(ctx); + io_ring_submit_unlock(ctx, lock_ring); + } + + rsrc_data->do_put(ctx, prsrc); + kfree(prsrc); } - percpu_ref_exit(&ref_node->refs); - kfree(ref_node); - percpu_ref_put(&file_data->refs); + io_rsrc_node_destroy(ref_node); + if (atomic_dec_and_test(&rsrc_data->refs)) + complete(&rsrc_data->done); } -static void io_file_put_work(struct work_struct *work) +static void io_rsrc_put_work(struct work_struct *work) { struct io_ring_ctx *ctx; struct llist_node *node; - ctx = 
container_of(work, struct io_ring_ctx, file_put_work.work); - node = llist_del_all(&ctx->file_put_llist); + ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work); + node = llist_del_all(&ctx->rsrc_put_llist); while (node) { - struct fixed_file_ref_node *ref_node; + struct io_rsrc_node *ref_node; struct llist_node *next = node->next; - ref_node = llist_entry(node, struct fixed_file_ref_node, llist); - __io_file_put_work(ref_node); + ref_node = llist_entry(node, struct io_rsrc_node, llist); + __io_rsrc_put_work(ref_node); node = next; } } -static void io_file_data_ref_zero(struct percpu_ref *ref) -{ - struct fixed_file_ref_node *ref_node; - struct fixed_file_data *data; - struct io_ring_ctx *ctx; - bool first_add = false; - int delay = HZ; - - ref_node = container_of(ref, struct fixed_file_ref_node, refs); - data = ref_node->file_data; - ctx = data->ctx; - - spin_lock_bh(&data->lock); - ref_node->done = true; - - while (!list_empty(&data->ref_list)) { - ref_node = list_first_entry(&data->ref_list, - struct fixed_file_ref_node, node); - /* recycle ref nodes in order */ - if (!ref_node->done) - break; - list_del(&ref_node->node); - first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist); - } - spin_unlock_bh(&data->lock); - - if (percpu_ref_is_dying(&data->refs)) - delay = 0; - - if (!delay) - mod_delayed_work(system_wq, &ctx->file_put_work, 0); - else if (first_add) - queue_delayed_work(system_wq, &ctx->file_put_work, delay); -} - -static struct fixed_file_ref_node *alloc_fixed_file_ref_node( - struct io_ring_ctx *ctx) -{ - struct fixed_file_ref_node *ref_node; - - ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL); - if (!ref_node) - return NULL; - - if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero, - 0, GFP_KERNEL)) { - kfree(ref_node); - return NULL; - } - INIT_LIST_HEAD(&ref_node->node); - INIT_LIST_HEAD(&ref_node->file_list); - ref_node->file_data = ctx->file_data; - ref_node->done = false; - return ref_node; -} - -static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node) -{ - percpu_ref_exit(&ref_node->refs); - kfree(ref_node); -} - static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, - unsigned nr_args) + unsigned nr_args, u64 __user *tags) { __s32 __user *fds = (__s32 __user *) arg; - unsigned nr_tables, i; struct file *file; - int fd, ret = -ENOMEM; - struct fixed_file_ref_node *ref_node; - struct fixed_file_data *file_data; + int fd, ret; + unsigned i; if (ctx->file_data) return -EBUSY; @@ -7588,44 +8287,34 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, return -EMFILE; if (nr_args > rlimit(RLIMIT_NOFILE)) return -EMFILE; + ret = io_rsrc_node_switch_start(ctx); + if (ret) + return ret; + ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args, + &ctx->file_data); + if (ret) + return ret; - file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL_ACCOUNT); - if (!file_data) - return -ENOMEM; - file_data->ctx = ctx; - init_completion(&file_data->done); - INIT_LIST_HEAD(&file_data->ref_list); - spin_lock_init(&file_data->lock); - - nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE); - file_data->table = kcalloc(nr_tables, sizeof(*file_data->table), - GFP_KERNEL_ACCOUNT); - if (!file_data->table) - goto out_free; - - if (percpu_ref_init(&file_data->refs, io_file_ref_kill, - PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) + ret = -ENOMEM; + if (!io_alloc_file_tables(&ctx->file_table, nr_args)) goto out_free; - if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args)) - goto out_ref; 
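/*
 * Aside on io_rsrc_put_work() above: zero-ref rsrc nodes are queued on a
 * lock-free llist, and the worker detaches the whole chain with one atomic
 * swap (llist_del_all) before walking it privately. A self-contained
 * userspace sketch of that drain pattern in C11 (illustrative names, not
 * kernel API):
 */
#include <stdatomic.h>
#include <stddef.h>

struct put_node {
	struct put_node *next;
};

static _Atomic(struct put_node *) put_list;

static void put_list_add(struct put_node *n)	/* llist_add() analogue */
{
	struct put_node *head = atomic_load(&put_list);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&put_list, &head, n));
}

static void put_list_drain(void (*put)(struct put_node *))
{
	/* llist_del_all() analogue: take the entire chain in one swap,
	 * after which no other thread can observe or modify it */
	struct put_node *n = atomic_exchange(&put_list, NULL);

	while (n) {
		struct put_node *next = n->next;

		put(n);
		n = next;
	}
}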
- ctx->file_data = file_data; - for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { - struct fixed_file_table *table; - unsigned index; - if (copy_from_user(&fd, &fds[i], sizeof(fd))) { ret = -EFAULT; goto out_fput; } /* allow sparse sets */ - if (fd == -1) + if (fd == -1) { + ret = -EINVAL; + if (unlikely(*io_get_tag_slot(ctx->file_data, i))) + goto out_fput; continue; + } file = fget(fd); ret = -EBADF; - if (!file) + if (unlikely(!file)) goto out_fput; /* @@ -7639,24 +8328,16 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, fput(file); goto out_fput; } - table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT]; - index = i & IORING_FILE_TABLE_MASK; - table->files[index] = file; + io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file); } ret = io_sqe_files_scm(ctx); if (ret) { - io_sqe_files_unregister(ctx); + __io_sqe_files_unregister(ctx); return ret; } - ref_node = alloc_fixed_file_ref_node(ctx); - if (!ref_node) { - io_sqe_files_unregister(ctx); - return -ENOMEM; - } - - io_sqe_files_set_node(file_data, ref_node); + io_rsrc_node_switch(ctx, NULL); return ret; out_fput: for (i = 0; i < ctx->nr_user_files; i++) { @@ -7664,14 +8345,10 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, if (file) fput(file); } - for (i = 0; i < nr_tables; i++) - kfree(file_data->table[i].files); + io_free_file_tables(&ctx->file_table); ctx->nr_user_files = 0; -out_ref: - percpu_ref_exit(&file_data->refs); out_free: - kfree(file_data->table); - kfree(file_data); + io_rsrc_data_free(ctx->file_data); ctx->file_data = NULL; return ret; } @@ -7719,63 +8396,159 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file, #endif } -static int io_queue_file_removal(struct fixed_file_data *data, - struct file *file) +static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, + struct io_rsrc_node *node, void *rsrc) +{ + u64 *tag_slot = io_get_tag_slot(data, idx); + struct io_rsrc_put *prsrc; + + prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL); + if (!prsrc) + return -ENOMEM; + + prsrc->tag = *tag_slot; + *tag_slot = 0; + prsrc->rsrc = rsrc; + list_add(&prsrc->list, &node->rsrc_list); + return 0; +} + +static int io_install_fixed_file(struct io_kiocb *req, struct file *file, + unsigned int issue_flags, u32 slot_index) +{ + struct io_ring_ctx *ctx = req->ctx; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_switch = false; + struct io_fixed_file *file_slot; + int ret = -EBADF; + + io_ring_submit_lock(ctx, !force_nonblock); + if (file->f_op == &io_uring_fops) + goto err; + ret = -ENXIO; + if (!ctx->file_data) + goto err; + ret = -EINVAL; + if (slot_index >= ctx->nr_user_files) + goto err; + + slot_index = array_index_nospec(slot_index, ctx->nr_user_files); + file_slot = io_fixed_file_slot(&ctx->file_table, slot_index); + + if (file_slot->file_ptr) { + struct file *old_file; + + ret = io_rsrc_node_switch_start(ctx); + if (ret) + goto err; + + old_file = (struct file *)(file_slot->file_ptr & FFS_MASK); + ret = io_queue_rsrc_removal(ctx->file_data, slot_index, + ctx->rsrc_node, old_file); + if (ret) + goto err; + file_slot->file_ptr = 0; + needs_switch = true; + } + + *io_get_tag_slot(ctx->file_data, slot_index) = 0; + io_fixed_file_set(file_slot, file); + ret = io_sqe_file_register(ctx, file, slot_index); + if (ret) { + file_slot->file_ptr = 0; + goto err; + } + + ret = 0; +err: + if (needs_switch) + io_rsrc_node_switch(ctx, ctx->file_data); + io_ring_submit_unlock(ctx, !force_nonblock); + if (ret) + 
fput(file); + return ret; +} + +static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) { - struct io_file_put *pfile; - struct fixed_file_ref_node *ref_node = data->node; + unsigned int offset = req->close.file_slot - 1; + struct io_ring_ctx *ctx = req->ctx; + struct io_fixed_file *file_slot; + struct file *file; + int ret; - pfile = kzalloc(sizeof(*pfile), GFP_KERNEL); - if (!pfile) - return -ENOMEM; + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + ret = -ENXIO; + if (unlikely(!ctx->file_data)) + goto out; + ret = -EINVAL; + if (offset >= ctx->nr_user_files) + goto out; + ret = io_rsrc_node_switch_start(ctx); + if (ret) + goto out; + + offset = array_index_nospec(offset, ctx->nr_user_files); + file_slot = io_fixed_file_slot(&ctx->file_table, offset); + ret = -EBADF; + if (!file_slot->file_ptr) + goto out; - pfile->file = file; - list_add(&pfile->list, &ref_node->file_list); + file = (struct file *)(file_slot->file_ptr & FFS_MASK); + ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file); + if (ret) + goto out; - return 0; + file_slot->file_ptr = 0; + io_rsrc_node_switch(ctx, ctx->file_data); + ret = 0; +out: + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + return ret; } static int __io_sqe_files_update(struct io_ring_ctx *ctx, - struct io_uring_files_update *up, + struct io_uring_rsrc_update2 *up, unsigned nr_args) { - struct fixed_file_data *data = ctx->file_data; - struct fixed_file_ref_node *ref_node; + u64 __user *tags = u64_to_user_ptr(up->tags); + __s32 __user *fds = u64_to_user_ptr(up->data); + struct io_rsrc_data *data = ctx->file_data; + struct io_fixed_file *file_slot; struct file *file; - __s32 __user *fds; - int fd, i, err; - __u32 done; + int fd, i, err = 0; + unsigned int done; bool needs_switch = false; - if (check_add_overflow(up->offset, nr_args, &done)) - return -EOVERFLOW; - if (done > ctx->nr_user_files) + if (!ctx->file_data) + return -ENXIO; + if (up->offset + nr_args > ctx->nr_user_files) return -EINVAL; - ref_node = alloc_fixed_file_ref_node(ctx); - if (!ref_node) - return -ENOMEM; - - done = 0; - fds = u64_to_user_ptr(up->fds); - while (nr_args) { - struct fixed_file_table *table; - unsigned index; + for (done = 0; done < nr_args; done++) { + u64 tag = 0; - err = 0; - if (copy_from_user(&fd, &fds[done], sizeof(fd))) { + if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) || + copy_from_user(&fd, &fds[done], sizeof(fd))) { err = -EFAULT; break; } - i = array_index_nospec(up->offset, ctx->nr_user_files); - table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; - index = i & IORING_FILE_TABLE_MASK; - if (table->files[index]) { - file = table->files[index]; - err = io_queue_file_removal(data, file); + if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) { + err = -EINVAL; + break; + } + if (fd == IORING_REGISTER_FILES_SKIP) + continue; + + i = array_index_nospec(up->offset + done, ctx->nr_user_files); + file_slot = io_fixed_file_slot(&ctx->file_table, i); + + if (file_slot->file_ptr) { + file = (struct file *)(file_slot->file_ptr & FFS_MASK); + err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file); if (err) break; - table->files[index] = NULL; + file_slot->file_ptr = 0; needs_switch = true; } if (fd != -1) { @@ -7797,106 +8570,61 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, err = -EBADF; break; } - table->files[index] = file; + *io_get_tag_slot(data, i) = tag; + io_fixed_file_set(file_slot, file); err = io_sqe_file_register(ctx, file, i); if (err) { 
- table->files[index] = NULL; + file_slot->file_ptr = 0; fput(file); break; } } - nr_args--; - done++; - up->offset++; } - if (needs_switch) { - percpu_ref_kill(&data->node->refs); - io_sqe_files_set_node(data, ref_node); - } else - destroy_fixed_file_ref_node(ref_node); - + if (needs_switch) + io_rsrc_node_switch(ctx, data); return done ? done : err; } -static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg, - unsigned nr_args) -{ - struct io_uring_files_update up; - - if (!ctx->file_data) - return -ENXIO; - if (!nr_args) - return -EINVAL; - if (copy_from_user(&up, arg, sizeof(up))) - return -EFAULT; - if (up.resv) - return -EINVAL; - - return __io_sqe_files_update(ctx, &up, nr_args); -} - -static void io_free_work(struct io_wq_work *work) -{ - struct io_kiocb *req = container_of(work, struct io_kiocb, work); - - /* Consider that io_steal_work() relies on this ref */ - io_put_req(req); -} - -static int io_init_wq_offload(struct io_ring_ctx *ctx, - struct io_uring_params *p) +static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, + struct task_struct *task) { + struct io_wq_hash *hash; struct io_wq_data data; - struct fd f; - struct io_ring_ctx *ctx_attach; unsigned int concurrency; - int ret = 0; - - data.user = ctx->user; - data.free_work = io_free_work; - data.do_work = io_wq_submit_work; - if (!(p->flags & IORING_SETUP_ATTACH_WQ)) { - /* Do QD, or 4 * CPUS, whatever is smallest */ - concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); - - ctx->io_wq = io_wq_create(concurrency, &data); - if (IS_ERR(ctx->io_wq)) { - ret = PTR_ERR(ctx->io_wq); - ctx->io_wq = NULL; + mutex_lock(&ctx->uring_lock); + hash = ctx->hash_map; + if (!hash) { + hash = kzalloc(sizeof(*hash), GFP_KERNEL); + if (!hash) { + mutex_unlock(&ctx->uring_lock); + return ERR_PTR(-ENOMEM); } - return ret; + refcount_set(&hash->refs, 1); + init_waitqueue_head(&hash->wait); + ctx->hash_map = hash; } + mutex_unlock(&ctx->uring_lock); - f = fdget(p->wq_fd); - if (!f.file) - return -EBADF; - - if (f.file->f_op != &io_uring_fops) { - ret = -EINVAL; - goto out_fput; - } + data.hash = hash; + data.task = task; + data.free_work = io_wq_free_work; + data.do_work = io_wq_submit_work; - ctx_attach = f.file->private_data; - /* @io_wq is protected by holding the fd */ - if (!io_wq_get(ctx_attach->io_wq, &data)) { - ret = -EINVAL; - goto out_fput; - } + /* Do QD, or 4 * CPUS, whatever is smallest */ + concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); - ctx->io_wq = ctx_attach->io_wq; -out_fput: - fdput(f); - return ret; + return io_wq_create(concurrency, &data); } -static int io_uring_alloc_task_context(struct task_struct *task) +static int io_uring_alloc_task_context(struct task_struct *task, + struct io_ring_ctx *ctx) { struct io_uring_task *tctx; int ret; - tctx = kmalloc(sizeof(*tctx), GFP_KERNEL); + tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); if (unlikely(!tctx)) return -ENOMEM; @@ -7906,14 +8634,22 @@ static int io_uring_alloc_task_context(struct task_struct *task) return ret; } + tctx->io_wq = io_init_wq_offload(ctx, task); + if (IS_ERR(tctx->io_wq)) { + ret = PTR_ERR(tctx->io_wq); + percpu_counter_destroy(&tctx->inflight); + kfree(tctx); + return ret; + } + xa_init(&tctx->xa); init_waitqueue_head(&tctx->wait); - tctx->last = NULL; atomic_set(&tctx->in_idle, 0); - tctx->sqpoll = false; - io_init_identity(&tctx->__identity); - tctx->identity = &tctx->__identity; + atomic_set(&tctx->inflight_tracked, 0); task->io_uring = tctx; + spin_lock_init(&tctx->task_lock); + 
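/* task_lock protects task_list, the per-task queue of deferred work that tctx_task_work() drains */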
INIT_WQ_LIST(&tctx->task_list); + init_task_work(&tctx->task_work, tctx_task_work); return 0; } @@ -7922,9 +8658,9 @@ void __io_uring_free(struct task_struct *tsk) struct io_uring_task *tctx = tsk->io_uring; WARN_ON_ONCE(!xa_empty(&tctx->xa)); - WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1); - if (tctx->identity != &tctx->__identity) - kfree(tctx->identity); + WARN_ON_ONCE(tctx->io_wq); + WARN_ON_ONCE(tctx->cached_refs); + percpu_counter_destroy(&tctx->inflight); kfree(tctx); tsk->io_uring = NULL; @@ -7935,54 +8671,71 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, { int ret; + /* Retain compatibility with failing for an invalid attach attempt */ + if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) == + IORING_SETUP_ATTACH_WQ) { + struct fd f; + + f = fdget(p->wq_fd); + if (!f.file) + return -ENXIO; + if (f.file->f_op != &io_uring_fops) { + fdput(f); + return -EINVAL; + } + fdput(f); + } if (ctx->flags & IORING_SETUP_SQPOLL) { + struct task_struct *tsk; struct io_sq_data *sqd; + bool attached; - ret = -EPERM; - if (!capable(CAP_SYS_ADMIN)) - goto err; - - sqd = io_get_sq_data(p); + sqd = io_get_sq_data(p, &attached); if (IS_ERR(sqd)) { ret = PTR_ERR(sqd); goto err; } + ctx->sq_creds = get_current_cred(); ctx->sq_data = sqd; - io_sq_thread_park(sqd); - mutex_lock(&sqd->ctx_lock); - list_add(&ctx->sqd_list, &sqd->ctx_new_list); - mutex_unlock(&sqd->ctx_lock); - io_sq_thread_unpark(sqd); - ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); if (!ctx->sq_thread_idle) ctx->sq_thread_idle = HZ; - if (sqd->thread) - goto done; + io_sq_thread_park(sqd); + list_add(&ctx->sqd_list, &sqd->ctx_list); + io_sqd_update_thread_idle(sqd); + /* don't attach to a dying SQPOLL thread, would be racy */ + ret = (attached && !sqd->thread) ? 
-ENXIO : 0; + io_sq_thread_unpark(sqd); + + if (ret < 0) + goto err; + if (attached) + return 0; if (p->flags & IORING_SETUP_SQ_AFF) { int cpu = p->sq_thread_cpu; ret = -EINVAL; - if (cpu >= nr_cpu_ids) - goto err; - if (!cpu_online(cpu)) - goto err; - - sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd, - cpu, "io_uring-sq"); + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) + goto err_sqpoll; + sqd->sq_cpu = cpu; } else { - sqd->thread = kthread_create(io_sq_thread, sqd, - "io_uring-sq"); + sqd->sq_cpu = -1; } - if (IS_ERR(sqd->thread)) { - ret = PTR_ERR(sqd->thread); - sqd->thread = NULL; - goto err; + + sqd->task_pid = current->pid; + sqd->task_tgid = current->tgid; + tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); + if (IS_ERR(tsk)) { + ret = PTR_ERR(tsk); + goto err_sqpoll; } - ret = io_uring_alloc_task_context(sqd->thread); + + sqd->thread = tsk; + ret = io_uring_alloc_task_context(tsk, ctx); + wake_up_new_task(tsk); if (ret) goto err; } else if (p->flags & IORING_SETUP_SQ_AFF) { @@ -7991,26 +8744,14 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, goto err; } -done: - ret = io_init_wq_offload(ctx, p); - if (ret) - goto err; - return 0; +err_sqpoll: + complete(&ctx->sq_data->exited); err: - io_finish_async(ctx); + io_sq_thread_finish(ctx); return ret; } -static void io_sq_offload_start(struct io_ring_ctx *ctx) -{ - struct io_sq_data *sqd = ctx->sq_data; - - ctx->flags &= ~IORING_SETUP_R_DISABLED; - if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd && sqd->thread) - wake_up_process(sqd->thread); -} - static inline void __io_unaccount_mem(struct user_struct *user, unsigned long nr_pages) { @@ -8036,37 +8777,27 @@ static inline int __io_account_mem(struct user_struct *user, return 0; } -static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, - enum io_mem_account acct) +static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) { - if (ctx->limit_mem) + if (ctx->user) __io_unaccount_mem(ctx->user, nr_pages); - if (ctx->mm_account) { - if (acct == ACCT_LOCKED) - ctx->mm_account->locked_vm -= nr_pages; - else if (acct == ACCT_PINNED) - atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm); - } + if (ctx->mm_account) + atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm); } -static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, - enum io_mem_account acct) +static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) { int ret; - if (ctx->limit_mem) { + if (ctx->user) { ret = __io_account_mem(ctx->user, nr_pages); if (ret) return ret; } - if (ctx->mm_account) { - if (acct == ACCT_LOCKED) - ctx->mm_account->locked_vm += nr_pages; - else if (acct == ACCT_PINNED) - atomic64_add(nr_pages, &ctx->mm_account->pinned_vm); - } + if (ctx->mm_account) + atomic64_add(nr_pages, &ctx->mm_account->pinned_vm); return 0; } @@ -8085,10 +8816,9 @@ static void io_mem_free(void *ptr) static void *io_mem_alloc(size_t size) { - gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP | - __GFP_NORETRY; + gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; - return (void *) __get_free_pages(gfp_flags, get_order(size)); + return (void *) __get_free_pages(gfp, get_order(size)); } static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, @@ -8120,41 +8850,58 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, return off; } -static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries) +static void io_buffer_unmap(struct io_ring_ctx *ctx, struct 
io_mapped_ubuf **slot) { - size_t pages; - - pages = (size_t)1 << get_order( - rings_size(sq_entries, cq_entries, NULL)); - pages += (size_t)1 << get_order( - array_size(sizeof(struct io_uring_sqe), sq_entries)); + struct io_mapped_ubuf *imu = *slot; + unsigned int i; - return pages; + if (imu != ctx->dummy_ubuf) { + for (i = 0; i < imu->nr_bvecs; i++) + unpin_user_page(imu->bvec[i].bv_page); + if (imu->acct_pages) + io_unaccount_mem(ctx, imu->acct_pages); + kvfree(imu); + } + *slot = NULL; } -static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx) +static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc) { - int i, j; + io_buffer_unmap(ctx, &prsrc->buf); + prsrc->buf = NULL; +} - if (!ctx->user_bufs) - return -ENXIO; +static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx) +{ + unsigned int i; - for (i = 0; i < ctx->nr_user_bufs; i++) { - struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; + for (i = 0; i < ctx->nr_user_bufs; i++) + io_buffer_unmap(ctx, &ctx->user_bufs[i]); + kfree(ctx->user_bufs); + io_rsrc_data_free(ctx->buf_data); + ctx->user_bufs = NULL; + ctx->buf_data = NULL; + ctx->nr_user_bufs = 0; +} - for (j = 0; j < imu->nr_bvecs; j++) - unpin_user_page(imu->bvec[j].bv_page); +static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx) +{ + unsigned nr = ctx->nr_user_bufs; + int ret; - if (imu->acct_pages) - io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED); - kvfree(imu->bvec); - imu->nr_bvecs = 0; - } + if (!ctx->buf_data) + return -ENXIO; - kfree(ctx->user_bufs); - ctx->user_bufs = NULL; + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. + */ ctx->nr_user_bufs = 0; - return 0; + ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx); + ctx->nr_user_bufs = nr; + if (!ret) + __io_sqe_buffers_unregister(ctx); + return ret; } static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst, @@ -8206,7 +8953,7 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages, /* check previously registered pages */ for (i = 0; i < ctx->nr_user_bufs; i++) { - struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; + struct io_mapped_ubuf *imu = ctx->user_bufs[i]; for (j = 0; j < imu->nr_bvecs; j++) { if (!PageCompound(imu->bvec[j].bv_page)) @@ -8225,6 +8972,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages, { int i, ret; + imu->acct_pages = 0; for (i = 0; i < nr_pages; i++) { if (!PageCompound(pages[i])) { imu->acct_pages++; @@ -8244,147 +8992,252 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages, if (!imu->acct_pages) return 0; - ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED); + ret = io_account_mem(ctx, imu->acct_pages); if (ret) imu->acct_pages = 0; return ret; } -static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, - unsigned nr_args) +static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, + struct io_mapped_ubuf **pimu, + struct page **last_hpage) { + struct io_mapped_ubuf *imu = NULL; struct vm_area_struct **vmas = NULL; struct page **pages = NULL; - struct page *last_hpage = NULL; - int i, j, got_pages = 0; - int ret = -EINVAL; + unsigned long off, start, end, ubuf; + size_t size; + int ret, pret, nr_pages, i; - if (ctx->user_bufs) - return -EBUSY; - if (!nr_args || nr_args > UIO_MAXIOV) - return -EINVAL; + if (!iov->iov_base) { + *pimu = ctx->dummy_ubuf; + return 0; + } - ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf), - GFP_KERNEL); - if 
(!ctx->user_bufs) - return -ENOMEM; + ubuf = (unsigned long) iov->iov_base; + end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; + start = ubuf >> PAGE_SHIFT; + nr_pages = end - start; - for (i = 0; i < nr_args; i++) { - struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; - unsigned long off, start, end, ubuf; - int pret, nr_pages; - struct iovec iov; - size_t size; + *pimu = NULL; + ret = -ENOMEM; - ret = io_copy_iov(ctx, &iov, arg, i); - if (ret) - goto err; + pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto done; + + vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *), + GFP_KERNEL); + if (!vmas) + goto done; + + imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL); + if (!imu) + goto done; + ret = 0; + mmap_read_lock(current->mm); + pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, + pages, vmas); + if (pret == nr_pages) { + /* don't support file backed memory */ + for (i = 0; i < nr_pages; i++) { + struct vm_area_struct *vma = vmas[i]; + + if (vma_is_shmem(vma)) + continue; + if (vma->vm_file && + !is_file_hugepages(vma->vm_file)) { + ret = -EOPNOTSUPP; + break; + } + } + } else { + ret = pret < 0 ? pret : -EFAULT; + } + mmap_read_unlock(current->mm); + if (ret) { /* - * Don't impose further limits on the size and buffer - * constraints here, we'll -EINVAL later when IO is - * submitted if they are wrong. + * if we did partial map, or found file backed vmas, + * release any pages we did get */ - ret = -EFAULT; - if (!iov.iov_base || !iov.iov_len) - goto err; + if (pret > 0) + unpin_user_pages(pages, pret); + goto done; + } - /* arbitrary limit, but we need something */ - if (iov.iov_len > SZ_1G) - goto err; + ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage); + if (ret) { + unpin_user_pages(pages, pret); + goto done; + } - ubuf = (unsigned long) iov.iov_base; - end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; - start = ubuf >> PAGE_SHIFT; - nr_pages = end - start; + off = ubuf & ~PAGE_MASK; + size = iov->iov_len; + for (i = 0; i < nr_pages; i++) { + size_t vec_len; + + vec_len = min_t(size_t, size, PAGE_SIZE - off); + imu->bvec[i].bv_page = pages[i]; + imu->bvec[i].bv_len = vec_len; + imu->bvec[i].bv_offset = off; + off = 0; + size -= vec_len; + } + /* store original address for later verification */ + imu->ubuf = ubuf; + imu->ubuf_end = ubuf + iov->iov_len; + imu->nr_bvecs = nr_pages; + *pimu = imu; + ret = 0; +done: + if (ret) + kvfree(imu); + kvfree(pages); + kvfree(vmas); + return ret; +} - ret = 0; - if (!pages || nr_pages > got_pages) { - kvfree(vmas); - kvfree(pages); - pages = kvmalloc_array(nr_pages, sizeof(struct page *), - GFP_KERNEL); - vmas = kvmalloc_array(nr_pages, - sizeof(struct vm_area_struct *), - GFP_KERNEL); - if (!pages || !vmas) { - ret = -ENOMEM; - goto err; - } - got_pages = nr_pages; - } +static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args) +{ + ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL); + return ctx->user_bufs ? 
0 : -ENOMEM; +} - imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec), - GFP_KERNEL); - ret = -ENOMEM; - if (!imu->bvec) - goto err; +static int io_buffer_validate(struct iovec *iov) +{ + unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1); - ret = 0; - mmap_read_lock(current->mm); - pret = pin_user_pages(ubuf, nr_pages, - FOLL_WRITE | FOLL_LONGTERM, - pages, vmas); - if (pret == nr_pages) { - /* don't support file backed memory */ - for (j = 0; j < nr_pages; j++) { - struct vm_area_struct *vma = vmas[j]; - - if (vma->vm_file && - !is_file_hugepages(vma->vm_file)) { - ret = -EOPNOTSUPP; - break; - } - } - } else { - ret = pret < 0 ? pret : -EFAULT; - } - mmap_read_unlock(current->mm); - if (ret) { - /* - * if we did partial map, or found file backed vmas, - * release any pages we did get - */ - if (pret > 0) - unpin_user_pages(pages, pret); - kvfree(imu->bvec); - goto err; + /* + * Don't impose further limits on the size and buffer + * constraints here, we'll -EINVAL later when IO is + * submitted if they are wrong. + */ + if (!iov->iov_base) + return iov->iov_len ? -EFAULT : 0; + if (!iov->iov_len) + return -EFAULT; + + /* arbitrary limit, but we need something */ + if (iov->iov_len > SZ_1G) + return -EFAULT; + + if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp)) + return -EOVERFLOW; + + return 0; +} + +static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg, + unsigned int nr_args, u64 __user *tags) +{ + struct page *last_hpage = NULL; + struct io_rsrc_data *data; + int i, ret; + struct iovec iov; + + if (ctx->user_bufs) + return -EBUSY; + if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS) + return -EINVAL; + ret = io_rsrc_node_switch_start(ctx); + if (ret) + return ret; + ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data); + if (ret) + return ret; + ret = io_buffers_map_alloc(ctx, nr_args); + if (ret) { + io_rsrc_data_free(data); + return ret; + } + + for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) { + ret = io_copy_iov(ctx, &iov, arg, i); + if (ret) + break; + ret = io_buffer_validate(&iov); + if (ret) + break; + if (!iov.iov_base && *io_get_tag_slot(data, i)) { + ret = -EINVAL; + break; } - ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage); - if (ret) { - unpin_user_pages(pages, pret); - kvfree(imu->bvec); - goto err; + ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i], + &last_hpage); + if (ret) + break; + } + + WARN_ON_ONCE(ctx->buf_data); + + ctx->buf_data = data; + if (ret) + __io_sqe_buffers_unregister(ctx); + else + io_rsrc_node_switch(ctx, NULL); + return ret; +} + +static int __io_sqe_buffers_update(struct io_ring_ctx *ctx, + struct io_uring_rsrc_update2 *up, + unsigned int nr_args) +{ + u64 __user *tags = u64_to_user_ptr(up->tags); + struct iovec iov, __user *iovs = u64_to_user_ptr(up->data); + struct page *last_hpage = NULL; + bool needs_switch = false; + __u32 done; + int i, err; + + if (!ctx->buf_data) + return -ENXIO; + if (up->offset + nr_args > ctx->nr_user_bufs) + return -EINVAL; + + for (done = 0; done < nr_args; done++) { + struct io_mapped_ubuf *imu; + int offset = up->offset + done; + u64 tag = 0; + + err = io_copy_iov(ctx, &iov, iovs, done); + if (err) + break; + if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) { + err = -EFAULT; + break; + } + err = io_buffer_validate(&iov); + if (err) + break; + if (!iov.iov_base && tag) { + err = -EINVAL; + break; } + err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage); + if (err) + break; - off = ubuf & 
~PAGE_MASK; - size = iov.iov_len; - for (j = 0; j < nr_pages; j++) { - size_t vec_len; - - vec_len = min_t(size_t, size, PAGE_SIZE - off); - imu->bvec[j].bv_page = pages[j]; - imu->bvec[j].bv_len = vec_len; - imu->bvec[j].bv_offset = off; - off = 0; - size -= vec_len; + i = array_index_nospec(offset, ctx->nr_user_bufs); + if (ctx->user_bufs[i] != ctx->dummy_ubuf) { + err = io_queue_rsrc_removal(ctx->buf_data, i, + ctx->rsrc_node, ctx->user_bufs[i]); + if (unlikely(err)) { + io_buffer_unmap(ctx, &imu); + break; + } + ctx->user_bufs[i] = NULL; + needs_switch = true; } - /* store original address for later verification */ - imu->ubuf = ubuf; - imu->len = iov.iov_len; - imu->nr_bvecs = nr_pages; - ctx->nr_user_bufs++; + ctx->user_bufs[i] = imu; + *io_get_tag_slot(ctx->buf_data, offset) = tag; } - kvfree(pages); - kvfree(vmas); - return 0; -err: - kvfree(pages); - kvfree(vmas); - io_sqe_buffer_unregister(ctx); - return ret; + + if (needs_switch) + io_rsrc_node_switch(ctx, ctx->buf_data); + return done ? done : err; } static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg) @@ -8401,6 +9254,7 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg) ctx->cq_ev_fd = eventfd_ctx_fdget(fd); if (IS_ERR(ctx->cq_ev_fd)) { int ret = PTR_ERR(ctx->cq_ev_fd); + ctx->cq_ev_fd = NULL; return ret; } @@ -8428,26 +9282,68 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) __io_remove_buffers(ctx, buf, index, -1U); } -static void io_ring_ctx_free(struct io_ring_ctx *ctx) +static void io_req_cache_free(struct list_head *list) { - io_finish_async(ctx); - io_sqe_buffer_unregister(ctx); + struct io_kiocb *req, *nxt; - if (ctx->sqo_task) { - put_task_struct(ctx->sqo_task); - ctx->sqo_task = NULL; - mmdrop(ctx->mm_account); - ctx->mm_account = NULL; + list_for_each_entry_safe(req, nxt, list, inflight_entry) { + list_del(&req->inflight_entry); + kmem_cache_free(req_cachep, req); } +} -#ifdef CONFIG_BLK_CGROUP - if (ctx->sqo_blkcg_css) - css_put(ctx->sqo_blkcg_css); -#endif +static void io_req_caches_free(struct io_ring_ctx *ctx) +{ + struct io_submit_state *state = &ctx->submit_state; + + mutex_lock(&ctx->uring_lock); + + if (state->free_reqs) { + kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); + state->free_reqs = 0; + } + + io_flush_cached_locked_reqs(ctx, state); + io_req_cache_free(&state->free_list); + mutex_unlock(&ctx->uring_lock); +} + +static void io_wait_rsrc_data(struct io_rsrc_data *data) +{ + if (data && !atomic_dec_and_test(&data->refs)) + wait_for_completion(&data->done); +} + +static void io_ring_ctx_free(struct io_ring_ctx *ctx) +{ + io_sq_thread_finish(ctx); + + /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ + io_wait_rsrc_data(ctx->buf_data); + io_wait_rsrc_data(ctx->file_data); - io_sqe_files_unregister(ctx); + mutex_lock(&ctx->uring_lock); + if (ctx->buf_data) + __io_sqe_buffers_unregister(ctx); + if (ctx->file_data) + __io_sqe_files_unregister(ctx); + if (ctx->rings) + __io_cqring_overflow_flush(ctx, true); + mutex_unlock(&ctx->uring_lock); io_eventfd_unregister(ctx); io_destroy_buffers(ctx); + if (ctx->sq_creds) + put_cred(ctx->sq_creds); + + /* there are no registered resources left, nobody uses it */ + if (ctx->rsrc_node) + io_rsrc_node_destroy(ctx->rsrc_node); + if (ctx->rsrc_backup_node) + io_rsrc_node_destroy(ctx->rsrc_backup_node); + flush_delayed_work(&ctx->rsrc_put_work); + + WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); + WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist)); #if defined(CONFIG_UNIX) if 
(ctx->ring_sock) { @@ -8455,15 +9351,23 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) sock_release(ctx->ring_sock); } #endif + WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); + + if (ctx->mm_account) { + mmdrop(ctx->mm_account); + ctx->mm_account = NULL; + } io_mem_free(ctx->rings); io_mem_free(ctx->sq_sqes); percpu_ref_exit(&ctx->refs); free_uid(ctx->user); - put_cred(ctx->creds); + io_req_caches_free(ctx); + if (ctx->hash_map) + io_wq_put_hash(ctx->hash_map); kfree(ctx->cancel_hash); - kmem_cache_free(req_cachep, ctx->fallback_req); + kfree(ctx->dummy_ubuf); kfree(ctx); } @@ -8472,7 +9376,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait) struct io_ring_ctx *ctx = file->private_data; __poll_t mask = 0; - poll_wait(file, &ctx->cq_wait, wait); + poll_wait(file, &ctx->poll_wait, wait); /* * synchronizes with barrier from wq_has_sleeper call in * io_commit_cqring @@ -8494,38 +9398,63 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait) * Users may get EPOLLIN meanwhile seeing nothing in cqring, this * pushes them to do the flush. */ - if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow)) + if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } -static int io_uring_fasync(int fd, struct file *file, int on) -{ - struct io_ring_ctx *ctx = file->private_data; - - return fasync_helper(fd, file, on, &ctx->cq_fasync); -} - static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) { - struct io_identity *iod; + const struct cred *creds; - iod = xa_erase(&ctx->personalities, id); - if (iod) { - put_cred(iod->creds); - if (refcount_dec_and_test(&iod->count)) - kfree(iod); + creds = xa_erase(&ctx->personalities, id); + if (creds) { + put_cred(creds); return 0; } return -EINVAL; } +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; + +static void io_tctx_exit_cb(struct callback_head *cb) +{ + struct io_uring_task *tctx = current->io_uring; + struct io_tctx_exit *work; + + work = container_of(cb, struct io_tctx_exit, task_work); + /* + * When @in_idle, we're in cancellation and it's racy to remove the + * node. It'll be removed by the end of cancellation, just ignore it. + * tctx can be NULL if the queueing of this task_work raced with + * work cancelation off the exec path. + */ + if (tctx && !atomic_read(&tctx->in_idle)) + io_uring_del_tctx_node((unsigned long)work->ctx); + complete(&work->completion); +} + +static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) +{ + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + + return req->ctx == data; +} + static void io_ring_exit_work(struct work_struct *work) { - struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, - exit_work); + struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); + unsigned long timeout = jiffies + HZ * 60 * 5; + unsigned long interval = HZ / 20; + struct io_tctx_exit exit; + struct io_tctx_node *node; + int ret; /* * If we're doing polled IO and end up having requests being @@ -8534,53 +9463,104 @@ static void io_ring_exit_work(struct work_struct *work) * as nobody else will be looking for them.
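 * (With this rewrite, the cancellation loop below also drains any ctx work
 * still queued on the SQPOLL thread's io-wq while that thread is parked.)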
*/ do { - io_iopoll_try_reap_events(ctx); - } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)); + io_uring_try_cancel_requests(ctx, NULL, true); + if (ctx->sq_data) { + struct io_sq_data *sqd = ctx->sq_data; + struct task_struct *tsk; + + io_sq_thread_park(sqd); + tsk = sqd->thread; + if (tsk && tsk->io_uring && tsk->io_uring->io_wq) + io_wq_cancel_cb(tsk->io_uring->io_wq, + io_cancel_ctx_cb, ctx, true); + io_sq_thread_unpark(sqd); + } + + if (WARN_ON_ONCE(time_after(jiffies, timeout))) { + /* there is little hope left, don't run it too often */ + interval = HZ * 60; + } + } while (!wait_for_completion_timeout(&ctx->ref_comp, interval)); + + init_completion(&exit.completion); + init_task_work(&exit.task_work, io_tctx_exit_cb); + exit.ctx = ctx; + /* + * Some may use context even when all refs and requests have been put, + * and they are free to do so while still holding uring_lock or + * completion_lock, see io_req_task_submit(). Apart from other work, + * this lock/unlock section also waits for them to finish. + */ + mutex_lock(&ctx->uring_lock); + while (!list_empty(&ctx->tctx_list)) { + WARN_ON_ONCE(time_after(jiffies, timeout)); + + node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, + ctx_node); + /* don't spin on a single task if cancellation failed */ + list_rotate_left(&ctx->tctx_list); + ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); + if (WARN_ON_ONCE(ret)) + continue; + wake_up_process(node->task); + + mutex_unlock(&ctx->uring_lock); + wait_for_completion(&exit.completion); + mutex_lock(&ctx->uring_lock); + } + mutex_unlock(&ctx->uring_lock); + spin_lock(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); + io_ring_ctx_free(ctx); } -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) +/* Returns true if we found and killed one or more timeouts */ +static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, + bool cancel_all) { - struct io_kiocb *req = container_of(work, struct io_kiocb, work); + struct io_kiocb *req, *tmp; + int canceled = 0; - return req->ctx == data; + spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { + if (io_match_task(req, tsk, cancel_all)) { + io_kill_timeout(req, -ECANCELED); + canceled++; + } + } + spin_unlock_irq(&ctx->timeout_lock); + if (canceled != 0) + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + if (canceled != 0) + io_cqring_ev_posted(ctx); + return canceled != 0; } static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { unsigned long index; - struct io_identity *iod; + struct creds *creds; mutex_lock(&ctx->uring_lock); percpu_ref_kill(&ctx->refs); - /* if force is set, the ring is going away.
always drop after that */ - - if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead)) - ctx->sqo_dead = 1; - - ctx->cq_overflow_flushed = 1; if (ctx->rings) - __io_cqring_overflow_flush(ctx, true, NULL, NULL); + __io_cqring_overflow_flush(ctx, true); + xa_for_each(&ctx->personalities, index, creds) + io_unregister_personality(ctx, index); mutex_unlock(&ctx->uring_lock); - io_kill_timeouts(ctx, NULL, NULL); - io_poll_remove_all(ctx, NULL, NULL); - - if (ctx->io_wq) - io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true); + io_kill_timeouts(ctx, NULL, true); + io_poll_remove_all(ctx, NULL, true); /* if we failed setting up the ctx, we might not have any rings */ io_iopoll_try_reap_events(ctx); - xa_for_each(&ctx->personalities, index, iod) - io_unregister_personality(ctx, index); - /* - * Do this upfront, so we won't have a grace period where the ring - * is closed but resources aren't reaped yet. This can cause - * spurious failure in setting up a new ring. - */ - io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries), - ACCT_LOCKED); + /* drop cached put refs after potentially doing completions */ + if (current->io_uring) + io_uring_drop_tctx_refs(current); INIT_WORK(&ctx->exit_work, io_ring_exit_work); /* @@ -8603,352 +9583,290 @@ static int io_uring_release(struct inode *inode, struct file *file) struct io_task_cancel { struct task_struct *task; - struct files_struct *files; + bool all; }; static bool io_cancel_task_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_task_cancel *cancel = data; - bool ret; - - if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) { - unsigned long flags; - struct io_ring_ctx *ctx = req->ctx; - /* protect against races with linked timeouts */ - spin_lock_irqsave(&ctx->completion_lock, flags); - ret = io_match_task(req, cancel->task, cancel->files); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - } else { - ret = io_match_task(req, cancel->task, cancel->files); - } - return ret; + return io_match_task_safe(req, cancel->task, cancel->all); } -static void io_cancel_defer_files(struct io_ring_ctx *ctx, - struct task_struct *task, - struct files_struct *files) +static bool io_cancel_defer_files(struct io_ring_ctx *ctx, + struct task_struct *task, bool cancel_all) { - struct io_defer_entry *de = NULL; + struct io_defer_entry *de; LIST_HEAD(list); - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); list_for_each_entry_reverse(de, &ctx->defer_list, list) { - if (io_match_task(de->req, task, files)) { + if (io_match_task_safe(de->req, task, cancel_all)) { list_cut_position(&list, &ctx->defer_list, &de->list); break; } } - spin_unlock_irq(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); + if (list_empty(&list)) + return false; while (!list_empty(&list)) { de = list_first_entry(&list, struct io_defer_entry, list); list_del_init(&de->list); - req_set_fail_links(de->req); - io_put_req(de->req); - io_req_complete(de->req, -ECANCELED); + io_req_complete_failed(de->req, -ECANCELED); kfree(de); } + return true; } -static int io_uring_count_inflight(struct io_ring_ctx *ctx, - struct task_struct *task, - struct files_struct *files) -{ - struct io_kiocb *req; - int cnt = 0; - - spin_lock_irq(&ctx->inflight_lock); - list_for_each_entry(req, &ctx->inflight_list, inflight_entry) - cnt += io_match_task(req, task, files); - spin_unlock_irq(&ctx->inflight_lock); - return cnt; -} - -static void io_uring_cancel_files(struct io_ring_ctx 
*ctx, - struct task_struct *task, - struct files_struct *files) +static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) { - while (!list_empty_careful(&ctx->inflight_list)) { - struct io_task_cancel cancel = { .task = task, .files = files }; - DEFINE_WAIT(wait); - int inflight; - - inflight = io_uring_count_inflight(ctx, task, files); - if (!inflight) - break; + struct io_tctx_node *node; + enum io_wq_cancel cret; + bool ret = false; - io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true); - io_poll_remove_all(ctx, task, files); - io_kill_timeouts(ctx, task, files); - /* cancellations _may_ trigger task work */ - io_run_task_work(); + mutex_lock(&ctx->uring_lock); + list_for_each_entry(node, &ctx->tctx_list, ctx_node) { + struct io_uring_task *tctx = node->task->io_uring; - prepare_to_wait(&task->io_uring->wait, &wait, - TASK_UNINTERRUPTIBLE); - if (inflight == io_uring_count_inflight(ctx, task, files)) - schedule(); - finish_wait(&task->io_uring->wait, &wait); + /* + * io_wq will stay alive while we hold uring_lock, because it's + * killed after ctx nodes, which requires to take the lock. + */ + if (!tctx || !tctx->io_wq) + continue; + cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); + ret |= (cret != IO_WQ_CANCEL_NOTFOUND); } + mutex_unlock(&ctx->uring_lock); + + return ret; } -static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx, - struct task_struct *task) +static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all) { + struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; + struct io_uring_task *tctx = task ? task->io_uring : NULL; + while (1) { - struct io_task_cancel cancel = { .task = task, .files = NULL, }; enum io_wq_cancel cret; bool ret = false; - cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true); - if (cret != IO_WQ_CANCEL_NOTFOUND) - ret = true; + if (!task) { + ret |= io_uring_try_cancel_iowq(ctx); + } else if (tctx && tctx->io_wq) { + /* + * Cancels requests of all rings, not only @ctx, but + * it's fine as the task is in exit/exec. + */ + cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, + &cancel, true); + ret |= (cret != IO_WQ_CANCEL_NOTFOUND); + } /* SQPOLL thread does its own polling */ - if (!(ctx->flags & IORING_SETUP_SQPOLL)) { + if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || + (ctx->sq_data && ctx->sq_data->thread == current)) { while (!list_empty_careful(&ctx->iopoll_list)) { io_iopoll_try_reap_events(ctx); ret = true; } } - ret |= io_poll_remove_all(ctx, task, NULL); - ret |= io_kill_timeouts(ctx, task, NULL); + ret |= io_cancel_defer_files(ctx, task, cancel_all); + ret |= io_poll_remove_all(ctx, task, cancel_all); + ret |= io_kill_timeouts(ctx, task, cancel_all); + if (task) + ret |= io_run_task_work(); if (!ret) break; - io_run_task_work(); cond_resched(); } } -static void io_disable_sqo_submit(struct io_ring_ctx *ctx) -{ - mutex_lock(&ctx->uring_lock); - ctx->sqo_dead = 1; - if (ctx->flags & IORING_SETUP_R_DISABLED) - io_sq_offload_start(ctx); - mutex_unlock(&ctx->uring_lock); - - /* make sure callers enter the ring to get error */ - if (ctx->rings) - io_ring_set_wakeup_flag(ctx); -} - -/* - * We need to iteratively cancel requests, in case a request has dependent - * hard links. These persist even for failure of cancelations, hence keep - * looping until none are found. 
- */ -static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, - struct files_struct *files) -{ - struct task_struct *task = current; - - if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { - io_disable_sqo_submit(ctx); - task = ctx->sq_data->thread; - atomic_inc(&task->io_uring->in_idle); - io_sq_thread_park(ctx->sq_data); - } - - io_cancel_defer_files(ctx, task, files); - io_cqring_overflow_flush(ctx, true, task, files); - - if (!files) - __io_uring_cancel_task_requests(ctx, task); - else - io_uring_cancel_files(ctx, task, files); - - if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { - atomic_dec(&task->io_uring->in_idle); - io_sq_thread_unpark(ctx->sq_data); - } -} - -/* - * Note that this task has used io_uring. We use it for cancelation purposes. - */ -static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file) +static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx) { struct io_uring_task *tctx = current->io_uring; + struct io_tctx_node *node; int ret; if (unlikely(!tctx)) { - ret = io_uring_alloc_task_context(current); + ret = io_uring_alloc_task_context(current, ctx); if (unlikely(ret)) return ret; + tctx = current->io_uring; - } - if (tctx->last != file) { - void *old = xa_load(&tctx->xa, (unsigned long)file); + if (ctx->iowq_limits_set) { + unsigned int limits[2] = { ctx->iowq_limits[0], + ctx->iowq_limits[1], }; - if (!old) { - get_file(file); - ret = xa_err(xa_store(&tctx->xa, (unsigned long)file, - file, GFP_KERNEL)); - if (ret) { - fput(file); + ret = io_wq_max_workers(tctx->io_wq, limits); + if (ret) return ret; - } } - tctx->last = file; } + if (!xa_load(&tctx->xa, (unsigned long)ctx)) { + node = kmalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->ctx = ctx; + node->task = current; - /* - * This is race safe in that the task itself is doing this, hence it - * cannot be going through the exit/cancel paths at the same time. - * This cannot be modified while exit/cancel is running. - */ - if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL)) - tctx->sqpoll = true; + ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx, + node, GFP_KERNEL)); + if (ret) { + kfree(node); + return ret; + } + mutex_lock(&ctx->uring_lock); + list_add(&node->ctx_node, &ctx->tctx_list); + mutex_unlock(&ctx->uring_lock); + } + tctx->last = ctx; return 0; } /* - * Remove this io_uring_file -> task mapping. + * Note that this task has used io_uring. We use it for cancelation purposes. */ -static void io_uring_del_task_file(struct file *file) +static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx) { struct io_uring_task *tctx = current->io_uring; - if (tctx->last == file) - tctx->last = NULL; - file = xa_erase(&tctx->xa, (unsigned long)file); - if (file) - fput(file); + if (likely(tctx && tctx->last == ctx)) + return 0; + return __io_uring_add_tctx_node(ctx); } -static void io_uring_remove_task_files(struct io_uring_task *tctx) +/* + * Remove this io_uring_file -> task mapping. 
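+ * Despite the name carried over from the old file-keyed scheme, the tctx
+ * xarray is now keyed by the io_ring_ctx address (see
+ * __io_uring_add_tctx_node() above), so @index here is really a ctx pointer.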
+ */
+static void io_uring_del_tctx_node(unsigned long index)
 {
-	struct file *file;
-	unsigned long index;
+	struct io_uring_task *tctx = current->io_uring;
+	struct io_tctx_node *node;
 
-	xa_for_each(&tctx->xa, index, file)
-		io_uring_del_task_file(file);
-}
+	if (!tctx)
+		return;
+	node = xa_erase(&tctx->xa, index);
+	if (!node)
+		return;
 
-void __io_uring_files_cancel(struct files_struct *files)
-{
-	struct io_uring_task *tctx = current->io_uring;
-	struct file *file;
-	unsigned long index;
+	WARN_ON_ONCE(current != node->task);
+	WARN_ON_ONCE(list_empty(&node->ctx_node));
 
-	/* make sure overflow events are dropped */
-	atomic_inc(&tctx->in_idle);
-	xa_for_each(&tctx->xa, index, file)
-		io_uring_cancel_task_requests(file->private_data, files);
-	atomic_dec(&tctx->in_idle);
+	mutex_lock(&node->ctx->uring_lock);
+	list_del(&node->ctx_node);
+	mutex_unlock(&node->ctx->uring_lock);
 
-	if (files)
-		io_uring_remove_task_files(tctx);
+	if (tctx->last == node->ctx)
+		tctx->last = NULL;
+	kfree(node);
 }
 
-static s64 tctx_inflight(struct io_uring_task *tctx)
+static void io_uring_clean_tctx(struct io_uring_task *tctx)
 {
+	struct io_wq *wq = tctx->io_wq;
+	struct io_tctx_node *node;
 	unsigned long index;
-	struct file *file;
-	s64 inflight;
-
-	inflight = percpu_counter_sum(&tctx->inflight);
-	if (!tctx->sqpoll)
-		return inflight;
-
-	/*
-	 * If we have SQPOLL rings, then we need to iterate and find them, and
-	 * add the pending count for those.
-	 */
-	xa_for_each(&tctx->xa, index, file) {
-		struct io_ring_ctx *ctx = file->private_data;
 
-		if (ctx->flags & IORING_SETUP_SQPOLL) {
-			struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
-
-			inflight += percpu_counter_sum(&__tctx->inflight);
-		}
+	xa_for_each(&tctx->xa, index, node) {
+		io_uring_del_tctx_node(index);
+		cond_resched();
+	}
+	if (wq) {
+		/*
+		 * Must be after io_uring_del_tctx_node() (removes nodes under
+		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
+		 */
+		io_wq_put_and_exit(wq);
+		tctx->io_wq = NULL;
 	}
+}
 
-	return inflight;
+static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
+{
+	if (tracked)
+		return atomic_read(&tctx->inflight_tracked);
+	return percpu_counter_sum(&tctx->inflight);
 }
 
 /*
- * Find any io_uring fd that this task has registered or done IO on, and cancel
- * requests.
+ * Find any io_uring ctx that this task has registered or done IO on, and cancel
+ * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
 */
-void __io_uring_task_cancel(void)
+static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 {
 	struct io_uring_task *tctx = current->io_uring;
-	DEFINE_WAIT(wait);
+	struct io_ring_ctx *ctx;
 	s64 inflight;
+	DEFINE_WAIT(wait);
 
-	/* make sure overflow events are dropped */
-	atomic_inc(&tctx->in_idle);
+	WARN_ON_ONCE(sqd && sqd->thread != current);
 
-	/* trigger io_disable_sqo_submit() */
-	if (tctx->sqpoll)
-		__io_uring_files_cancel(NULL);
+	if (!current->io_uring)
+		return;
+	if (tctx->io_wq)
+		io_wq_exit_start(tctx->io_wq);
 
+	atomic_inc(&tctx->in_idle);
 	do {
+		io_uring_drop_tctx_refs(current);
 		/* read completions before cancelations */
-		inflight = tctx_inflight(tctx);
+		inflight = tctx_inflight(tctx, !cancel_all);
 		if (!inflight)
 			break;
-		__io_uring_files_cancel(NULL);
-
-		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
-
-		/*
-		 * If we've seen completions, retry without waiting. This
-		 * avoids a race where a completion comes in before we did
-		 * prepare_to_wait().
- */ - if (inflight == tctx_inflight(tctx)) - schedule(); - finish_wait(&tctx->wait, &wait); - } while (1); - - atomic_dec(&tctx->in_idle); - - io_uring_remove_task_files(tctx); -} - -static int io_uring_flush(struct file *file, void *data) -{ - struct io_uring_task *tctx = current->io_uring; - struct io_ring_ctx *ctx = file->private_data; - - if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) - io_uring_cancel_task_requests(ctx, NULL); - if (!tctx) - return 0; + if (!sqd) { + struct io_tctx_node *node; + unsigned long index; - /* we should have cancelled and erased it before PF_EXITING */ - WARN_ON_ONCE((current->flags & PF_EXITING) && - xa_load(&tctx->xa, (unsigned long)file)); + xa_for_each(&tctx->xa, index, node) { + /* sqpoll task will cancel all its requests */ + if (node->ctx->sq_data) + continue; + io_uring_try_cancel_requests(node->ctx, current, + cancel_all); + } + } else { + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + io_uring_try_cancel_requests(ctx, current, + cancel_all); + } - /* - * fput() is pending, will be 2 if the only other ref is our potential - * task file note. If the task is exiting, drop regardless of count. - */ - if (atomic_long_read(&file->f_count) != 2) - return 0; + prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); + io_run_task_work(); + io_uring_drop_tctx_refs(current); - if (ctx->flags & IORING_SETUP_SQPOLL) { - /* there is only one file note, which is owned by sqo_task */ - WARN_ON_ONCE(ctx->sqo_task != current && - xa_load(&tctx->xa, (unsigned long)file)); - /* sqo_dead check is for when this happens after cancellation */ - WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead && - !xa_load(&tctx->xa, (unsigned long)file)); + /* + * If we've seen completions, retry without waiting. This + * avoids a race where a completion comes in before we did + * prepare_to_wait(). + */ + if (inflight == tctx_inflight(tctx, !cancel_all)) + schedule(); + finish_wait(&tctx->wait, &wait); + } while (1); - io_disable_sqo_submit(ctx); + io_uring_clean_tctx(tctx); + if (cancel_all) { + /* + * We shouldn't run task_works after cancel, so just leave + * ->in_idle set for normal exit. + */ + atomic_dec(&tctx->in_idle); + /* for exec all current's requests should be gone, kill tctx */ + __io_uring_free(current); } +} - if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current) - io_uring_del_task_file(file); - return 0; +void __io_uring_cancel(bool cancel_all) +{ + io_uring_cancel_generic(cancel_all, NULL); } static void *io_uring_validate_mmap_request(struct file *file, @@ -9023,61 +9941,84 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file, static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx) { - int ret = 0; DEFINE_WAIT(wait); do { if (!io_sqring_full(ctx)) break; - prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE); - if (unlikely(ctx->sqo_dead)) { - ret = -EOWNERDEAD; - goto out; - } - if (!io_sqring_full(ctx)) break; - schedule(); } while (!signal_pending(current)); finish_wait(&ctx->sqo_sq_wait, &wait); -out: - return ret; + return 0; +} + +static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, + struct __kernel_timespec __user **ts, + const sigset_t __user **sig) +{ + struct io_uring_getevents_arg arg; + + /* + * If EXT_ARG isn't set, then we have no timespec and the argp pointer + * is just a pointer to the sigset_t. 
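+ *
+ * When EXT_ARG is set, argp instead points at a struct
+ * io_uring_getevents_arg (see include/uapi/linux/io_uring.h):
+ *
+ *	struct io_uring_getevents_arg {
+ *		__u64	sigmask;
+ *		__u32	sigmask_sz;
+ *		__u32	pad;
+ *		__u64	ts;
+ *	};
+ *
+ * A sketch of a caller waiting for one CQE with a timeout (hypothetical
+ * example, io_uring_enter() standing in for the raw syscall):
+ *
+ *	struct io_uring_getevents_arg arg = {
+ *		.sigmask	= (__u64)(unsigned long)&sigmask,
+ *		.sigmask_sz	= _NSIG / 8,
+ *		.ts		= (__u64)(unsigned long)&ts,
+ *	};
+ *
+ *	io_uring_enter(ring_fd, 0, 1,
+ *		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
+ *		       &arg, sizeof(arg));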
+ */ + if (!(flags & IORING_ENTER_EXT_ARG)) { + *sig = (const sigset_t __user *) argp; + *ts = NULL; + return 0; + } + + /* + * EXT_ARG is set - ensure we agree on the size of it and copy in our + * timespec and sigset_t pointers if good. + */ + if (*argsz != sizeof(arg)) + return -EINVAL; + if (copy_from_user(&arg, argp, sizeof(arg))) + return -EFAULT; + if (arg.pad) + return -EINVAL; + *sig = u64_to_user_ptr(arg.sigmask); + *argsz = arg.sigmask_sz; + *ts = u64_to_user_ptr(arg.ts); + return 0; } SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, - u32, min_complete, u32, flags, const sigset_t __user *, sig, - size_t, sigsz) + u32, min_complete, u32, flags, const void __user *, argp, + size_t, argsz) { struct io_ring_ctx *ctx; - long ret = -EBADF; int submitted = 0; struct fd f; + long ret; io_run_task_work(); - if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | - IORING_ENTER_SQ_WAIT)) + if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | + IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))) return -EINVAL; f = fdget(fd); - if (!f.file) + if (unlikely(!f.file)) return -EBADF; ret = -EOPNOTSUPP; - if (f.file->f_op != &io_uring_fops) + if (unlikely(f.file->f_op != &io_uring_fops)) goto out_fput; ret = -ENXIO; ctx = f.file->private_data; - if (!percpu_ref_tryget(&ctx->refs)) + if (unlikely(!percpu_ref_tryget(&ctx->refs))) goto out_fput; ret = -EBADFD; - if (ctx->flags & IORING_SETUP_R_DISABLED) + if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) goto out; /* @@ -9087,9 +10028,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, */ ret = 0; if (ctx->flags & IORING_SETUP_SQPOLL) { - io_cqring_overflow_flush(ctx, false, NULL, NULL); + io_cqring_overflow_flush(ctx); - if (unlikely(ctx->sqo_dead)) { + if (unlikely(ctx->sq_data->thread == NULL)) { ret = -EOWNERDEAD; goto out; } @@ -9102,7 +10043,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } submitted = to_submit; } else if (to_submit) { - ret = io_uring_add_task_file(ctx, f.file); + ret = io_uring_add_tctx_node(ctx); if (unlikely(ret)) goto out; mutex_lock(&ctx->uring_lock); @@ -9113,6 +10054,13 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, goto out; } if (flags & IORING_ENTER_GETEVENTS) { + const sigset_t __user *sig; + struct __kernel_timespec __user *ts; + + ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); + if (unlikely(ret)) + goto out; + min_complete = min(min_complete, ctx->cq_entries); /* @@ -9125,7 +10073,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, !(ctx->flags & IORING_SETUP_SQPOLL)) { ret = io_iopoll_check(ctx, min_complete); } else { - ret = io_cqring_wait(ctx, min_complete, sig, sigsz); + ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts); } } @@ -9138,9 +10086,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, #ifdef CONFIG_PROC_FS static int io_uring_show_cred(struct seq_file *m, unsigned int id, - const struct io_identity *iod) + const struct cred *cred) { - const struct cred *cred = iod->creds; struct user_namespace *uns = seq_user_ns(m); struct group_info *gi; kernel_cap_t cap; @@ -9184,18 +10131,18 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) */ has_lock = mutex_trylock(&ctx->uring_lock); - if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) + if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) { sq = ctx->sq_data; + if (!sq->thread) + sq = NULL; + } seq_printf(m, "SqThread:\t%d\n", sq ? 
task_pid_nr(sq->thread) : -1); seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1); seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files); for (i = 0; has_lock && i < ctx->nr_user_files; i++) { - struct fixed_file_table *table; - struct file *f; + struct file *f = io_file_from_index(ctx, i); - table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; - f = table->files[i & IORING_FILE_TABLE_MASK]; if (f) seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname); else @@ -9203,21 +10150,21 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) } seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs); for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) { - struct io_mapped_ubuf *buf = &ctx->user_bufs[i]; + struct io_mapped_ubuf *buf = ctx->user_bufs[i]; + unsigned int len = buf->ubuf_end - buf->ubuf; - seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, - (unsigned int) buf->len); + seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len); } if (has_lock && !xa_empty(&ctx->personalities)) { unsigned long index; - const struct io_identity *iod; + const struct cred *cred; seq_printf(m, "Personalities:\n"); - xa_for_each(&ctx->personalities, index, iod) - io_uring_show_cred(m, index, iod); + xa_for_each(&ctx->personalities, index, cred) + io_uring_show_cred(m, index, cred); } seq_printf(m, "PollList:\n"); - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { struct hlist_head *list = &ctx->cancel_hash[i]; struct io_kiocb *req; @@ -9226,7 +10173,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) seq_printf(m, " op=%d, task_works=%d\n", req->opcode, req->task->task_works != NULL); } - spin_unlock_irq(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); if (has_lock) mutex_unlock(&ctx->uring_lock); } @@ -9244,14 +10191,12 @@ static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) static const struct file_operations io_uring_fops = { .release = io_uring_release, - .flush = io_uring_flush, .mmap = io_uring_mmap, #ifndef CONFIG_MMU .get_unmapped_area = io_uring_nommu_get_unmapped_area, .mmap_capabilities = io_uring_nommu_mmap_capabilities, #endif .poll = io_uring_poll, - .fasync = io_uring_fasync, #ifdef CONFIG_PROC_FS .show_fdinfo = io_uring_show_fdinfo, #endif @@ -9281,8 +10226,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, rings->cq_ring_mask = p->cq_entries - 1; rings->sq_ring_entries = p->sq_entries; rings->cq_ring_entries = p->cq_entries; - ctx->sq_mask = rings->sq_ring_mask; - ctx->cq_mask = rings->cq_ring_mask; size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); if (size == SIZE_MAX) { @@ -9309,7 +10252,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file) if (fd < 0) return fd; - ret = io_uring_add_task_file(ctx, file); + ret = io_uring_add_tctx_node(ctx); if (ret) { put_unused_fd(fd); return ret; @@ -9352,10 +10295,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx) static int io_uring_create(unsigned entries, struct io_uring_params *p, struct io_uring_params __user *params) { - struct user_struct *user = NULL; struct io_ring_ctx *ctx; struct file *file; - bool limit_mem; int ret; if (!entries) @@ -9395,34 +10336,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->cq_entries = 2 * p->sq_entries; } - user = get_uid(current_user()); - limit_mem = !capable(CAP_IPC_LOCK); - - if (limit_mem) { - ret = __io_account_mem(user, - 
ring_pages(p->sq_entries, p->cq_entries)); - if (ret) { - free_uid(user); - return ret; - } - } - ctx = io_ring_ctx_alloc(p); - if (!ctx) { - if (limit_mem) - __io_unaccount_mem(user, ring_pages(p->sq_entries, - p->cq_entries)); - free_uid(user); + if (!ctx) return -ENOMEM; - } ctx->compat = in_compat_syscall(); - ctx->user = user; - ctx->creds = get_current_cred(); -#ifdef CONFIG_AUDIT - ctx->loginuid = current->loginuid; - ctx->sessionid = current->sessionid; -#endif - ctx->sqo_task = get_task_struct(current); + if (!capable(CAP_IPC_LOCK)) + ctx->user = get_uid(current_user()); /* * This is just grabbed for accounting purposes. When a process exits, @@ -9433,35 +10352,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, mmgrab(current->mm); ctx->mm_account = current->mm; -#ifdef CONFIG_BLK_CGROUP - /* - * The sq thread will belong to the original cgroup it was inited in. - * If the cgroup goes offline (e.g. disabling the io controller), then - * issued bios will be associated with the closest cgroup later in the - * block layer. - */ - rcu_read_lock(); - ctx->sqo_blkcg_css = blkcg_css(); - ret = css_tryget_online(ctx->sqo_blkcg_css); - rcu_read_unlock(); - if (!ret) { - /* don't init against a dying cgroup, have the user try again */ - ctx->sqo_blkcg_css = NULL; - ret = -ENODEV; - goto err; - } -#endif - - /* - * Account memory _before_ installing the file descriptor. Once - * the descriptor is installed, it can get closed at any time. Also - * do this before hitting the general error path, as ring freeing - * will un-account as well. - */ - io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries), - ACCT_LOCKED); - ctx->limit_mem = limit_mem; - ret = io_allocate_scq_urings(ctx, p); if (ret) goto err; @@ -9469,9 +10359,11 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, ret = io_sq_offload_create(ctx, p); if (ret) goto err; - - if (!(p->flags & IORING_SETUP_R_DISABLED)) - io_sq_offload_start(ctx); + /* always set a rsrc node */ + ret = io_rsrc_node_switch_start(ctx); + if (ret) + goto err; + io_rsrc_node_switch(ctx, NULL); memset(&p->sq_off, 0, sizeof(p->sq_off)); p->sq_off.head = offsetof(struct io_rings, sq.head); @@ -9494,7 +10386,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | - IORING_FEAT_POLL_32BITS; + IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | + IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS | + IORING_FEAT_RSRC_TAGS; if (copy_to_user(params, p, sizeof(*p))) { ret = -EFAULT; @@ -9513,7 +10407,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, */ ret = io_uring_install_fd(ctx, file); if (ret < 0) { - io_disable_sqo_submit(ctx); /* fput will clean it up */ fput(file); return ret; @@ -9522,7 +10415,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); return ret; err: - io_disable_sqo_submit(ctx); io_ring_ctx_wait_and_kill(ctx); return ret; } @@ -9600,22 +10492,16 @@ static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args) static int io_register_personality(struct io_ring_ctx *ctx) { - struct io_identity *iod; + const struct cred *creds; u32 id; int ret; - iod = kmalloc(sizeof(*iod), GFP_KERNEL); - if (unlikely(!iod)) - return -ENOMEM; - - io_init_identity(iod); - iod->creds = 
get_current_cred(); + creds = get_current_cred(); - ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)iod, + ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds, XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL); if (ret < 0) { - put_cred(iod->creds); - kfree(iod); + put_cred(creds); return ret; } return id; @@ -9699,24 +10585,273 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx) if (ctx->restrictions.registered) ctx->restricted = 1; - io_sq_offload_start(ctx); + ctx->flags &= ~IORING_SETUP_R_DISABLED; + if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait)) + wake_up(&ctx->sq_data->wait); + return 0; +} + +static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type, + struct io_uring_rsrc_update2 *up, + unsigned nr_args) +{ + __u32 tmp; + int err; + + if (check_add_overflow(up->offset, nr_args, &tmp)) + return -EOVERFLOW; + err = io_rsrc_node_switch_start(ctx); + if (err) + return err; + + switch (type) { + case IORING_RSRC_FILE: + return __io_sqe_files_update(ctx, up, nr_args); + case IORING_RSRC_BUFFER: + return __io_sqe_buffers_update(ctx, up, nr_args); + } + return -EINVAL; +} + +static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg, + unsigned nr_args) +{ + struct io_uring_rsrc_update2 up; + + if (!nr_args) + return -EINVAL; + memset(&up, 0, sizeof(up)); + if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update))) + return -EFAULT; + if (up.resv || up.resv2) + return -EINVAL; + return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args); +} + +static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg, + unsigned size, unsigned type) +{ + struct io_uring_rsrc_update2 up; + + if (size != sizeof(up)) + return -EINVAL; + if (copy_from_user(&up, arg, sizeof(up))) + return -EFAULT; + if (!up.nr || up.resv || up.resv2) + return -EINVAL; + return __io_register_rsrc_update(ctx, type, &up, up.nr); +} + +static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, + unsigned int size, unsigned int type) +{ + struct io_uring_rsrc_register rr; + + /* keep it extendible */ + if (size != sizeof(rr)) + return -EINVAL; + + memset(&rr, 0, sizeof(rr)); + if (copy_from_user(&rr, arg, size)) + return -EFAULT; + if (!rr.nr || rr.resv || rr.resv2) + return -EINVAL; + + switch (type) { + case IORING_RSRC_FILE: + return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data), + rr.nr, u64_to_user_ptr(rr.tags)); + case IORING_RSRC_BUFFER: + return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data), + rr.nr, u64_to_user_ptr(rr.tags)); + } + return -EINVAL; +} + +static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg, + unsigned len) +{ + struct io_uring_task *tctx = current->io_uring; + cpumask_var_t new_mask; + int ret; + + if (!tctx || !tctx->io_wq) + return -EINVAL; + + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_clear(new_mask); + if (len > cpumask_size()) + len = cpumask_size(); + +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + ret = compat_get_bitmap(cpumask_bits(new_mask), + (const compat_ulong_t __user *)arg, + len * 8 /* CHAR_BIT */); + } else { + ret = copy_from_user(new_mask, arg, len); + } +#else + ret = copy_from_user(new_mask, arg, len); +#endif + + if (ret) { + free_cpumask_var(new_mask); + return -EFAULT; + } + + ret = io_wq_cpu_affinity(tctx->io_wq, new_mask); + free_cpumask_var(new_mask); + return ret; +} + +static int io_unregister_iowq_aff(struct io_ring_ctx *ctx) +{ + struct io_uring_task *tctx = current->io_uring; 
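+	/*
+	 * Userspace pairs this opcode with IORING_REGISTER_IOWQ_AFF roughly
+	 * as follows (hypothetical sketch; nr_args is the mask size in bytes
+	 * for the register call, and must be 0 here):
+	 *
+	 *	cpu_set_t mask;
+	 *
+	 *	CPU_ZERO(&mask);
+	 *	CPU_SET(0, &mask);
+	 *	io_uring_register(fd, IORING_REGISTER_IOWQ_AFF,
+	 *			  &mask, sizeof(mask));
+	 *	...
+	 *	io_uring_register(fd, IORING_UNREGISTER_IOWQ_AFF, NULL, 0);
+	 *
+	 * With no registered mask, io_wq_cpu_affinity() below gets a NULL
+	 * mask and io-wq falls back to its default worker affinity.
+	 */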
+ + if (!tctx || !tctx->io_wq) + return -EINVAL; + + return io_wq_cpu_affinity(tctx->io_wq, NULL); +} + +static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, + void __user *arg) + __must_hold(&ctx->uring_lock) +{ + struct io_tctx_node *node; + struct io_uring_task *tctx = NULL; + struct io_sq_data *sqd = NULL; + __u32 new_count[2]; + int i, ret; + + if (copy_from_user(new_count, arg, sizeof(new_count))) + return -EFAULT; + for (i = 0; i < ARRAY_SIZE(new_count); i++) + if (new_count[i] > INT_MAX) + return -EINVAL; + + if (ctx->flags & IORING_SETUP_SQPOLL) { + sqd = ctx->sq_data; + if (sqd) { + /* + * Observe the correct sqd->lock -> ctx->uring_lock + * ordering. Fine to drop uring_lock here, we hold + * a ref to the ctx. + */ + refcount_inc(&sqd->refs); + mutex_unlock(&ctx->uring_lock); + mutex_lock(&sqd->lock); + mutex_lock(&ctx->uring_lock); + if (sqd->thread) + tctx = sqd->thread->io_uring; + } + } else { + tctx = current->io_uring; + } + + BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits)); + + for (i = 0; i < ARRAY_SIZE(new_count); i++) + if (new_count[i]) + ctx->iowq_limits[i] = new_count[i]; + ctx->iowq_limits_set = true; + + ret = -EINVAL; + if (tctx && tctx->io_wq) { + ret = io_wq_max_workers(tctx->io_wq, new_count); + if (ret) + goto err; + } else { + memset(new_count, 0, sizeof(new_count)); + } + + if (sqd) { + mutex_unlock(&sqd->lock); + io_put_sq_data(sqd); + } + + if (copy_to_user(arg, new_count, sizeof(new_count))) + return -EFAULT; + + /* that's it for SQPOLL, only the SQPOLL task creates requests */ + if (sqd) + return 0; + + /* now propagate the restriction to all registered users */ + list_for_each_entry(node, &ctx->tctx_list, ctx_node) { + struct io_uring_task *tctx = node->task->io_uring; + + if (WARN_ON_ONCE(!tctx->io_wq)) + continue; + + for (i = 0; i < ARRAY_SIZE(new_count); i++) + new_count[i] = ctx->iowq_limits[i]; + /* ignore errors, it always returns zero anyway */ + (void)io_wq_max_workers(tctx->io_wq, new_count); + } return 0; +err: + if (sqd) { + mutex_unlock(&sqd->lock); + io_put_sq_data(sqd); + } + return ret; } static bool io_register_op_must_quiesce(int op) { switch (op) { + case IORING_REGISTER_BUFFERS: + case IORING_UNREGISTER_BUFFERS: + case IORING_REGISTER_FILES: case IORING_UNREGISTER_FILES: case IORING_REGISTER_FILES_UPDATE: case IORING_REGISTER_PROBE: case IORING_REGISTER_PERSONALITY: case IORING_UNREGISTER_PERSONALITY: + case IORING_REGISTER_FILES2: + case IORING_REGISTER_FILES_UPDATE2: + case IORING_REGISTER_BUFFERS2: + case IORING_REGISTER_BUFFERS_UPDATE: + case IORING_REGISTER_IOWQ_AFF: + case IORING_UNREGISTER_IOWQ_AFF: + case IORING_REGISTER_IOWQ_MAX_WORKERS: return false; default: return true; } } +static int io_ctx_quiesce(struct io_ring_ctx *ctx) +{ + long ret; + + percpu_ref_kill(&ctx->refs); + + /* + * Drop uring mutex before waiting for references to exit. If another + * thread is currently inside io_uring_enter() it might need to grab the + * uring_lock to make progress. If we hold it here across the drain + * wait, then we can deadlock. It's safe to drop the mutex here, since + * no new references will come in after we've killed the percpu ref. 
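+ *
+ * If the wait is interrupted by a signal (io_run_task_work_sig()
+ * returning an error below), io_refs_resurrect() brings the ctx back to
+ * life so the register syscall can bail out cleanly instead of leaving
+ * the ring half-dead.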
+ */ + mutex_unlock(&ctx->uring_lock); + do { + ret = wait_for_completion_interruptible(&ctx->ref_comp); + if (!ret) + break; + ret = io_run_task_work_sig(); + } while (ret >= 0); + mutex_lock(&ctx->uring_lock); + + if (ret) + io_refs_resurrect(&ctx->refs, &ctx->ref_comp); + return ret; +} + static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, void __user *arg, unsigned nr_args) __releases(ctx->uring_lock) @@ -9732,58 +10867,30 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, if (percpu_ref_is_dying(&ctx->refs)) return -ENXIO; - if (io_register_op_must_quiesce(opcode)) { - percpu_ref_kill(&ctx->refs); - - /* - * Drop uring mutex before waiting for references to exit. If - * another thread is currently inside io_uring_enter() it might - * need to grab the uring_lock to make progress. If we hold it - * here across the drain wait, then we can deadlock. It's safe - * to drop the mutex here, since no new references will come in - * after we've killed the percpu ref. - */ - mutex_unlock(&ctx->uring_lock); - do { - ret = wait_for_completion_interruptible(&ctx->ref_comp); - if (!ret) - break; - ret = io_run_task_work_sig(); - if (ret < 0) - break; - } while (1); - mutex_lock(&ctx->uring_lock); - - if (ret) { - io_refs_resurrect(&ctx->refs, &ctx->ref_comp); - return ret; - } - } - if (ctx->restricted) { - if (opcode >= IORING_REGISTER_LAST) { - ret = -EINVAL; - goto out; - } + opcode = array_index_nospec(opcode, IORING_REGISTER_LAST); + if (!test_bit(opcode, ctx->restrictions.register_op)) + return -EACCES; + } - if (!test_bit(opcode, ctx->restrictions.register_op)) { - ret = -EACCES; - goto out; - } + if (io_register_op_must_quiesce(opcode)) { + ret = io_ctx_quiesce(ctx); + if (ret) + return ret; } switch (opcode) { case IORING_REGISTER_BUFFERS: - ret = io_sqe_buffer_register(ctx, arg, nr_args); + ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL); break; case IORING_UNREGISTER_BUFFERS: ret = -EINVAL; if (arg || nr_args) break; - ret = io_sqe_buffer_unregister(ctx); + ret = io_sqe_buffers_unregister(ctx); break; case IORING_REGISTER_FILES: - ret = io_sqe_files_register(ctx, arg, nr_args); + ret = io_sqe_files_register(ctx, arg, nr_args, NULL); break; case IORING_UNREGISTER_FILES: ret = -EINVAL; @@ -9792,7 +10899,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ret = io_sqe_files_unregister(ctx); break; case IORING_REGISTER_FILES_UPDATE: - ret = io_sqe_files_update(ctx, arg, nr_args); + ret = io_register_files_update(ctx, arg, nr_args); break; case IORING_REGISTER_EVENTFD: case IORING_REGISTER_EVENTFD_ASYNC: @@ -9840,12 +10947,43 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, case IORING_REGISTER_RESTRICTIONS: ret = io_register_restrictions(ctx, arg, nr_args); break; + case IORING_REGISTER_FILES2: + ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE); + break; + case IORING_REGISTER_FILES_UPDATE2: + ret = io_register_rsrc_update(ctx, arg, nr_args, + IORING_RSRC_FILE); + break; + case IORING_REGISTER_BUFFERS2: + ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER); + break; + case IORING_REGISTER_BUFFERS_UPDATE: + ret = io_register_rsrc_update(ctx, arg, nr_args, + IORING_RSRC_BUFFER); + break; + case IORING_REGISTER_IOWQ_AFF: + ret = -EINVAL; + if (!arg || !nr_args) + break; + ret = io_register_iowq_aff(ctx, arg, nr_args); + break; + case IORING_UNREGISTER_IOWQ_AFF: + ret = -EINVAL; + if (arg || nr_args) + break; + ret = io_unregister_iowq_aff(ctx); + break; + case 
IORING_REGISTER_IOWQ_MAX_WORKERS: + ret = -EINVAL; + if (!arg || nr_args != 2) + break; + ret = io_register_iowq_max_workers(ctx, arg); + break; default: ret = -EINVAL; break; } -out: if (io_register_op_must_quiesce(opcode)) { /* bring the ctx back to life */ percpu_ref_reinit(&ctx->refs); @@ -9861,6 +10999,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, long ret = -EBADF; struct fd f; + if (opcode >= IORING_REGISTER_LAST) + return -EINVAL; + f = fdget(fd); if (!f.file) return -EBADF; @@ -9871,6 +11012,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, ctx = f.file->private_data; + io_run_task_work(); + mutex_lock(&ctx->uring_lock); ret = __io_uring_register(ctx, opcode, arg, nr_args); mutex_unlock(&ctx->uring_lock); @@ -9917,12 +11060,27 @@ static int __init io_uring_init(void) BUILD_BUG_SQE_ELEM(28, __u32, splice_flags); BUILD_BUG_SQE_ELEM(32, __u64, user_data); BUILD_BUG_SQE_ELEM(40, __u16, buf_index); + BUILD_BUG_SQE_ELEM(40, __u16, buf_group); BUILD_BUG_SQE_ELEM(42, __u16, personality); BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); + BUILD_BUG_SQE_ELEM(44, __u32, file_index); + + BUILD_BUG_ON(sizeof(struct io_uring_files_update) != + sizeof(struct io_uring_rsrc_update)); + BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) > + sizeof(struct io_uring_rsrc_update2)); + + /* ->buf_index is u16 */ + BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16)); + + /* should fit into one byte */ + BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST); - BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int)); - req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int)); + + req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC | + SLAB_ACCOUNT); return 0; }; __initcall(io_uring_init); diff --git a/ipc/msg.c b/ipc/msg.c index 6e6c8e0c9380e..8ded6b8f10a2c 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -147,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) key_t key = params->key; int msgflg = params->flg; - msq = kvmalloc(sizeof(*msq), GFP_KERNEL); + msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT); if (unlikely(!msq)) return -ENOMEM; diff --git a/ipc/sem.c b/ipc/sem.c index 7d9c06b0ad6e2..916f7a90be31c 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -511,7 +511,7 @@ static struct sem_array *sem_alloc(size_t nsems) if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) return NULL; - sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL); + sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT); if (unlikely(!sma)) return NULL; @@ -1852,7 +1852,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) undo_list = current->sysvsem.undo_list; if (!undo_list) { - undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); + undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT); if (undo_list == NULL) return -ENOMEM; spin_lock_init(&undo_list->lock); @@ -1937,7 +1937,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) rcu_read_unlock(); /* step 2: allocate new undo structure */ - new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); + new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT); if (!new) { ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return ERR_PTR(-ENOMEM); @@ -2190,14 +2190,15 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, * scenarios where we were awakened externally, during 
the * window between wake_q_add() and wake_up_q(). */ + rcu_read_lock(); error = READ_ONCE(queue.status); if (error != -EINTR) { /* see SEM_BARRIER_2 for purpose/pairing */ smp_acquire__after_ctrl_dep(); + rcu_read_unlock(); goto out_free; } - rcu_read_lock(); locknum = sem_lock(sma, sops, nsops); if (!ipc_valid_object(&sma->sem_perm)) diff --git a/ipc/shm.c b/ipc/shm.c index 471ac3e7498d5..b418731d66e88 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; - shp = kvmalloc(sizeof(*shp), GFP_KERNEL); + shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT); if (unlikely(!shp)) return -ENOMEM; diff --git a/kernel/Makefile b/kernel/Makefile index e7905bdf6e972..82e9c843617f0 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -53,7 +53,7 @@ obj-$(CONFIG_FREEZER) += freezer.o obj-$(CONFIG_PROFILING) += profile.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ -obj-$(CONFIG_FUTEX) += futex.o +obj-$(CONFIG_FUTEX) += futex/ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o ifneq ($(CONFIG_SMP),y) diff --git a/kernel/acct.c b/kernel/acct.c index f175df8f6aa4a..12f7dacf560e2 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -331,6 +331,8 @@ static comp_t encode_comp_t(unsigned long value) exp++; } + if (exp > (((comp_t) ~0U) >> MANTSIZE)) + return (comp_t) ~0U; /* * Clean it up and polish it off. */ diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index 5d3a7af9ba9ba..8aaaaef99f09f 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -70,7 +70,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN); if (selem) { if (value) - memcpy(SDATA(selem)->data, value, smap->map.value_size); + copy_map_value(&smap->map, SDATA(selem)->data, value); return selem; } diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index dc497eaf22663..52e7048607399 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2913,7 +2913,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env, if (v->next_member) { const struct btf_type *last_member_type; const struct btf_member *last_member; - u16 last_member_type_id; + u32 last_member_type_id; last_member = btf_type_member(v->t) + v->next_member - 1; last_member_type_id = last_member->type; @@ -3675,6 +3675,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env, break; } + if (btf_type_is_resolve_source_only(arg_type)) { + btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); + return -EINVAL; + } + if (args[i].name_off && (!btf_name_offset_valid(btf, args[i].name_off) || !btf_name_valid_identifier(btf, args[i].name_off))) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index fd2aa6b9909ec..73d4b1e32fbdb 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -1642,9 +1643,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) * reuse preexisting logic from Spectre v1 mitigation that * happens to produce the required code on x86 for v4 as well. 
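+ * barrier_nospec() now has a fallback definition on architectures that
+ * don't implement it natively (see the #include added at the top of
+ * this file), which is why the CONFIG_X86 guard below can go away.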
*/ -#ifdef CONFIG_X86 barrier_nospec(); -#endif CONT; #define LDST(SIZEOP, SIZE) \ STX_MEM_##SIZEOP: \ diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c index 3d897de890612..bbab8bb4b2fda 100644 --- a/kernel/bpf/percpu_freelist.c +++ b/kernel/bpf/percpu_freelist.c @@ -102,22 +102,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, u32 nr_elems) { struct pcpu_freelist_head *head; - int i, cpu, pcpu_entries; + unsigned int cpu, cpu_idx, i, j, n, m; - pcpu_entries = nr_elems / num_possible_cpus() + 1; - i = 0; + n = nr_elems / num_possible_cpus(); + m = nr_elems % num_possible_cpus(); + cpu_idx = 0; for_each_possible_cpu(cpu) { -again: head = per_cpu_ptr(s->freelist, cpu); - /* No locking required as this is not visible yet. */ - pcpu_freelist_push_node(head, buf); - i++; - buf += elem_size; - if (i == nr_elems) - break; - if (i % pcpu_entries) - goto again; + j = n + (cpu_idx < m ? 1 : 0); + for (i = 0; i < j; i++) { + /* No locking required as this is not visible yet. */ + pcpu_freelist_push_node(head, buf); + buf += elem_size; + } + cpu_idx++; } } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 419dbc3d060ee..aaad2dce2be6f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3915,7 +3915,9 @@ static int bpf_task_fd_query(const union bpf_attr *attr, if (attr->task_fd_query.flags != 0) return -EINVAL; + rcu_read_lock(); task = get_pid_task(find_vpid(pid), PIDTYPE_PID); + rcu_read_unlock(); if (!task) return -ENOENT; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e4dcc23b52c01..9e5f1ebe67d7f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -562,6 +562,20 @@ const char *kernel_type_name(u32 id) btf_type_by_id(btf_vmlinux, id)->name_off); } +/* The reg state of a pointer or a bounded scalar was saved when + * it was spilled to the stack. + */ +static bool is_spilled_reg(const struct bpf_stack_state *stack) +{ + return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; +} + +static void scrub_spilled_slot(u8 *stype) +{ + if (*stype != STACK_INVALID) + *stype = STACK_MISC; +} + static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { @@ -666,7 +680,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, continue; verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); - if (state->stack[i].slot_type[0] == STACK_SPILL) { + if (is_spilled_reg(&state->stack[i])) { reg = &state->stack[i].spilled_ptr; t = reg->type; verbose(env, "=%s", reg_type_str[t]); @@ -1868,8 +1882,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, */ if (insn->src_reg != BPF_REG_FP) return 0; - if (BPF_SIZE(insn->code) != BPF_DW) - return 0; /* dreg = *(u64 *)[fp - off] was a fill from the stack. 
* that [fp - off] slot contains scalar that needs to be @@ -1892,8 +1904,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, /* scalars can only be spilled into stack */ if (insn->dst_reg != BPF_REG_FP) return 0; - if (BPF_SIZE(insn->code) != BPF_DW) - return 0; spi = (-insn->off - 1) / BPF_REG_SIZE; if (spi >= 64) { verbose(env, "BUG spi %d\n", spi); @@ -2009,7 +2019,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { - if (func->stack[j].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&func->stack[j])) continue; reg = &func->stack[j].spilled_ptr; if (reg->type != SCALAR_VALUE) @@ -2019,7 +2029,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, } } -static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, +static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) { struct bpf_verifier_state *st = env->cur_state; @@ -2036,7 +2046,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, if (!env->bpf_capable) return 0; - func = st->frame[st->curframe]; + func = st->frame[frame]; if (regno >= 0) { reg = &func->regs[regno]; if (reg->type != SCALAR_VALUE) { @@ -2051,7 +2061,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, } while (spi >= 0) { - if (func->stack[spi].slot_type[0] != STACK_SPILL) { + if (!is_spilled_reg(&func->stack[spi])) { stack_mask = 0; break; } @@ -2117,7 +2127,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, break; new_marks = false; - func = st->frame[st->curframe]; + func = st->frame[frame]; bitmap_from_u64(mask, reg_mask); for_each_set_bit(i, mask, 32) { reg = &func->regs[i]; @@ -2150,7 +2160,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, return 0; } - if (func->stack[i].slot_type[0] != STACK_SPILL) { + if (!is_spilled_reg(&func->stack[i])) { stack_mask &= ~(1ull << i); continue; } @@ -2183,12 +2193,17 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, static int mark_chain_precision(struct bpf_verifier_env *env, int regno) { - return __mark_chain_precision(env, regno, -1); + return __mark_chain_precision(env, env->cur_state->curframe, regno, -1); +} + +static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno) +{ + return __mark_chain_precision(env, frame, regno, -1); } -static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) +static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi) { - return __mark_chain_precision(env, -1, spi); + return __mark_chain_precision(env, frame, -1, spi); } static bool is_spillable_regtype(enum bpf_reg_type type) @@ -2259,16 +2274,33 @@ static bool __is_pointer_value(bool allow_ptr_leaks, return reg->type != SCALAR_VALUE; } +/* Copy src state preserving dst->parent and dst->live fields */ +static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src) +{ + struct bpf_reg_state *parent = dst->parent; + enum bpf_reg_liveness live = dst->live; + + *dst = *src; + dst->parent = parent; + dst->live = live; +} + static void save_register_state(struct bpf_func_state *state, - int spi, struct bpf_reg_state *reg) + int spi, struct bpf_reg_state *reg, + int size) { int i; - state->stack[spi].spilled_ptr = *reg; - state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + 
copy_register_state(&state->stack[spi].spilled_ptr, reg); + if (size == BPF_REG_SIZE) + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; - for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_SPILL; + for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) + state->stack[spi].slot_type[i - 1] = STACK_SPILL; + + /* size < 8 bytes spill */ + for (; i; i--) + scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); } /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, @@ -2306,7 +2338,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, bool sanitize = reg && is_spillable_regtype(reg->type); for (i = 0; i < size; i++) { - if (state->stack[spi].slot_type[i] == STACK_INVALID) { + u8 type = state->stack[spi].slot_type[i]; + + if (type != STACK_MISC && type != STACK_ZERO) { sanitize = true; break; } @@ -2316,7 +2350,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, env->insn_aux_data[insn_idx].sanitize_stack_spill = true; } - if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && + if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) { if (dst_reg != BPF_REG_FP) { /* The backtracking logic can only recognize explicit @@ -2329,7 +2363,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, if (err) return err; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { @@ -2341,16 +2375,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else { u8 type = STACK_MISC; /* regular write of data into stack destroys any spilled ptr */ state->stack[spi].spilled_ptr.type = NOT_INIT; /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ - if (state->stack[spi].slot_type[0] == STACK_SPILL) + if (is_spilled_reg(&state->stack[spi])) for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[i]); /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon @@ -2439,14 +2473,17 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env, spi = slot / BPF_REG_SIZE; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; - if (!env->allow_ptr_leaks - && *stype != NOT_INIT - && *stype != SCALAR_VALUE) { - /* Reject the write if there's are spilled pointers in - * range. If we didn't reject here, the ptr status - * would be erased below (even though not all slots are - * actually overwritten), possibly opening the door to - * leaks. + if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { + /* Reject the write if range we may write to has not + * been initialized beforehand. If we didn't reject + * here, the ptr status would be erased below (even + * though not all slots are actually overwritten), + * possibly opening the door to leaks. + * + * We do however catch STACK_INVALID case below, and + * only allow reading possibly uninitialized memory + * later for CAP_PERFMON, as the write may not happen to + * that slot. 
*/ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", insn_idx, i); @@ -2554,35 +2591,56 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; struct bpf_reg_state *reg; - u8 *stype; + u8 *stype, type; stype = reg_state->stack[spi].slot_type; reg = ®_state->stack[spi].spilled_ptr; - if (stype[0] == STACK_SPILL) { - if (size != BPF_REG_SIZE) { + if (is_spilled_reg(®_state->stack[spi])) { + u8 spill_size = 1; + + for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) + spill_size++; + + if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { if (reg->type != SCALAR_VALUE) { verbose_linfo(env, env->insn_idx, "; "); verbose(env, "invalid size of register fill\n"); return -EACCES; } - if (dst_regno >= 0) { + + mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + if (dst_regno < 0) + return 0; + + if (!(off % BPF_REG_SIZE) && size == spill_size) { + /* The earlier check_reg_arg() has decided the + * subreg_def for this insn. Save it first. + */ + s32 subreg_def = state->regs[dst_regno].subreg_def; + + copy_register_state(&state->regs[dst_regno], reg); + state->regs[dst_regno].subreg_def = subreg_def; + } else { + for (i = 0; i < size; i++) { + type = stype[(slot - i) % BPF_REG_SIZE]; + if (type == STACK_SPILL) + continue; + if (type == STACK_MISC) + continue; + verbose(env, "invalid read from stack off %d+%d size %d\n", + off, i, size); + return -EACCES; + } mark_reg_unknown(env, state->regs, dst_regno); - state->regs[dst_regno].live |= REG_LIVE_WRITTEN; } - mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + state->regs[dst_regno].live |= REG_LIVE_WRITTEN; return 0; } - for (i = 1; i < BPF_REG_SIZE; i++) { - if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { - verbose(env, "corrupted spill memory\n"); - return -EACCES; - } - } if (dst_regno >= 0) { /* restore register state from stack */ - state->regs[dst_regno] = *reg; + copy_register_state(&state->regs[dst_regno], reg); /* mark reg as written since spilled pointer state likely * has its liveness marks cleared by is_state_visited() * which resets stack/reg liveness for state transitions @@ -2601,8 +2659,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); } else { - u8 type; - for (i = 0; i < size; i++) { type = stype[(slot - i) % BPF_REG_SIZE]; if (type == STACK_MISC) @@ -2978,7 +3034,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, regno); return -EACCES; } - err = __check_mem_access(env, regno, off, size, reg->range, + + err = reg->range < 0 ? 
-EINVAL : + __check_mem_access(env, regno, off, size, reg->range, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); @@ -4076,17 +4134,17 @@ static int check_stack_range_initialized( goto mark; } - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) goto mark; - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || env->allow_ptr_leaks)) { if (clobber) { __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) - state->stack[spi].slot_type[j] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[j]); } goto mark; } @@ -4991,50 +5049,41 @@ static int check_func_proto(const struct bpf_func_proto *fn, int func_id) /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. */ -static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, - struct bpf_func_state *state) +static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { - struct bpf_reg_state *regs = state->regs, *reg; - int i; - - for (i = 0; i < MAX_BPF_REG; i++) - if (reg_is_pkt_pointer_any(®s[i])) - mark_reg_unknown(env, regs, i); + struct bpf_func_state *state; + struct bpf_reg_state *reg; - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; + bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(env, reg); - } + })); } -static void clear_all_pkt_pointers(struct bpf_verifier_env *env) -{ - struct bpf_verifier_state *vstate = env->cur_state; - int i; - - for (i = 0; i <= vstate->curframe; i++) - __clear_all_pkt_pointers(env, vstate->frame[i]); -} +enum { + AT_PKT_END = -1, + BEYOND_PKT_END = -2, +}; -static void release_reg_references(struct bpf_verifier_env *env, - struct bpf_func_state *state, - int ref_obj_id) +static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) { - struct bpf_reg_state *regs = state->regs, *reg; - int i; + struct bpf_func_state *state = vstate->frame[vstate->curframe]; + struct bpf_reg_state *reg = &state->regs[regn]; - for (i = 0; i < MAX_BPF_REG; i++) - if (regs[i].ref_obj_id == ref_obj_id) - mark_reg_unknown(env, regs, i); + if (reg->type != PTR_TO_PACKET) + /* PTR_TO_PACKET_META is not supported yet */ + return; - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; - if (reg->ref_obj_id == ref_obj_id) - __mark_reg_unknown(env, reg); - } + /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. + * How far beyond pkt_end it goes is unknown. 
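+ * The relation is encoded in reg->range using the negative markers
+ * defined above (AT_PKT_END == -1, BEYOND_PKT_END == -2), which can
+ * never collide with a real, non-negative range: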
+ * if (!range_open) it's the case of pkt >= pkt_end + * if (range_open) it's the case of pkt > pkt_end + * hence this pointer is at least 1 byte bigger than pkt_end + */ + if (range_open) + reg->range = BEYOND_PKT_END; + else + reg->range = AT_PKT_END; } /* The pointer with the specified id has released its reference to kernel @@ -5043,16 +5092,22 @@ static void release_reg_references(struct bpf_verifier_env *env, static int release_reference(struct bpf_verifier_env *env, int ref_obj_id) { - struct bpf_verifier_state *vstate = env->cur_state; + struct bpf_func_state *state; + struct bpf_reg_state *reg; int err; - int i; err = release_reference_state(cur_func(env), ref_obj_id); if (err) return err; - for (i = 0; i <= vstate->curframe; i++) - release_reg_references(env, vstate->frame[i], ref_obj_id); + bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ + if (reg->ref_obj_id == ref_obj_id) { + if (!env->allow_ptr_leaks) + __mark_reg_not_init(env, reg); + else + __mark_reg_unknown(env, reg); + } + })); return 0; } @@ -5846,7 +5901,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, */ if (!ptr_is_dst_reg) { tmp = *dst_reg; - *dst_reg = *ptr_reg; + copy_register_state(dst_reg, ptr_reg); } ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, env->insn_idx); @@ -6990,6 +7045,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, return err; return adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); + } else if (dst_reg->precise) { + /* if dst_reg is precise, src_reg should be precise as well */ + err = mark_chain_precision(env, insn->src_reg); + if (err) + return err; } } else { /* Pretend the src is a reg with a known value, since we only @@ -7095,7 +7155,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) * to propagate min/max range. */ src_reg->id = ++env->id_gen; - *dst_reg = *src_reg; + copy_register_state(dst_reg, src_reg); dst_reg->live |= REG_LIVE_WRITTEN; dst_reg->subreg_def = DEF_NOT_SUBREG; } else { @@ -7106,7 +7166,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) insn->src_reg); return -EACCES; } else if (src_reg->type == SCALAR_VALUE) { - *dst_reg = *src_reg; + copy_register_state(dst_reg, src_reg); /* Make sure ID is cleared otherwise * dst_reg min/max could be incorrectly * propagated into src_reg by find_equal_scalars() @@ -7191,35 +7251,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) return 0; } -static void __find_good_pkt_pointers(struct bpf_func_state *state, - struct bpf_reg_state *dst_reg, - enum bpf_reg_type type, u16 new_range) -{ - struct bpf_reg_state *reg; - int i; - - for (i = 0; i < MAX_BPF_REG; i++) { - reg = &state->regs[i]; - if (reg->type == type && reg->id == dst_reg->id) - /* keep the maximum range already checked */ - reg->range = max(reg->range, new_range); - } - - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; - if (reg->type == type && reg->id == dst_reg->id) - reg->range = max(reg->range, new_range); - } -} - static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { - u16 new_range; - int i; + struct bpf_func_state *state; + struct bpf_reg_state *reg; + int new_range; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) @@ -7284,9 +7323,11 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, * the range won't allow anything. 
 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
	 */
-	for (i = 0; i <= vstate->curframe; i++)
-		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
-					 new_range);
+	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+		if (reg->type == type && reg->id == dst_reg->id)
+			/* keep the maximum range already checked */
+			reg->range = max(reg->range, new_range);
+	}));
 }
 
 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
@@ -7470,6 +7511,67 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
 	return is_branch64_taken(reg, val, opcode);
 }
 
+static int flip_opcode(u32 opcode)
+{
+	/* How can we transform "a <op> b" into "b <op> a"? */
+	static const u8 opcode_flip[16] = {
+		/* these stay the same */
+		[BPF_JEQ  >> 4] = BPF_JEQ,
+		[BPF_JNE  >> 4] = BPF_JNE,
+		[BPF_JSET >> 4] = BPF_JSET,
+		/* these swap "lesser" and "greater" (L and G in the opcodes) */
+		[BPF_JGE  >> 4] = BPF_JLE,
+		[BPF_JGT  >> 4] = BPF_JLT,
+		[BPF_JLE  >> 4] = BPF_JGE,
+		[BPF_JLT  >> 4] = BPF_JGT,
+		[BPF_JSGE >> 4] = BPF_JSLE,
+		[BPF_JSGT >> 4] = BPF_JSLT,
+		[BPF_JSLE >> 4] = BPF_JSGE,
+		[BPF_JSLT >> 4] = BPF_JSGT
+	};
+	return opcode_flip[opcode >> 4];
+}
+
+static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
+				   struct bpf_reg_state *src_reg,
+				   u8 opcode)
+{
+	struct bpf_reg_state *pkt;
+
+	if (src_reg->type == PTR_TO_PACKET_END) {
+		pkt = dst_reg;
+	} else if (dst_reg->type == PTR_TO_PACKET_END) {
+		pkt = src_reg;
+		opcode = flip_opcode(opcode);
+	} else {
+		return -1;
+	}
+
+	if (pkt->range >= 0)
+		return -1;
+
+	switch (opcode) {
+	case BPF_JLE:
+		/* pkt <= pkt_end */
+		fallthrough;
+	case BPF_JGT:
+		/* pkt > pkt_end */
+		if (pkt->range == BEYOND_PKT_END)
+			/* pkt has at least one extra byte beyond pkt_end */
+			return opcode == BPF_JGT;
+		break;
+	case BPF_JLT:
+		/* pkt < pkt_end */
+		fallthrough;
+	case BPF_JGE:
+		/* pkt >= pkt_end */
+		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
+			return opcode == BPF_JGE;
+		break;
+	}
+	return -1;
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -7640,23 +7742,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				u64 val, u32 val32,
 				u8 opcode, bool is_jmp32)
 {
-	/* How can we transform "a <op> b" into "b <op> a"? */
-	static const u8 opcode_flip[16] = {
-		/* these stay the same */
-		[BPF_JEQ  >> 4] = BPF_JEQ,
-		[BPF_JNE  >> 4] = BPF_JNE,
-		[BPF_JSET >> 4] = BPF_JSET,
-		/* these swap "lesser" and "greater" (L and G in the opcodes) */
-		[BPF_JGE  >> 4] = BPF_JLE,
-		[BPF_JGT  >> 4] = BPF_JLT,
-		[BPF_JLE  >> 4] = BPF_JGE,
-		[BPF_JLT  >> 4] = BPF_JGT,
-		[BPF_JSGE >> 4] = BPF_JSLE,
-		[BPF_JSGT >> 4] = BPF_JSLT,
-		[BPF_JSLE >> 4] = BPF_JSGE,
-		[BPF_JSLT >> 4] = BPF_JSGT
-	};
-	opcode = opcode_flip[opcode >> 4];
+	opcode = flip_opcode(opcode);
 	/* This uses zero as "not present in table"; luckily the zero opcode,
 	 * BPF_JA, can't get here.
 	 */
@@ -7754,7 +7840,7 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 			reg->ref_obj_id = 0;
 		} else if (!reg_may_point_to_spin_lock(reg)) {
 			/* For not-NULL ptr, reg->ref_obj_id will be reset
-			 * in release_reg_references().
+			 * in release_reference().
 			 *
 			 * reg->id is still used by spin_lock ptr. Other
 			 * than spin_lock ptr type, reg->id can be reset.
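
flip_opcode() exists so is_pkt_ptr_branch_taken() can normalize a comparison to keep the packet pointer on the left: `a <op> b` holds exactly when `b <flip(op)> a` does. The harness below replays the table in userspace; the BPF_J* values are copied from uapi/linux/bpf_common.h and uapi/linux/bpf.h, everything else is scaffolding:

```c
/* Standalone check of the opcode_flip table. Opcode values match
 * uapi/linux/bpf_common.h and uapi/linux/bpf.h.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define BPF_JEQ		0x10
#define BPF_JGT		0x20
#define BPF_JGE		0x30
#define BPF_JSET	0x40
#define BPF_JNE		0x50
#define BPF_JSGT	0x60
#define BPF_JSGE	0x70
#define BPF_JLT		0xa0
#define BPF_JLE		0xb0
#define BPF_JSLT	0xc0
#define BPF_JSLE	0xd0

static int flip_opcode(unsigned int opcode)
{
	static const unsigned char opcode_flip[16] = {
		[BPF_JEQ  >> 4] = BPF_JEQ,
		[BPF_JNE  >> 4] = BPF_JNE,
		[BPF_JSET >> 4] = BPF_JSET,
		[BPF_JGE  >> 4] = BPF_JLE,
		[BPF_JGT  >> 4] = BPF_JLT,
		[BPF_JLE  >> 4] = BPF_JGE,
		[BPF_JLT  >> 4] = BPF_JGT,
		[BPF_JSGE >> 4] = BPF_JSLE,
		[BPF_JSGT >> 4] = BPF_JSLT,
		[BPF_JSLE >> 4] = BPF_JSGE,
		[BPF_JSLT >> 4] = BPF_JSGT
	};
	return opcode_flip[opcode >> 4];
}

int main(void)
{
	static const int ops[] = {
		BPF_JEQ, BPF_JNE, BPF_JSET, BPF_JGE, BPF_JGT,
		BPF_JLE, BPF_JLT, BPF_JSGE, BPF_JSGT, BPF_JSLE, BPF_JSLT
	};

	/* Flipping twice must be the identity for every comparison. */
	for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		assert(flip_opcode(flip_opcode(ops[i])) == ops[i]);
	assert(flip_opcode(BPF_JGT) == BPF_JLT);	/* a > b  <=>  b < a */
	printf("opcode_flip is a self-inverse mapping\n");
	return 0;
}
```

Because the table maps each comparison to its mirror image, sharing it between reg_set_min_max_inv() and the new packet-pointer path removes a duplicated 16-entry table rather than adding logic.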
@@ -7764,22 +7850,6 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, } } -static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, - bool is_null) -{ - struct bpf_reg_state *reg; - int i; - - for (i = 0; i < MAX_BPF_REG; i++) - mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); - - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; - mark_ptr_or_null_reg(state, reg, id, is_null); - } -} - /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ @@ -7787,10 +7857,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; - struct bpf_reg_state *regs = state->regs; + struct bpf_reg_state *regs = state->regs, *reg; u32 ref_obj_id = regs[regno].ref_obj_id; u32 id = regs[regno].id; - int i; if (ref_obj_id && ref_obj_id == id && is_null) /* regs[regno] is in the " == NULL" branch. @@ -7799,8 +7868,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, */ WARN_ON_ONCE(release_reference_state(state, id)); - for (i = 0; i <= vstate->curframe; i++) - __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); + bpf_for_each_reg_in_vstate(vstate, state, reg, ({ + mark_ptr_or_null_reg(state, reg, id, is_null); + })); } static bool try_match_pkt_pointers(const struct bpf_insn *insn, @@ -7825,6 +7895,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); + mark_pkt_end(other_branch, insn->dst_reg, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && @@ -7832,6 +7903,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); + mark_pkt_end(this_branch, insn->src_reg, false); } else { return false; } @@ -7844,6 +7916,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); + mark_pkt_end(this_branch, insn->dst_reg, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && @@ -7851,6 +7924,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); + mark_pkt_end(other_branch, insn->src_reg, true); } else { return false; } @@ -7863,6 +7937,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); + mark_pkt_end(other_branch, insn->dst_reg, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && @@ -7870,6 +7945,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); + mark_pkt_end(this_branch, insn->src_reg, true); } else { return false; } @@ -7882,6 +7958,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' <= pkt_end, 
pkt_meta' <= pkt_data */
 		find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type,
 				       false);
+		mark_pkt_end(this_branch, insn->dst_reg, true);
 	} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 		    src_reg->type == PTR_TO_PACKET) ||
 		   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7889,6 +7966,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 		/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
 		find_good_pkt_pointers(this_branch, src_reg, src_reg->type,
 				       true);
+		mark_pkt_end(other_branch, insn->src_reg, false);
 	} else {
 		return false;
 	}
@@ -7905,23 +7983,11 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
 {
 	struct bpf_func_state *state;
 	struct bpf_reg_state *reg;
-	int i, j;
 
-	for (i = 0; i <= vstate->curframe; i++) {
-		state = vstate->frame[i];
-		for (j = 0; j < MAX_BPF_REG; j++) {
-			reg = &state->regs[j];
-			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
-				*reg = *known_reg;
-		}
-
-		bpf_for_each_spilled_reg(j, state, reg) {
-			if (!reg)
-				continue;
-			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
-				*reg = *known_reg;
-		}
-	}
+	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
+			copy_register_state(reg, known_reg);
+	}));
 }
 
 static int check_cond_jmp_op(struct bpf_verifier_env *env,
@@ -7988,6 +8054,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 					       src_reg->var_off.value,
 					       opcode,
 					       is_jmp32);
+	} else if (reg_is_pkt_pointer_any(dst_reg) &&
+		   reg_is_pkt_pointer_any(src_reg) &&
+		   !is_jmp32) {
+		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
 	}
 
 	if (pred >= 0) {
@@ -7996,7 +8066,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		 */
 		if (!__is_pointer_value(false, dst_reg))
 			err = mark_chain_precision(env, insn->dst_reg);
-		if (BPF_SRC(insn->code) == BPF_X && !err)
+		if (BPF_SRC(insn->code) == BPF_X && !err &&
+		    !__is_pointer_value(false, src_reg))
 			err = mark_chain_precision(env, insn->src_reg);
 		if (err)
 			return err;
@@ -9264,9 +9335,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			 * return false to continue verification of this path
 			 */
 			return false;
-		if (i % BPF_REG_SIZE)
+		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
 			continue;
-		if (old->stack[spi].slot_type[0] != STACK_SPILL)
+		if (!is_spilled_reg(&old->stack[spi]))
 			continue;
 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
 			     &cur->stack[spi].spilled_ptr, idmap))
@@ -9457,34 +9528,36 @@ static int propagate_precision(struct bpf_verifier_env *env,
 {
 	struct bpf_reg_state *state_reg;
 	struct bpf_func_state *state;
-	int i, err = 0;
+	int i, err = 0, fr;
 
-	state = old->frame[old->curframe];
-	state_reg = state->regs;
-	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
-		if (state_reg->type != SCALAR_VALUE ||
-		    !state_reg->precise)
-			continue;
-		if (env->log.level & BPF_LOG_LEVEL2)
-			verbose(env, "propagating r%d\n", i);
-		err = mark_chain_precision(env, i);
-		if (err < 0)
-			return err;
-	}
+	for (fr = old->curframe; fr >= 0; fr--) {
+		state = old->frame[fr];
+		state_reg = state->regs;
+		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
+			if (state_reg->type != SCALAR_VALUE ||
+			    !state_reg->precise)
+				continue;
+			if (env->log.level & BPF_LOG_LEVEL2)
+				verbose(env, "frame %d: propagating r%d\n", fr, i);
+			err = mark_chain_precision_frame(env, fr, i);
+			if (err < 0)
+				return err;
+		}
 
-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		state_reg = &state->stack[i].spilled_ptr;
-		if (state_reg->type != SCALAR_VALUE ||
-		    !state_reg->precise)
-			continue;
-		if (env->log.level & BPF_LOG_LEVEL2)
-			verbose(env, "propagating fp%d\n",
-				(-i - 1) * BPF_REG_SIZE);
-		err = mark_chain_precision_stack(env, i);
-		if (err < 0)
-			return err;
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+			if (!is_spilled_reg(&state->stack[i]))
+				continue;
+			state_reg = &state->stack[i].spilled_ptr;
+			if (state_reg->type != SCALAR_VALUE ||
+			    !state_reg->precise)
+				continue;
+			if (env->log.level & BPF_LOG_LEVEL2)
+				verbose(env, "frame %d: propagating fp%d\n",
+					fr, (-i - 1) * BPF_REG_SIZE);
+			err = mark_chain_precision_stack_frame(env, fr, i);
+			if (err < 0)
+				return err;
+		}
 	}
 
 	return 0;
 }
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 6e36e854b5124..d8fcc139ac05d 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -169,7 +169,6 @@ extern struct mutex cgroup_mutex;
 extern spinlock_t css_set_lock;
 extern struct cgroup_subsys *cgroup_subsys[];
 extern struct list_head cgroup_roots;
-extern struct file_system_type cgroup_fs_type;
 
 /* iterate across the hierarchies */
 #define for_each_root(root)						\
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 1a0a9f820c69b..433b9e840b387 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -57,6 +57,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 	int retval = 0;
 
 	mutex_lock(&cgroup_mutex);
+	cpus_read_lock();
 	percpu_down_write(&cgroup_threadgroup_rwsem);
 	for_each_root(root) {
 		struct cgroup *from_cgrp;
@@ -73,6 +74,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 		break;
 	}
 	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cpus_read_unlock();
 	mutex_unlock(&cgroup_mutex);
 
 	return retval;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 5046c99deba86..684c16849eff3 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2304,6 +2304,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset) also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */ +static void cgroup_attach_lock(bool lock_threadgroup) +{ + cpus_read_lock(); + if (lock_threadgroup) + percpu_down_write(&cgroup_threadgroup_rwsem); +} + +/** + * cgroup_attach_unlock - Undo cgroup_attach_lock() + * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem + */ +static void cgroup_attach_unlock(bool lock_threadgroup) +{ + if (lock_threadgroup) + percpu_up_write(&cgroup_threadgroup_rwsem); + cpus_read_unlock(); +} + /** * cgroup_migrate_add_task - add a migration target task to a migration context * @task: target task @@ -2780,8 +2821,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, } struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, - bool *locked) - __acquires(&cgroup_threadgroup_rwsem) + bool *threadgroup_locked) { struct task_struct *tsk; pid_t pid; @@ -2798,12 +2838,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, * Therefore, we can skip the global lock. */ lockdep_assert_held(&cgroup_mutex); - if (pid || threadgroup) { - percpu_down_write(&cgroup_threadgroup_rwsem); - *locked = true; - } else { - *locked = false; - } + *threadgroup_locked = pid || threadgroup; + cgroup_attach_lock(*threadgroup_locked); rcu_read_lock(); if (pid) { @@ -2834,17 +2870,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, goto out_unlock_rcu; out_unlock_threadgroup: - if (*locked) { - percpu_up_write(&cgroup_threadgroup_rwsem); - *locked = false; - } + cgroup_attach_unlock(*threadgroup_locked); + *threadgroup_locked = false; out_unlock_rcu: rcu_read_unlock(); return tsk; } -void cgroup_procs_write_finish(struct task_struct *task, bool locked) - __releases(&cgroup_threadgroup_rwsem) +void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked) { struct cgroup_subsys *ss; int ssid; @@ -2852,8 +2885,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked) /* release reference from cgroup_procs_write_start() */ put_task_struct(task); - if (locked) - percpu_up_write(&cgroup_threadgroup_rwsem); + cgroup_attach_unlock(threadgroup_locked); + for_each_subsys(ss, ssid) if (ss->post_attach) ss->post_attach(); @@ -2908,12 +2941,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) struct cgroup_subsys_state *d_css; struct cgroup *dsct; struct css_set *src_cset; + bool has_tasks; int ret; lockdep_assert_held(&cgroup_mutex); - percpu_down_write(&cgroup_threadgroup_rwsem); - /* look up all csses currently attached to @cgrp's subtree */ spin_lock_irq(&css_set_lock); cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { @@ -2924,6 +2956,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) } spin_unlock_irq(&css_set_lock); + /* + * We need to write-lock threadgroup_rwsem while migrating tasks. + * However, if there are no source csets for @cgrp, changing its + * controllers isn't gonna produce any task migrations and the + * write-locking can be skipped safely. 
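
The resulting discipline is simple to state: every attach path enters with cpus_read_lock() held and only optionally takes the heavier write lock, so the two locks can never be acquired in opposite orders. A userspace analogy of the cgroup_attach_lock()/cgroup_attach_unlock() pairing, using two pthread rwlocks with illustrative names rather than kernel APIs:

```c
/* Userspace analogy of cgroup_attach_lock()/cgroup_attach_unlock():
 * the "hotplug" lock always comes first, the write lock is optional.
 * Build with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t threadgroup_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static void attach_lock(bool lock_threadgroup)
{
	pthread_rwlock_rdlock(&hotplug_lock);		/* always first */
	if (lock_threadgroup)
		pthread_rwlock_wrlock(&threadgroup_rwsem);
}

static void attach_unlock(bool lock_threadgroup)
{
	if (lock_threadgroup)
		pthread_rwlock_unlock(&threadgroup_rwsem);
	pthread_rwlock_unlock(&hotplug_lock);		/* always last */
}

int main(void)
{
	/* Migration path: needs both locks, in the fixed order. */
	attach_lock(true);
	puts("migrating tasks with both locks held");
	attach_unlock(true);

	/* No source tasks: the expensive write lock is skipped, but the
	 * ->attach() callbacks may still assume hotplug is blocked.
	 */
	attach_lock(false);
	puts("nothing to migrate, only hotplug is blocked");
	attach_unlock(false);
	return 0;
}
```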
+ */ + has_tasks = !list_empty(&mgctx.preloaded_src_csets); + cgroup_attach_lock(has_tasks); + /* NULL dst indicates self on default hierarchy */ ret = cgroup_migrate_prepare_dst(&mgctx); if (ret) @@ -2943,7 +2984,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) ret = cgroup_migrate_execute(&mgctx); out_finish: cgroup_migrate_finish(&mgctx); - percpu_up_write(&cgroup_threadgroup_rwsem); + cgroup_attach_unlock(has_tasks); return ret; } @@ -4799,13 +4840,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of, struct task_struct *task; const struct cred *saved_cred; ssize_t ret; - bool locked; + bool threadgroup_locked; dst_cgrp = cgroup_kn_lock_live(of->kn, false); if (!dst_cgrp) return -ENODEV; - task = cgroup_procs_write_start(buf, true, &locked); + task = cgroup_procs_write_start(buf, true, &threadgroup_locked); ret = PTR_ERR_OR_ZERO(task); if (ret) goto out_unlock; @@ -4831,7 +4872,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of, ret = cgroup_attach_task(dst_cgrp, task, true); out_finish: - cgroup_procs_write_finish(task, locked); + cgroup_procs_write_finish(task, threadgroup_locked); out_unlock: cgroup_kn_unlock(of->kn); diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index c51863b63f93a..43270b07b2e0b 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -1059,10 +1060,18 @@ static void update_tasks_cpumask(struct cpuset *cs) { struct css_task_iter it; struct task_struct *task; + bool top_cs = cs == &top_cpuset; css_task_iter_start(&cs->css, 0, &it); - while ((task = css_task_iter_next(&it))) + while ((task = css_task_iter_next(&it))) { + /* + * Percpu kthreads in top_cpuset are ignored + */ + if (top_cs && (task->flags & PF_KTHREAD) && + kthread_is_per_cpu(task)) + continue; set_cpus_allowed_ptr(task, cs->effective_cpus); + } css_task_iter_end(&it); } @@ -2016,12 +2025,7 @@ static int update_prstate(struct cpuset *cs, int new_prs) update_flag(CS_CPU_EXCLUSIVE, cs, 0); } - /* - * Update cpumask of parent's tasks except when it is the top - * cpuset as some system daemons cannot be mapped to other CPUs. - */ - if (parent != &top_cpuset) - update_tasks_cpumask(parent); + update_tasks_cpumask(parent); if (parent->child_ecpus_count) update_sibling_cpumasks(parent, cs, &tmpmask); @@ -2212,7 +2216,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) cgroup_taskset_first(tset, &css); cs = css_cs(css); - cpus_read_lock(); + lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ percpu_down_write(&cpuset_rwsem); /* prepare for attach */ @@ -2268,7 +2272,6 @@ static void cpuset_attach(struct cgroup_taskset *tset) wake_up(&cpuset_attach_wq); percpu_up_write(&cpuset_rwsem); - cpus_read_unlock(); } /* The various types of files and directories in a cpuset file system */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 3c9ee966c56a5..008b50da22246 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2231,8 +2231,10 @@ static ssize_t write_cpuhp_target(struct device *dev, if (st->state < target) ret = cpu_up(dev->id, target); - else + else if (st->state > target) ret = cpu_down(dev->id, target); + else if (WARN_ON(st->target != target)) + st->target = target; out: unlock_device_hotplug(); return ret ? 
ret : count;
 }
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 274587a57717f..d897d161366a4 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -452,7 +452,10 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 	}
 }
 
-#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+	return start + (idx << IO_TLB_SHIFT);
+}
 
 /*
  * Return the offset into an iotlb slot required to keep the device happy.
@@ -731,7 +734,18 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 
 size_t swiotlb_max_mapping_size(struct device *dev)
 {
-	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
+	int min_align_mask = dma_get_min_align_mask(dev);
+	int min_align = 0;
+
+	/*
+	 * swiotlb_find_slots() skips slots according to
+	 * min align mask. This affects max mapping size.
+	 * Take it into account here.
+	 */
+	if (min_align_mask)
+		min_align = roundup(min_align_mask, IO_TLB_SIZE);
+
+	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
 }
 
 bool is_swiotlb_active(void)
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index e289e67732926..09f58853f6927 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -135,7 +135,15 @@ static __always_inline void exit_to_user_mode(void)
 }
 
 /* Workaround to allow gradual conversion of architecture code */
-void __weak arch_do_signal(struct pt_regs *regs) { }
+void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }
+
+static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
+{
+	if (ti_work & _TIF_NOTIFY_SIGNAL)
+		tracehook_notify_signal();
+
+	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
+}
 
 static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 					    unsigned long ti_work)
@@ -157,8 +165,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 		if (ti_work & _TIF_PATCH_PENDING)
 			klp_update_patch_state(current);
 
-		if (ti_work & _TIF_SIGPENDING)
-			arch_do_signal(regs);
+		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+			handle_signal_work(regs, ti_work);
 
 		if (ti_work & _TIF_NOTIFY_RESUME) {
 			tracehook_notify_resume(regs);
diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
index 2a3139dab109e..7b946847be783 100644
--- a/kernel/entry/kvm.c
+++ b/kernel/entry/kvm.c
@@ -8,7 +8,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
 	do {
 		int ret;
 
-		if (ti_work & _TIF_SIGPENDING) {
+		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
 			kvm_handle_signal_exit(vcpu);
 			return -EINTR;
 		}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e01216f4e5af..d7b61116f15bb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8740,7 +8740,7 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
 				PERF_RECORD_KSYMBOL_TYPE_BPF,
 				(u64)(unsigned long)subprog->bpf_func,
 				subprog->jited_len, unregister,
-				prog->aux->ksym.name);
+				subprog->aux->ksym.name);
 		}
 	}
 }
@@ -10810,13 +10810,15 @@ static int pmu_dev_alloc(struct pmu *pmu)
 	pmu->dev->groups = pmu->attr_groups;
 	device_initialize(pmu->dev);
 
-	ret = dev_set_name(pmu->dev, "%s", pmu->name);
-	if (ret)
-		goto free_dev;
 
 	dev_set_drvdata(pmu->dev, pmu);
 	pmu->dev->bus = &pmu_bus;
 	pmu->dev->release = pmu_dev_release;
+
+	ret = dev_set_name(pmu->dev, "%s", pmu->name);
+	if (ret)
+		goto free_dev;
+
 	ret = device_add(pmu->dev);
 	if (ret)
 		goto free_dev;
@@ -11779,12 +11781,12 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
-
/* Do we allow access to perf_event_open(2) ? */ - err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); + err = perf_copy_attr(attr_uptr, &attr); if (err) return err; - err = perf_copy_attr(attr_uptr, &attr); + /* Do we allow access to perf_event_open(2) ? */ + err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); if (err) return err; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index e1bbb3b92921d..826a2355da1ed 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1973,7 +1973,7 @@ bool uprobe_deny_signal(void) WARN_ON_ONCE(utask->state != UTASK_SSTEP); - if (signal_pending(t)) { + if (task_sigpending(t)) { spin_lock_irq(&t->sighand->siglock); clear_tsk_thread_flag(t, TIF_SIGPENDING); spin_unlock_irq(&t->sighand->siglock); diff --git a/kernel/exit.c b/kernel/exit.c index ab900b661867f..bacdaf980933b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -64,11 +64,58 @@ #include #include #include +#include #include #include #include +/* + * The default value should be high enough to not crash a system that randomly + * crashes its kernel from time to time, but low enough to at least not permit + * overflowing 32-bit refcounts or the ldsem writer count. + */ +static unsigned int oops_limit = 10000; + +#ifdef CONFIG_SYSCTL +static struct ctl_table kern_exit_table[] = { + { + .procname = "oops_limit", + .data = &oops_limit, + .maxlen = sizeof(oops_limit), + .mode = 0644, + .proc_handler = proc_douintvec, + }, + { } +}; + +static __init int kernel_exit_sysctls_init(void) +{ + register_sysctl_init("kernel", kern_exit_table); + return 0; +} +late_initcall(kernel_exit_sysctls_init); +#endif + +static atomic_t oops_count = ATOMIC_INIT(0); + +#ifdef CONFIG_SYSFS +static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr, + char *page) +{ + return sysfs_emit(page, "%d\n", atomic_read(&oops_count)); +} + +static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count); + +static __init int kernel_exit_sysfs_init(void) +{ + sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL); + return 0; +} +late_initcall(kernel_exit_sysfs_init); +#endif + static void __unhash_process(struct task_struct *p, bool group_dead) { nr_threads--; @@ -763,7 +810,7 @@ void __noreturn do_exit(long code) schedule(); } - io_uring_files_cancel(tsk->files); + io_uring_files_cancel(); exit_signals(tsk); /* sets PF_EXITING */ /* sync mm's RSS info before statistics gathering */ @@ -863,6 +910,31 @@ void __noreturn do_exit(long code) } EXPORT_SYMBOL_GPL(do_exit); +void __noreturn make_task_dead(int signr) +{ + /* + * Take the task off the cpu after something catastrophic has + * happened. + */ + unsigned int limit; + + /* + * Every time the system oopses, if the oops happens while a reference + * to an object was held, the reference leaks. + * If the oops doesn't also leak memory, repeated oopsing can cause + * reference counters to wrap around (if they're not using refcount_t). + * This means that repeated oopsing can make unexploitable-looking bugs + * exploitable through repeated oopsing. + * To make sure this can't happen, place an upper bound on how often the + * kernel may oops without panic(). 
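
The counting scheme is small enough to model in userspace. In the sketch below, C11 atomics stand in for atomic_inc_return()/READ_ONCE() and an exiting function stands in for panic(); the limit semantics (0 disables the check, otherwise panic once the count reaches the limit) mirror the make_task_dead() hunk that follows:

```c
/* Minimal model of the oops_limit accounting in make_task_dead(). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint oops_count;
static unsigned int oops_limit = 3;	/* the kernel default is 10000 */

static void fake_panic(const char *why)
{
	fprintf(stderr, "panic: %s\n", why);
	exit(1);
}

static void fake_oops(void)
{
	unsigned int limit = oops_limit;	/* READ_ONCE() in the kernel */

	/* A limit of 0 disables the check entirely; the && ordering lets
	 * the counter keep incrementing either way.
	 */
	if (atomic_fetch_add(&oops_count, 1) + 1 >= limit && limit)
		fake_panic("Oopsed too often");
	fprintf(stderr, "oops survived, count=%u\n",
		atomic_load(&oops_count));
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		fake_oops();	/* the third call trips the limit */
	return 0;
}
```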
+ */ + limit = READ_ONCE(oops_limit); + if (atomic_inc_return(&oops_count) >= limit && limit) + panic("Oopsed too often (kernel.oops_limit is %d)", limit); + + do_exit(signr); +} + void complete_and_exit(struct completion *comp, long code) { if (comp) diff --git a/kernel/fork.c b/kernel/fork.c index a78c0b02edd55..68efe2a0b4fbc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -926,6 +926,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; + tsk->pf_io_worker = NULL; account_kernel_stack(tsk, 1); @@ -1127,6 +1128,7 @@ void mmput_async(struct mm_struct *mm) schedule_work(&mm->async_put_work); } } +EXPORT_SYMBOL_GPL(mmput_async); #endif /** @@ -1941,13 +1943,21 @@ static __latent_entropy struct task_struct *copy_process( recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); retval = -ERESTARTNOINTR; - if (signal_pending(current)) + if (task_sigpending(current)) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current, node); if (!p) goto fork_out; + if (args->io_thread) { + /* + * Mark us an IO worker, and block any signal that isn't + * fatal or STOP + */ + p->flags |= PF_IO_WORKER; + siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); + } /* * This _must_ happen before we call free_task(), i.e. before we jump @@ -2414,6 +2424,28 @@ struct mm_struct *copy_init_mm(void) return dup_mm(NULL, &init_mm); } +/* + * This is like kernel_clone(), but shaved down and tailored to just + * creating io_uring workers. It returns a created task, or an error pointer. + * The returned task is inactive, and the caller must fire it up through + * wake_up_new_task(p). All signals are blocked in the created task. + */ +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) +{ + unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO; + struct kernel_clone_args args = { + .flags = ((lower_32_bits(flags) | CLONE_VM | + CLONE_UNTRACED) & ~CSIGNAL), + .exit_signal = (lower_32_bits(flags) & CSIGNAL), + .stack = (unsigned long)fn, + .stack_size = (unsigned long)arg, + .io_thread = 1, + }; + + return copy_process(NULL, 0, node, &args); +} + /* * Ok, this is the main fork-routine. * diff --git a/kernel/futex/Makefile b/kernel/futex/Makefile new file mode 100644 index 0000000000000..b89ba3fba3437 --- /dev/null +++ b/kernel/futex/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-y += core.o diff --git a/kernel/futex.c b/kernel/futex/core.c similarity index 99% rename from kernel/futex.c rename to kernel/futex/core.c index 98a6e1b80bfe4..8dd0bc50ac36d 100644 --- a/kernel/futex.c +++ b/kernel/futex/core.c @@ -42,7 +42,7 @@ #include -#include "locking/rtmutex_common.h" +#include "../locking/rtmutex_common.h" /* * READ this before attempting to hack on futexes! @@ -3405,6 +3405,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, bool pi, bool pending_op) { u32 uval, nval, mval; + pid_t owner; int err; /* Futex address must be 32bit aligned */ @@ -3426,6 +3427,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, * 2. A woken up waiter is killed before it can acquire the * futex in user space. * + * In the second case, the wake up notification could be generated + * by the unlock path in user space after setting the futex value + * to zero or by the kernel after setting the OWNER_DIED bit below. 
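
The key detail in the reworked check below is that only the low 30 bits of the futex word name the owner; FUTEX_OWNER_DIED and FUTEX_WAITERS are state bits that must be masked off before any TID comparison. A quick standalone illustration (the constants match uapi/linux/futex.h, the rest is scaffolding):

```c
/* Demonstrates the owner-TID extraction handle_futex_death() performs. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FUTEX_WAITERS		0x80000000u
#define FUTEX_OWNER_DIED	0x40000000u
#define FUTEX_TID_MASK		0x3fffffffu

int main(void)
{
	/* Futex word: owner died, waiters queued, owner TID 1234. */
	uint32_t uval = FUTEX_OWNER_DIED | FUTEX_WAITERS | 1234;
	uint32_t owner = uval & FUTEX_TID_MASK;

	assert(owner == 1234);

	/* The pending_op fast path only fires when the owner part is 0,
	 * i.e. the futex is truly uncontended; state bits alone must not
	 * be mistaken for a live owner.
	 */
	uint32_t pending = FUTEX_OWNER_DIED;	/* no TID set */
	assert((pending & FUTEX_TID_MASK) == 0);

	printf("owner tid: %u\n", owner);
	return 0;
}
```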
+ * * In both cases the TID validation below prevents a wakeup of * potential waiters which can cause these waiters to block * forever. @@ -3434,24 +3439,27 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, * * 1) task->robust_list->list_op_pending != NULL * @pending_op == true - * 2) User space futex value == 0 + * 2) The owner part of user space futex value == 0 * 3) Regular futex: @pi == false * * If these conditions are met, it is safe to attempt waking up a * potential waiter without touching the user space futex value and - * trying to set the OWNER_DIED bit. The user space futex value is - * uncontended and the rest of the user space mutex state is - * consistent, so a woken waiter will just take over the - * uncontended futex. Setting the OWNER_DIED bit would create - * inconsistent state and malfunction of the user space owner died - * handling. - */ - if (pending_op && !pi && !uval) { + * trying to set the OWNER_DIED bit. If the futex value is zero, + * the rest of the user space mutex state is consistent, so a woken + * waiter will just take over the uncontended futex. Setting the + * OWNER_DIED bit would create inconsistent state and malfunction + * of the user space owner died handling. Otherwise, the OWNER_DIED + * bit is already set, and the woken waiter is expected to deal with + * this. + */ + owner = uval & FUTEX_TID_MASK; + + if (pending_op && !pi && !owner) { futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); return 0; } - if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) + if (owner != task_pid_vnr(curr)) return 0; /* diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index c466c7fbdece5..ea6b45d0fa0d6 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -327,6 +327,8 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src) for (i = 0; i < sfn_ptr->num_counters; i++) dfn_ptr->counters[i] += sfn_ptr->counters[i]; + + sfn_ptr = list_next_entry(sfn_ptr, head); } } diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index 53c67c87f141b..04880d8fba254 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -33,6 +33,13 @@ #define GCOV_TAG_FUNCTION_LENGTH 3 +/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */ +#if (__GNUC__ >= 12) +#define GCOV_UNIT_SIZE 4 +#else +#define GCOV_UNIT_SIZE 1 +#endif + static struct gcov_info *gcov_info_head; /** @@ -78,6 +85,7 @@ struct gcov_fn_info { * @version: gcov version magic indicating the gcc version used for compilation * @next: list head for a singly-linked list * @stamp: uniquifying time stamp + * @checksum: unique object checksum * @filename: name of the associated gcov data file * @merge: merge functions (null for unused counter type) * @n_functions: number of instrumented functions @@ -90,6 +98,10 @@ struct gcov_info { unsigned int version; struct gcov_info *next; unsigned int stamp; + /* Since GCC 12.1 a checksum field is added. */ +#if (__GNUC__ >= 12) + unsigned int checksum; +#endif const char *filename; void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int); unsigned int n_functions; @@ -451,12 +463,18 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info) pos += store_gcov_u32(buffer, pos, info->version); pos += store_gcov_u32(buffer, pos, info->stamp); +#if (__GNUC__ >= 12) + /* Use zero as checksum of the compilation unit. */ + pos += store_gcov_u32(buffer, pos, 0); +#endif + for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) { fi_ptr = info->functions[fi_idx]; /* Function record. 
*/ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION); - pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH); + pos += store_gcov_u32(buffer, pos, + GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE); pos += store_gcov_u32(buffer, pos, fi_ptr->ident); pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum); pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); @@ -470,7 +488,8 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info) /* Counter record. */ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FOR_COUNTER(ct_idx)); - pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2); + pos += store_gcov_u32(buffer, pos, + ci_ptr->num * 2 * GCOV_UNIT_SIZE); for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) { pos += store_gcov_u64(buffer, pos, diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index e58342ace11f2..f1d83a8b44171 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -52,6 +52,7 @@ enum { * IRQS_PENDING - irq is pending and replayed later * IRQS_SUSPENDED - irq is suspended * IRQS_NMI - irq line is used to deliver NMIs + * IRQS_SYSFS - descriptor has been added to sysfs */ enum { IRQS_AUTODETECT = 0x00000001, @@ -64,6 +65,7 @@ enum { IRQS_SUSPENDED = 0x00000800, IRQS_TIMINGS = 0x00001000, IRQS_NMI = 0x00002000, + IRQS_SYSFS = 0x00004000, }; #include "debug.h" diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index ca36c6179aa76..9b0914a063f90 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -288,22 +288,25 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc) if (irq_kobj_base) { /* * Continue even in case of failure as this is nothing - * crucial. + * crucial and failures in the late irq_sysfs_init() + * cannot be rolled back. */ if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) pr_warn("Failed to add kobject for irq %d\n", irq); + else + desc->istate |= IRQS_SYSFS; } } static void irq_sysfs_del(struct irq_desc *desc) { /* - * If irq_sysfs_init() has not yet been invoked (early boot), then - * irq_kobj_base is NULL and the descriptor was never added. - * kobject_del() complains about a object with no parent, so make - * it conditional. + * Only invoke kobject_del() when kobject_add() was successfully + * invoked for the descriptor. This covers both early boot, where + * sysfs is not initialized yet, and the case of a failed + * kobject_add() invocation. 
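
The IRQS_SYSFS flag is an instance of a general teardown rule: record that a registration actually succeeded and make the undo path conditional on that record, instead of inferring it from unrelated state (here, whether irq_kobj_base exists). A minimal userspace rendering of the pattern, with stand-in names only:

```c
/* Sketch of the "only delete what you successfully added" pattern that
 * IRQS_SYSFS implements. kobject_add()/kobject_del() are simulated.
 */
#include <stdbool.h>
#include <stdio.h>

struct desc {
	bool added_to_sysfs;	/* plays the role of IRQS_SYSFS */
};

static bool kobject_add_stub(bool simulate_failure)
{
	return !simulate_failure;
}

static void sysfs_add(struct desc *d, bool simulate_failure)
{
	if (kobject_add_stub(simulate_failure))
		d->added_to_sysfs = true;	/* mark only on success */
	else
		fprintf(stderr, "add failed, continuing anyway\n");
}

static void sysfs_del(struct desc *d)
{
	/* Only undo what was actually done. */
	if (d->added_to_sysfs)
		puts("kobject_del()");
	else
		puts("nothing to delete");
}

int main(void)
{
	struct desc ok = { 0 }, failed = { 0 };

	sysfs_add(&ok, false);
	sysfs_add(&failed, true);
	sysfs_del(&ok);		/* deletes */
	sysfs_del(&failed);	/* no-op, so kobject_del() never complains */
	return 0;
}
```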
*/ - if (irq_kobj_base) + if (desc->istate & IRQS_SYSFS) kobject_del(&desc->kobj); } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 92d94615cbbbd..437b073dc487e 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -223,11 +223,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, { struct irq_desc *desc = irq_data_to_desc(data); struct irq_chip *chip = irq_data_get_irq_chip(data); + const struct cpumask *prog_mask; int ret; + static DEFINE_RAW_SPINLOCK(tmp_mask_lock); + static struct cpumask tmp_mask; + if (!chip || !chip->irq_set_affinity) return -EINVAL; + raw_spin_lock(&tmp_mask_lock); /* * If this is a managed interrupt and housekeeping is enabled on * it check whether the requested affinity mask intersects with @@ -249,24 +254,34 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, */ if (irqd_affinity_is_managed(data) && housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) { - const struct cpumask *hk_mask, *prog_mask; - - static DEFINE_RAW_SPINLOCK(tmp_mask_lock); - static struct cpumask tmp_mask; + const struct cpumask *hk_mask; hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); - raw_spin_lock(&tmp_mask_lock); cpumask_and(&tmp_mask, mask, hk_mask); if (!cpumask_intersects(&tmp_mask, cpu_online_mask)) prog_mask = mask; else prog_mask = &tmp_mask; - ret = chip->irq_set_affinity(data, prog_mask, force); - raw_spin_unlock(&tmp_mask_lock); } else { - ret = chip->irq_set_affinity(data, mask, force); + prog_mask = mask; } + + /* + * Make sure we only provide online CPUs to the irqchip, + * unless we are being asked to force the affinity (in which + * case we do as we are told). + */ + cpumask_and(&tmp_mask, prog_mask, cpu_online_mask); + if (!force && !cpumask_empty(&tmp_mask)) + ret = chip->irq_set_affinity(data, &tmp_mask, force); + else if (force) + ret = chip->irq_set_affinity(data, mask, force); + else + ret = -EINVAL; + + raw_spin_unlock(&tmp_mask_lock); + switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: @@ -1652,7 +1667,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } - if (irq_settings_can_autoenable(desc)) { + if (!(new->flags & IRQF_NO_AUTOEN) && + irq_settings_can_autoenable(desc)) { irq_startup(desc, IRQ_RESEND, IRQ_START_COND); } else { /* @@ -2039,10 +2055,15 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, * which interrupt is which (messes up the interrupt freeing * logic etc). * + * Also shared interrupts do not go well with disabling auto enable. + * The sharing interrupt might request it while it's still disabled + * and then wait for interrupts forever. + * * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and * it cannot be set along with IRQF_NO_SUSPEND. 
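
The combined validation reads naturally once pulled out of request_threaded_irq() into a pure function. Below is a userspace harness of the same checks; the flag values are taken from <linux/interrupt.h>, and dev_id is reduced to a boolean for the sketch:

```c
/* Standalone harness for the irqflags sanity check, including the new
 * IRQF_SHARED + IRQF_NO_AUTOEN rejection.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define EINVAL			22
#define IRQF_SHARED		0x00000080
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000

static int validate_irqflags(unsigned long irqflags, bool have_dev_id)
{
	if (((irqflags & IRQF_SHARED) && !have_dev_id) ||
	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* A shared line must never be requested auto-disabled: the other
	 * sharer could be left waiting for interrupts forever.
	 */
	assert(validate_irqflags(IRQF_SHARED | IRQF_NO_AUTOEN, true) == -EINVAL);
	/* Non-shared with NO_AUTOEN stays legal. */
	assert(validate_irqflags(IRQF_NO_AUTOEN, true) == 0);
	/* Shared without dev_id was already rejected before this patch. */
	assert(validate_irqflags(IRQF_SHARED, false) == -EINVAL);
	puts("flag validation behaves as described");
	return 0;
}
```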
*/ if (((irqflags & IRQF_SHARED) && !dev_id) || + ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) || (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) return -EINVAL; @@ -2198,7 +2219,8 @@ int request_nmi(unsigned int irq, irq_handler_t handler, desc = irq_to_desc(irq); - if (!desc || irq_settings_can_autoenable(desc) || + if (!desc || (irq_settings_can_autoenable(desc) && + !(irqflags & IRQF_NO_AUTOEN)) || !irq_settings_can_request(desc) || WARN_ON(irq_settings_is_per_cpu_devid(desc)) || !irq_supports_nmi(desc)) diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index d217acc9f71b6..b47d95b68ac1a 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -456,6 +456,13 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, irqd_clr_can_reserve(irq_data); if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) irqd_set_msi_nomask_quirk(irq_data); + if ((info->flags & MSI_FLAG_ACTIVATE_EARLY) && + irqd_affinity_is_managed(irq_data) && + !cpumask_intersects(irq_data_get_affinity_mask(irq_data), + cpu_online_mask)) { + irqd_set_managed_shutdown(irq_data); + continue; + } } ret = irq_domain_activate_irq(irq_data, can_reserve); if (ret) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 23e7acb5c6679..762df6108c589 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -9,10 +9,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include "atomic.h" @@ -1045,3 +1047,51 @@ EXPORT_SYMBOL(__tsan_atomic_thread_fence); void __tsan_atomic_signal_fence(int memorder); void __tsan_atomic_signal_fence(int memorder) { } EXPORT_SYMBOL(__tsan_atomic_signal_fence); + +#ifdef __HAVE_ARCH_MEMSET +void *__tsan_memset(void *s, int c, size_t count); +noinline void *__tsan_memset(void *s, int c, size_t count) +{ + /* + * Instead of not setting up watchpoints where accessed size is greater + * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE. 
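
The interception pattern here is: clamp the length that gets checked, never the length that gets copied. A compilable sketch with stand-ins for check_access() and MAX_ENCODABLE_SIZE (the real value is derived from KCSAN's watchpoint encoding; the number below is illustrative only):

```c
/* Sketch of the idea behind __tsan_memset(): check a truncated prefix
 * of the accessed range, then forward to the real routine.
 */
#include <stdio.h>
#include <string.h>

#define MAX_ENCODABLE_SIZE	(4096UL - 1)	/* illustrative value only */

static void check_access(void *ptr, size_t len, int is_write)
{
	printf("checking %zu byte %s at %p\n",
	       len, is_write ? "write" : "read", ptr);
}

static void *instrumented_memset(void *s, int c, size_t count)
{
	/* Watchpoints cannot encode arbitrarily large sizes, so clamp the
	 * checked length instead of skipping the check altogether.
	 */
	size_t check_len = count < MAX_ENCODABLE_SIZE ?
			   count : MAX_ENCODABLE_SIZE;

	check_access(s, check_len, 1);
	return memset(s, c, count);	/* the full write still happens */
}

int main(void)
{
	static char big[16384];

	instrumented_memset(big, 0, sizeof(big)); /* checks only a prefix */
	return 0;
}
```

The same shape repeats for __tsan_memmove() and __tsan_memcpy(), with an extra read check on the source buffer.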
+ */ + size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE); + + check_access(s, check_len, KCSAN_ACCESS_WRITE); + return memset(s, c, count); +} +#else +void *__tsan_memset(void *s, int c, size_t count) __alias(memset); +#endif +EXPORT_SYMBOL(__tsan_memset); + +#ifdef __HAVE_ARCH_MEMMOVE +void *__tsan_memmove(void *dst, const void *src, size_t len); +noinline void *__tsan_memmove(void *dst, const void *src, size_t len) +{ + size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE); + + check_access(dst, check_len, KCSAN_ACCESS_WRITE); + check_access(src, check_len, 0); + return memmove(dst, src, len); +} +#else +void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove); +#endif +EXPORT_SYMBOL(__tsan_memmove); + +#ifdef __HAVE_ARCH_MEMCPY +void *__tsan_memcpy(void *dst, const void *src, size_t len); +noinline void *__tsan_memcpy(void *dst, const void *src, size_t len) +{ + size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE); + + check_access(dst, check_len, KCSAN_ACCESS_WRITE); + check_access(src, check_len, 0); + return memcpy(dst, src, len); +} +#else +void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy); +#endif +EXPORT_SYMBOL(__tsan_memcpy); diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c index ebe7fd2451040..8a8ccaf4f38f3 100644 --- a/kernel/kcsan/kcsan-test.c +++ b/kernel/kcsan/kcsan-test.c @@ -149,7 +149,7 @@ static bool report_matches(const struct expect_report *r) const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT; bool ret = false; unsigned long flags; - typeof(observed.lines) expect; + typeof(*observed.lines) *expect; const char *end; char *cur; int i; @@ -158,6 +158,10 @@ static bool report_matches(const struct expect_report *r) if (!report_available()) return false; + expect = kmalloc(sizeof(observed.lines), GFP_KERNEL); + if (WARN_ON(!expect)) + return false; + /* Generate expected report contents. 
*/ /* Title */ @@ -241,6 +245,7 @@ static bool report_matches(const struct expect_report *r) strstr(observed.lines[2], expect[1]))); out: spin_unlock_irqrestore(&observed.lock, flags); + kfree(expect); return ret; } diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index d3bf87e6007ca..069830f5a5d24 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -630,8 +630,8 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE && print_report(value_change, type, &ai, other_info); - if (reported && panic_on_warn) - panic("panic_on_warn set ...\n"); + if (reported) + check_panic_on_warn("KCSAN"); release_report(&flags, other_info); } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index a93407da0ae10..75150e7555180 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1642,6 +1642,7 @@ static int check_kprobe_address_safe(struct kprobe *p, /* Ensure it is not in reserved area nor out of text */ if (!(core_kernel_text((unsigned long) p->addr) || is_module_text_address((unsigned long) p->addr)) || + in_gate_area_no_mm((unsigned long) p->addr) || within_kprobe_blacklist((unsigned long) p->addr) || jump_label_text_reserved(p->addr, p->addr) || static_call_text_reserved(p->addr, p->addr) || @@ -1840,7 +1841,13 @@ static int __unregister_kprobe_top(struct kprobe *p) if ((list_p != p) && (list_p->post_handler)) goto noclean; } - ap->post_handler = NULL; + /* + * For the kprobe-on-ftrace case, we keep the + * post_handler setting to identify this aggrprobe + * armed with kprobe_ipmodify_ops. + */ + if (!kprobe_ftrace(ap)) + ap->post_handler = NULL; } noclean: /* @@ -2334,8 +2341,11 @@ int enable_kprobe(struct kprobe *kp) if (!kprobes_all_disarmed && kprobe_disabled(p)) { p->flags &= ~KPROBE_FLAG_DISABLED; ret = arm_kprobe(p); - if (ret) + if (ret) { p->flags |= KPROBE_FLAG_DISABLED; + if (p != kp) + kp->flags |= KPROBE_FLAG_DISABLED; + } } out: mutex_unlock(&kprobe_mutex); diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index f6310f848f342..b04b87a4e0a7b 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -611,9 +611,23 @@ void klp_reverse_transition(void) /* Called from copy_process() during fork */ void klp_copy_process(struct task_struct *child) { - child->patch_state = current->patch_state; - /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ + /* + * The parent process may have gone through a KLP transition since + * the thread flag was copied in setup_thread_stack earlier. Bring + * the task flag up to date with the parent here. + * + * The operation is serialized against all klp_*_transition() + * operations by the tasklist_lock. The only exception is + * klp_update_patch_state(current), but we cannot race with + * that because we are current. 
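
Put differently, the child's TIF_PATCH_PENDING must be re-derived from the parent at the time of the fork rather than trusted from the earlier stack copy. A standalone model of the fixed copy step, with plain bit operations standing in for the set/clear_tsk_thread_flag() helpers:

```c
/* Model of the klp_copy_process() fix: mirror the parent's *current*
 * pending flag into the child, in both directions.
 */
#include <assert.h>
#include <stdio.h>

#define TIF_PATCH_PENDING	(1u << 0)	/* illustrative bit */

struct fake_task {
	unsigned int flags;
	int patch_state;
};

static void copy_patch_state(struct fake_task *child,
			     const struct fake_task *parent)
{
	if (parent->flags & TIF_PATCH_PENDING)
		child->flags |= TIF_PATCH_PENDING;
	else
		child->flags &= ~TIF_PATCH_PENDING;

	child->patch_state = parent->patch_state;
}

int main(void)
{
	struct fake_task parent = { .flags = TIF_PATCH_PENDING, .patch_state = 0 };
	/* Stale flag from the earlier stack copy: the parent has since
	 * completed its transition and cleared the bit.
	 */
	struct fake_task child = { .flags = TIF_PATCH_PENDING, .patch_state = 0 };

	parent.flags &= ~TIF_PATCH_PENDING;
	parent.patch_state = 1;

	copy_patch_state(&child, &parent);
	assert(!(child.flags & TIF_PATCH_PENDING));
	assert(child.patch_state == 1);
	puts("child flag tracks the parent at fork time");
	return 0;
}
```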
+ */ + if (test_tsk_thread_flag(current, TIF_PATCH_PENDING)) + set_tsk_thread_flag(child, TIF_PATCH_PENDING); + else + clear_tsk_thread_flag(child, TIF_PATCH_PENDING); + + child->patch_state = current->patch_state; } /* diff --git a/kernel/module.c b/kernel/module.c index 6a0fd245c0483..33d1dc6d4cd6a 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3661,7 +3661,8 @@ static bool finished_loading(const char *name) sched_annotate_sleep(); mutex_lock(&module_mutex); mod = find_module_all(name, strlen(name), true); - ret = !mod || mod->state == MODULE_STATE_LIVE; + ret = !mod || mod->state == MODULE_STATE_LIVE + || mod->state == MODULE_STATE_GOING; mutex_unlock(&module_mutex); return ret; @@ -3827,20 +3828,35 @@ static int add_unformed_module(struct module *mod) mod->state = MODULE_STATE_UNFORMED; -again: mutex_lock(&module_mutex); old = find_module_all(mod->name, strlen(mod->name), true); if (old != NULL) { - if (old->state != MODULE_STATE_LIVE) { + if (old->state == MODULE_STATE_COMING + || old->state == MODULE_STATE_UNFORMED) { /* Wait in case it fails to load. */ mutex_unlock(&module_mutex); err = wait_event_interruptible(module_wq, finished_loading(mod->name)); if (err) goto out_unlocked; - goto again; + + /* The module might have gone in the meantime. */ + mutex_lock(&module_mutex); + old = find_module_all(mod->name, strlen(mod->name), + true); } - err = -EEXIST; + + /* + * We are here only when the same module was being loaded. Do + * not try to load it again right now. It prevents long delays + * caused by serialized module load failures. It might happen + * when more devices of the same type trigger load of + * a particular module. + */ + if (old && old->state == MODULE_STATE_LIVE) + err = -EEXIST; + else + err = -EBUSY; goto out; } mod_update_bounds(mod); diff --git a/kernel/padata.c b/kernel/padata.c index d4d3ba6e1728a..11ca3ebd8b123 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -220,14 +220,16 @@ int padata_do_parallel(struct padata_shell *ps, pw = padata_work_alloc(); spin_unlock(&padata_works_lock); + if (!pw) { + /* Maximum works limit exceeded, run in the current task. */ + padata->parallel(padata); + } + rcu_read_unlock_bh(); if (pw) { padata_work_init(pw, padata_parallel_worker, padata, 0); queue_work(pinst->parallel_wq, &pw->pw_work); - } else { - /* Maximum works limit exceeded, run in the current task. */ - padata->parallel(padata); } return 0; @@ -401,13 +403,16 @@ void padata_do_serial(struct padata_priv *padata) int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); struct padata_priv *cur; + struct list_head *pos; spin_lock(&reorder->lock); /* Sort in ascending order of sequence number. */ - list_for_each_entry_reverse(cur, &reorder->list, list) + list_for_each_prev(pos, &reorder->list) { + cur = list_entry(pos, struct padata_priv, list); if (cur->seq_nr < padata->seq_nr) break; - list_add(&padata->list, &cur->list); + } + list_add(&padata->list, pos); spin_unlock(&reorder->lock); /* diff --git a/kernel/panic.c b/kernel/panic.c index 332736a72a58e..bc39e2b27d315 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #define PANIC_TIMER_STEP 100 @@ -41,7 +42,9 @@ * Should we dump all CPUs backtraces in an oops event? * Defaults to 0, can be changed via sysctl. 
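
On a kernel carrying this series, all of the new knobs are plain files, so a small probe can confirm which ones are present; a missing file simply means the running kernel predates the patches:

```c
/* Probe the oops/warn limit knobs added by this series. */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%-42s <not present on this kernel>\n", path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-42s %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/oops_limit");
	show("/proc/sys/kernel/warn_limit");
	show("/proc/sys/kernel/oops_all_cpu_backtrace");
	show("/sys/kernel/oops_count");
	show("/sys/kernel/warn_count");
	return 0;
}
```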
*/ -unsigned int __read_mostly sysctl_oops_all_cpu_backtrace; +static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace; +#else +#define sysctl_oops_all_cpu_backtrace 0 #endif /* CONFIG_SMP */ int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE; @@ -54,6 +57,7 @@ bool crash_kexec_post_notifiers; int panic_on_warn __read_mostly; unsigned long panic_on_taint; bool panic_on_taint_nousertaint = false; +static unsigned int warn_limit __read_mostly; int panic_timeout = CONFIG_PANIC_TIMEOUT; EXPORT_SYMBOL_GPL(panic_timeout); @@ -70,6 +74,56 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list); EXPORT_SYMBOL(panic_notifier_list); +#ifdef CONFIG_SYSCTL +static struct ctl_table kern_panic_table[] = { +#ifdef CONFIG_SMP + { + .procname = "oops_all_cpu_backtrace", + .data = &sysctl_oops_all_cpu_backtrace, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif + { + .procname = "warn_limit", + .data = &warn_limit, + .maxlen = sizeof(warn_limit), + .mode = 0644, + .proc_handler = proc_douintvec, + }, + { } +}; + +static __init int kernel_panic_sysctls_init(void) +{ + register_sysctl_init("kernel", kern_panic_table); + return 0; +} +late_initcall(kernel_panic_sysctls_init); +#endif + +static atomic_t warn_count = ATOMIC_INIT(0); + +#ifdef CONFIG_SYSFS +static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr, + char *page) +{ + return sysfs_emit(page, "%d\n", atomic_read(&warn_count)); +} + +static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count); + +static __init int kernel_panic_sysfs_init(void) +{ + sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL); + return 0; +} +late_initcall(kernel_panic_sysfs_init); +#endif + static long no_blink(int state) { return 0; @@ -166,6 +220,19 @@ static void panic_print_sys_info(void) ftrace_dump(DUMP_ALL); } +void check_panic_on_warn(const char *origin) +{ + unsigned int limit; + + if (panic_on_warn) + panic("%s: panic_on_warn set ...\n", origin); + + limit = READ_ONCE(warn_limit); + if (atomic_inc_return(&warn_count) >= limit && limit) + panic("%s: system warned too often (kernel.warn_limit is %d)", + origin, limit); +} + /** * panic - halt the system * @fmt: The text string to print @@ -183,6 +250,16 @@ void panic(const char *fmt, ...) int old_cpu, this_cpu; bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers; + if (panic_on_warn) { + /* + * This thread may hit another WARN() in the panic path. + * Resetting this prevents additional WARN() from panicking the + * system on this thread. Other threads are blocked by the + * panic_mutex in panic(). + */ + panic_on_warn = 0; + } + /* * Disable local interrupts. This will prevent panic_smp_self_stop * from deadlocking the first cpu that invokes the panic, since @@ -594,16 +671,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint, if (regs) show_regs(regs); - if (panic_on_warn) { - /* - * This thread may hit another WARN() in the panic path. - * Resetting this prevents additional WARN() from panicking the - * system on this thread. Other threads are blocked by the - * panic_mutex in panic(). 
- */ - panic_on_warn = 0; - panic("panic_on_warn set ...\n"); - } + check_panic_on_warn("kernel"); if (!regs) dump_stack(); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 522cb1387462c..59a1b126c369b 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -637,7 +637,7 @@ static void power_down(void) int error; if (hibernation_mode == HIBERNATION_SUSPEND) { - error = suspend_devices_and_enter(PM_SUSPEND_MEM); + error = suspend_devices_and_enter(mem_sleep_current); if (error) { hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM : diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 1da013f50059a..f5dccd445d360 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1677,8 +1677,8 @@ static unsigned long minimum_image_size(unsigned long saveable) * /sys/power/reserved_size, respectively). To make this happen, we compute the * total number of available page frames and allocate at least * - * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 - * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) + * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2 + * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) * * of them, which corresponds to the maximum size of a hibernation image. * diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 14af29fe13770..8b51e6a5b3869 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -171,7 +171,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) { /* Complain if the scheduler has not started. */ - RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, + WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, "synchronize_rcu_tasks called too soon"); /* Wait for the grace period. */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b41009a283caf..9cce4e13af414 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1157,7 +1157,7 @@ bool rcu_lockdep_current_cpu_online(void) preempt_disable_notrace(); rdp = this_cpu_ptr(&rcu_data); rnp = rdp->mynode; - if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) + if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1) ret = true; preempt_enable_notrace(); return ret; @@ -1724,6 +1724,7 @@ static void rcu_strict_gp_boundary(void *unused) */ static bool rcu_gp_init(void) { + unsigned long firstseq; unsigned long flags; unsigned long oldmask; unsigned long mask; @@ -1767,6 +1768,12 @@ static bool rcu_gp_init(void) */ rcu_state.gp_state = RCU_GP_ONOFF; rcu_for_each_leaf_node(rnp) { + smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values. + firstseq = READ_ONCE(rnp->ofl_seq); + if (firstseq & 0x1) + while (firstseq == READ_ONCE(rnp->ofl_seq)) + schedule_timeout_idle(1); // Can't wake unless RCU is watching. + smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values. raw_spin_lock(&rcu_state.ofl_lock); raw_spin_lock_irq_rcu_node(rnp); if (rnp->qsmaskinit == rnp->qsmaskinitnext && @@ -2650,7 +2657,7 @@ void rcu_force_quiescent_state(void) struct rcu_node *rnp_old = NULL; /* Funnel through hierarchy to reduce memory contention. 
*/ - rnp = __this_cpu_read(rcu_data.mynode); + rnp = raw_cpu_read(rcu_data.mynode); for (; rnp != NULL; rnp = rnp->parent) { ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || !raw_spin_trylock(&rnp->fqslock); @@ -3393,15 +3400,16 @@ static void fill_page_cache_func(struct work_struct *work) bnode = (struct kvfree_rcu_bulk_data *) __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - if (bnode) { - raw_spin_lock_irqsave(&krcp->lock, flags); - pushed = put_cached_bnode(krcp, bnode); - raw_spin_unlock_irqrestore(&krcp->lock, flags); + if (!bnode) + break; - if (!pushed) { - free_page((unsigned long) bnode); - break; - } + raw_spin_lock_irqsave(&krcp->lock, flags); + pushed = put_cached_bnode(krcp, bnode); + raw_spin_unlock_irqrestore(&krcp->lock, flags); + + if (!pushed) { + free_page((unsigned long) bnode); + break; } } @@ -4106,6 +4114,9 @@ void rcu_cpu_starting(unsigned int cpu) rnp = rdp->mynode; mask = rdp->grpmask; + WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); + WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); + smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). raw_spin_lock_irqsave_rcu_node(rnp, flags); WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); newcpu = !(rnp->expmaskinitnext & mask); @@ -4123,6 +4134,9 @@ void rcu_cpu_starting(unsigned int cpu) } else { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } + smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). + WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); + WARN_ON_ONCE(rnp->ofl_seq & 0x1); smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ } @@ -4149,6 +4163,9 @@ void rcu_report_dead(unsigned int cpu) /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ mask = rdp->grpmask; + WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); + WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); + smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). raw_spin_lock(&rcu_state.ofl_lock); raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); @@ -4161,6 +4178,9 @@ void rcu_report_dead(unsigned int cpu) WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); raw_spin_unlock(&rcu_state.ofl_lock); + smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). + WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); + WARN_ON_ONCE(rnp->ofl_seq & 0x1); rdp->cpu_started = false; } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e4f66b8f7c470..6e8c77729a470 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -56,6 +56,7 @@ struct rcu_node { /* Initialized from ->qsmaskinitnext at the */ /* beginning of each grace period. */ unsigned long qsmaskinitnext; + unsigned long ofl_seq; /* CPU-hotplug operation sequence count. */ /* Online CPUs for next grace period. 
*/ unsigned long expmask; /* CPUs or groups that need to check in */ /* to allow the current expedited GP */ diff --git a/kernel/relay.c b/kernel/relay.c index b08d936d5fa75..067769b80d4ab 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -163,13 +163,13 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) { struct rchan_buf *buf; - if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *)) + if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t)) return NULL; buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); if (!buf) return NULL; - buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *), + buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t), GFP_KERNEL); if (!buf->padding) goto free_buf; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index da96a309eefed..1303a2607f1f8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -21,7 +21,7 @@ #include #include "../workqueue_internal.h" -#include "../../fs/io-wq.h" +#include "../../io_uring/io-wq.h" #include "../smpboot.h" #include "pelt.h" @@ -4280,8 +4280,7 @@ static noinline void __schedule_bug(struct task_struct *prev) pr_err("Preemption disabled at:"); print_ip_sym(KERN_ERR, preempt_disable_ip); } - if (panic_on_warn) - panic("scheduling while atomic\n"); + check_panic_on_warn("scheduling while atomic"); dump_stack(); add_taint(TAINT_WARN, LOCKDEP_STILL_OK); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index bca0efc03a51d..c39d2fc3f9945 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4074,7 +4074,131 @@ static inline void util_est_update(struct cfs_rq *cfs_rq, trace_sched_util_est_se_tp(&p->se); } -static inline int task_fits_capacity(struct task_struct *p, long capacity) +static inline int util_fits_cpu(unsigned long util, + unsigned long uclamp_min, + unsigned long uclamp_max, + int cpu) +{ + unsigned long capacity_orig, capacity_orig_thermal; + unsigned long capacity = capacity_of(cpu); + bool fits, uclamp_max_fits; + + /* + * Check if the real util fits without any uclamp boost/cap applied. + */ + fits = fits_capacity(util, capacity); + + if (!uclamp_is_used()) + return fits; + + /* + * We must use capacity_orig_of() for comparing against uclamp_min and + * uclamp_max. We only care about capacity pressure (by using + * capacity_of()) for comparing against the real util. + * + * If a task is boosted to 1024 for example, we don't want a tiny + * pressure to skew the check whether it fits a CPU or not. + * + * Similarly if a task is capped to capacity_orig_of(little_cpu), it + * should fit a little cpu even if there's some pressure. + * + * Only exception is for thermal pressure since it has a direct impact + * on available OPP of the system. + * + * We honour it for uclamp_min only as a drop in performance level + * could result in not getting the requested minimum performance level. + * + * For uclamp_max, we can tolerate a drop in performance level as the + * goal is to cap the task. So it's okay if it's getting less. + * + * In case of capacity inversion, which is not handled yet, we should + * honour the inverted capacity for both uclamp_min and uclamp_max all + * the time. + */ + capacity_orig = capacity_orig_of(cpu); + capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu); + + /* + * We want to force a task to fit a cpu as implied by uclamp_max. + * But we do have some corner cases to cater for.. 
+ * + * + * C=z + * | ___ + * | C=y | | + * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max + * | C=x | | | | + * | ___ | | | | + * | | | | | | | (util somewhere in this region) + * | | | | | | | + * | | | | | | | + * +---------------------------------------- + * cpu0 cpu1 cpu2 + * + * In the above example if a task is capped to a specific performance + * point, y, then when: + * + * * util = 80% of x then it does not fit on cpu0 and should migrate + * to cpu1 + * * util = 80% of y then it is forced to fit on cpu1 to honour + * uclamp_max request. + * + * which is what we're enforcing here. A task always fits if + * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig, + * the normal upmigration rules should withhold still. + * + * Only exception is when we are on max capacity, then we need to be + * careful not to block overutilized state. This is so because: + * + * 1. There's no concept of capping at max_capacity! We can't go + * beyond this performance level anyway. + * 2. The system is being saturated when we're operating near + * max capacity, it doesn't make sense to block overutilized. + */ + uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE); + uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig); + fits = fits || uclamp_max_fits; + + /* + * + * C=z + * | ___ (region a, capped, util >= uclamp_max) + * | C=y | | + * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max + * | C=x | | | | + * | ___ | | | | (region b, uclamp_min <= util <= uclamp_max) + * |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min + * | | | | | | | + * | | | | | | | (region c, boosted, util < uclamp_min) + * +---------------------------------------- + * cpu0 cpu1 cpu2 + * + * a) If util > uclamp_max, then we're capped, we don't care about + * actual fitness value here. We only care if uclamp_max fits + * capacity without taking margin/pressure into account. + * See comment above. + * + * b) If uclamp_min <= util <= uclamp_max, then the normal + * fits_capacity() rules apply. Except we need to ensure that we + * enforce we remain within uclamp_max, see comment above. + * + * c) If util < uclamp_min, then we are boosted. Same as (b) but we + * need to take into account the boosted value fits the CPU without + * taking margin/pressure into account. + * + * Cases (a) and (b) are handled in the 'fits' variable already. We + * just need to consider an extra check for case (c) after ensuring we + * handle the case uclamp_min > uclamp_max. + */ + uclamp_min = min(uclamp_min, uclamp_max); + if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE) + fits = fits && (uclamp_min <= capacity_orig_thermal); + + return fits; +} + +static inline int task_fits_capacity(struct task_struct *p, + unsigned long capacity) { return fits_capacity(uclamp_task_util(p), capacity); } @@ -6247,7 +6371,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) return best_cpu; } -static inline bool asym_fits_capacity(int task_util, int cpu) +static inline bool asym_fits_capacity(unsigned long task_util, int cpu) { if (static_branch_unlikely(&sched_asym_cpucapacity)) return fits_capacity(task_util, capacity_of(cpu)); diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index b7f38f3ad42a2..debaeb07ae533 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1158,10 +1158,11 @@ void psi_trigger_destroy(struct psi_trigger *t) group = t->group; /* - * Wakeup waiters to stop polling. 
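To make the regions above concrete, a stand-alone toy version of the fit check (the 80% fits_capacity() margin mirrors the kernel's 1280/1024 ratio; the capacity numbers are made up and thermal pressure is ignored):

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static bool fits_capacity(unsigned long util, unsigned long cap)
{
        return util * 1280 < cap * 1024;        /* ~80% headroom rule */
}

static bool toy_util_fits_cpu(unsigned long util, unsigned long uclamp_min,
                              unsigned long uclamp_max, unsigned long cap_orig)
{
        bool fits = fits_capacity(util, cap_orig);
        bool max_fits;

        /* capped tasks fit wherever uclamp_max fits, except at max capacity */
        max_fits = (cap_orig == SCHED_CAPACITY_SCALE) &&
                   (uclamp_max == SCHED_CAPACITY_SCALE);
        max_fits = !max_fits && (uclamp_max <= cap_orig);
        fits = fits || max_fits;

        /* boosted tasks must additionally fit their requested minimum */
        uclamp_min = uclamp_min < uclamp_max ? uclamp_min : uclamp_max;
        if (util < uclamp_min && cap_orig != SCHED_CAPACITY_SCALE)
                fits = fits && (uclamp_min <= cap_orig);
        return fits;
}

int main(void)
{
        /* a task boosted to 512 does not fit a little CPU of capacity 256 */
        printf("%d\n", toy_util_fits_cpu(100, 512, 1024, 256));
        return 0;
}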
Can happen if cgroup is deleted - * from under a polling process. + * Wakeup waiters to stop polling and clear the queue to prevent it from + * being accessed later. Can happen if cgroup is deleted from under a + * polling process. */ - wake_up_interruptible(&t->event_wait); + wake_up_pollfree(&t->event_wait); mutex_lock(&group->trigger_lock); diff --git a/kernel/signal.c b/kernel/signal.c index d05f783d5a5e6..e487c4660921d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -984,7 +984,7 @@ static inline bool wants_signal(int sig, struct task_struct *p) if (task_is_stopped_or_traced(p)) return false; - return task_curr(p) || !signal_pending(p); + return task_curr(p) || !task_sigpending(p); } static void complete_signal(int sig, struct task_struct *p, enum pid_type type) @@ -2520,6 +2520,21 @@ bool get_signal(struct ksignal *ksig) struct signal_struct *signal = current->signal; int signr; + if (unlikely(current->task_works)) + task_work_run(); + + /* + * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so + * that the arch handlers don't all have to do it. If we get here + * without TIF_SIGPENDING, just exit after running signal work. + */ + if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) { + if (test_thread_flag(TIF_NOTIFY_SIGNAL)) + tracehook_notify_signal(); + if (!task_sigpending(current)) + return false; + } + if (unlikely(uprobe_deny_signal())) return false; @@ -2532,26 +2547,6 @@ bool get_signal(struct ksignal *ksig) relock: spin_lock_irq(&sighand->siglock); - /* - * Make sure we can safely read ->jobctl() in task_work add. As Oleg - * states: - * - * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we - * roughly have - * - * task_work_add: get_signal: - * STORE(task->task_works, new_work); STORE(task->jobctl); - * mb(); mb(); - * LOAD(task->jobctl); LOAD(task->task_works); - * - * and we can rely on STORE-MB-LOAD [ in task_work_add]. - */ - smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK); - if (unlikely(current->task_works)) { - spin_unlock_irq(&sighand->siglock); - task_work_run(); - goto relock; - } /* * Every stopped thread goes here after wakeup. Check to see if @@ -2742,6 +2737,14 @@ bool get_signal(struct ksignal *ksig) do_coredump(&ksig->info); } + /* + * PF_IO_WORKER threads will catch and exit on fatal signals + * themselves. They have cleanup that must be performed, so + * we cannot call do_exit() on their behalf. + */ + if (current->flags & PF_IO_WORKER) + goto out; + /* * Death signals, no core dump. */ @@ -2749,7 +2752,7 @@ bool get_signal(struct ksignal *ksig) /* NOTREACHED */ } spin_unlock_irq(&sighand->siglock); - +out: ksig->sig = signr; return ksig->sig > 0; } @@ -2813,7 +2816,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) /* Remove the signals this thread can handle. */ sigandsets(&retarget, &retarget, &t->blocked); - if (!signal_pending(t)) + if (!task_sigpending(t)) signal_wake_up(t, 0); if (sigisemptyset(&retarget)) @@ -2847,7 +2850,7 @@ void exit_signals(struct task_struct *tsk) cgroup_threadgroup_change_end(tsk); - if (!signal_pending(tsk)) + if (!task_sigpending(tsk)) goto out; unblocked = tsk->blocked; @@ -2891,7 +2894,7 @@ long do_no_restart_syscall(struct restart_block *param) static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) { - if (signal_pending(tsk) && !thread_group_empty(tsk)) { + if (task_sigpending(tsk) && !thread_group_empty(tsk)) { sigset_t newblocked; /* A set of now blocked but previously unblocked signals. 
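The signal.c conversions above lean on the distinction that came in with TIF_NOTIFY_SIGNAL: signal_pending() also reports pure task-work notifications, while task_sigpending() reports only real queued signals. A reduced model of the two predicates (the mask encoding is illustrative; the kernel tests thread-info flag bits):

#include <stdbool.h>

#define TIF_SIGPENDING    (1U << 0)    /* a real signal is queued */
#define TIF_NOTIFY_SIGNAL (1U << 1)    /* task_work wants a wakeup */

struct task { unsigned int ti_flags; };

static bool task_sigpending(const struct task *t)
{
        return t->ti_flags & TIF_SIGPENDING;
}

static bool signal_pending(const struct task *t)
{
        /* also true for notification-only wakeups */
        return t->ti_flags & (TIF_NOTIFY_SIGNAL | TIF_SIGPENDING);
}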
*/ sigandnsets(&newblocked, newset, ¤t->blocked); diff --git a/kernel/sys.c b/kernel/sys.c index 24a3a28ae2284..9f59cc8ab8f86 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1548,6 +1548,8 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource, if (resource >= RLIM_NLIMITS) return -EINVAL; + resource = array_index_nospec(resource, RLIM_NLIMITS); + if (new_rlim) { if (new_rlim->rlim_cur > new_rlim->rlim_max) return -EINVAL; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f0dd1a3b66eb9..d8b7b28463135 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -391,13 +391,14 @@ int proc_dostring(struct ctl_table *table, int write, ppos); } -static size_t proc_skip_spaces(char **buf) +static void proc_skip_spaces(char **buf, size_t *size) { - size_t ret; - char *tmp = skip_spaces(*buf); - ret = tmp - *buf; - *buf = tmp; - return ret; + while (*size) { + if (!isspace(**buf)) + break; + (*size)--; + (*buf)++; + } } static void proc_skip_char(char **buf, size_t *size, const char v) @@ -466,13 +467,12 @@ static int proc_get_long(char **buf, size_t *size, unsigned long *val, bool *neg, const char *perm_tr, unsigned perm_tr_len, char *tr) { - int len; char *p, tmp[TMPBUFLEN]; + ssize_t len = *size; - if (!*size) + if (len <= 0) return -EINVAL; - len = *size; if (len > TMPBUFLEN - 1) len = TMPBUFLEN - 1; @@ -630,7 +630,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, bool neg; if (write) { - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (!left) break; @@ -657,7 +657,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, if (!write && !first && left && !err) proc_put_char(&buffer, &left, '\n'); if (write && !err && left) - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (write && first) return err ? : -EINVAL; *lenp -= left; @@ -699,7 +699,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data, if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (!left) { err = -EINVAL; goto out_free; @@ -719,7 +719,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data, } if (!err && left) - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); out_free: if (err) @@ -1177,7 +1177,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, if (write) { bool neg; - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (!left) break; @@ -1205,7 +1205,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, if (!write && !first && left && !err) proc_put_char(&buffer, &left, '\n'); if (write && !err) - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (write && first) return err ? 
: -EINVAL; *lenp -= left; @@ -2199,17 +2199,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -#ifdef CONFIG_SMP - { - .procname = "oops_all_cpu_backtrace", - .data = &sysctl_oops_all_cpu_backtrace, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, -#endif /* CONFIG_SMP */ { .procname = "pid_max", .data = &pid_max, diff --git a/kernel/task_work.c b/kernel/task_work.c index 8d6e1217c451c..e9316198c64bf 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -33,7 +33,6 @@ int task_work_add(struct task_struct *task, struct callback_head *work, enum task_work_notify_mode notify) { struct callback_head *head; - unsigned long flags; do { head = READ_ONCE(task->task_works); @@ -49,17 +48,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work, set_notify_resume(task); break; case TWA_SIGNAL: - /* - * Only grab the sighand lock if we don't already have some - * task_work pending. This pairs with the smp_store_mb() - * in get_signal(), see comment there. - */ - if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) && - lock_task_sighand(task, &flags)) { - task->jobctl |= JOBCTL_TASK_WORK; - signal_wake_up(task, 0); - unlock_task_sighand(task, &flags); - } + set_notify_signal(task); break; default: WARN_ON_ONCE(1); @@ -70,18 +59,17 @@ int task_work_add(struct task_struct *task, struct callback_head *work, } /** - * task_work_cancel - cancel a pending work added by task_work_add() + * task_work_cancel_match - cancel a pending work added by task_work_add() * @task: the task which should execute the work - * @func: identifies the work to remove - * - * Find the last queued pending work with ->func == @func and remove - * it from queue. + * @match: match function to call * * RETURNS: * The found work or NULL if not found. */ struct callback_head * -task_work_cancel(struct task_struct *task, task_work_func_t func) +task_work_cancel_match(struct task_struct *task, + bool (*match)(struct callback_head *, void *data), + void *data) { struct callback_head **pprev = &task->task_works; struct callback_head *work; @@ -97,7 +85,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) */ raw_spin_lock_irqsave(&task->pi_lock, flags); while ((work = READ_ONCE(*pprev))) { - if (work->func != func) + if (!match(work, data)) pprev = &work->next; else if (cmpxchg(pprev, work, work->next) == work) break; @@ -107,6 +95,28 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) return work; } +static bool task_work_func_match(struct callback_head *cb, void *data) +{ + return cb->func == data; +} + +/** + * task_work_cancel - cancel a pending work added by task_work_add() + * @task: the task which should execute the work + * @func: identifies the work to remove + * + * Find the last queued pending work with ->func == @func and remove + * it from queue. + * + * RETURNS: + * The found work or NULL if not found. 
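task_work_cancel_match() generalizes cancellation beyond comparing ->func. A hypothetical caller (not part of this patch) that cancels one specific queued callback_head rather than any work with a given function could look like this, in kernel context:

/* match on the callback_head pointer itself rather than on ->func */
static bool match_exact_cb(struct callback_head *cb, void *data)
{
        return cb == data;
}

/* usage sketch, names hypothetical:
 *
 *      struct callback_head *old;
 *
 *      old = task_work_cancel_match(task, match_exact_cb, &obj->cb_head);
 *      if (old)
 *              release_obj(obj);
 */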
+ */ +struct callback_head * +task_work_cancel(struct task_struct *task, task_work_func_t func) +{ + return task_work_cancel_match(task, task_work_func_match, func); +} + /** * task_work_run - execute the works added by task_work_add() * diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index daeaa7140d0aa..1de426d3f694c 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -470,11 +470,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) } EXPORT_SYMBOL_GPL(alarm_forward); -u64 alarm_forward_now(struct alarm *alarm, ktime_t interval) +static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle) { struct alarm_base *base = &alarm_bases[alarm->type]; + ktime_t now = base->get_ktime(); + + if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) { + /* + * Same issue as with posix_timer_fn(). Timers which are + * periodic but whose signal is ignored can starve the system + * with a very small interval. The real fix which was + * promised in the context of posix_timer_fn() never + * materialized, but someone should really work on it. + * + * To prevent DoS, fake @now to be one jiffy out, which keeps + * the overrun accounting correct but creates an + * inconsistency vs. timer_gettime(2). + */ + ktime_t kj = NSEC_PER_SEC / HZ; + + if (interval < kj) + now = ktime_add(now, kj); + } + + return alarm_forward(alarm, now, interval); +} - return alarm_forward(alarm, base->get_ktime(), interval); +u64 alarm_forward_now(struct alarm *alarm, ktime_t interval) +{ + return __alarm_forward_now(alarm, interval, false); } EXPORT_SYMBOL_GPL(alarm_forward_now); @@ -548,9 +572,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, if (posix_timer_event(ptr, si_private) && ptr->it_interval) { /* * Handle ignored signals and rearm the timer. This will go - * away once we handle ignored signals proper. + * away once we handle ignored signals properly. Ensure that + * small intervals cannot starve the system. */ - ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval); + ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true); ++ptr->it_requeue_pending; ptr->it_active = 1; result = ALARMTIMER_RESTART; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 15a376f85e09b..ab912cc60760a 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1592,7 +1592,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags, static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) { - if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) + if ((iter->ent->type != TRACE_BLK) || + !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) return TRACE_TYPE_UNHANDLED; return print_one_line(iter, true); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a9e074769881f..94e51d36fb497 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1055,6 +1055,7 @@ static void do_bpf_send_signal(struct irq_work *entry) work = container_of(entry, struct send_signal_irq_work, irq_work); group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); + put_task_struct(work->task); } static int bpf_send_signal_common(u32 sig, enum pid_type type) @@ -1072,6 +1073,9 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) return -EPERM; if (unlikely(!nmi_uaccess_okay())) return -EPERM; + /* The task should not be pid=1, to avoid a kernel panic.
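The throttle added above can be restated in isolation: with high-resolution timers enabled, a periodic alarm whose interval is shorter than one jiffy gets its reference time pushed one jiffy forward before rearming, which bounds the requeue rate without breaking overrun accounting. Stand-alone sketch (the HZ value is illustrative):

#define HZ           250
#define NSEC_PER_SEC 1000000000LL

typedef long long ktime_t;              /* nanoseconds, as in the kernel */

static ktime_t throttled_now(ktime_t now, ktime_t interval)
{
        ktime_t one_jiffy = NSEC_PER_SEC / HZ;

        return interval < one_jiffy ? now + one_jiffy : now;
}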
*/ + if (unlikely(is_global_init(current))) + return -EPERM; if (irqs_disabled()) { /* Do an early check on signal validity. Otherwise, @@ -1088,7 +1092,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) * to the irq_work. The current task may change when queued * irq works get executed. */ - work->task = current; + work->task = get_task_struct(current); work->sig = sig; work->type = type; irq_work_queue(&work->irq_work); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index d868df6f13c86..d97c189695cbb 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1295,6 +1295,7 @@ static int ftrace_add_mod(struct trace_array *tr, if (!ftrace_mod) return -ENOMEM; + INIT_LIST_HEAD(&ftrace_mod->list); ftrace_mod->func = kstrdup(func, GFP_KERNEL); ftrace_mod->module = kstrdup(module, GFP_KERNEL); ftrace_mod->enable = enable; @@ -2946,18 +2947,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) command |= FTRACE_UPDATE_TRACE_FUNC; } - if (!command || !ftrace_enabled) { - /* - * If these are dynamic or per_cpu ops, they still - * need their data freed. Since, function tracing is - * not currently active, we can just free them - * without synchronizing all CPUs. - */ - if (ops->flags & FTRACE_OPS_FL_DYNAMIC) - goto free_ops; - - return 0; - } + if (!command || !ftrace_enabled) + goto out; /* * If the ops uses a trampoline, then it needs to be @@ -2994,6 +2985,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) removed_ops = NULL; ops->flags &= ~FTRACE_OPS_FL_REMOVING; +out: /* * Dynamic ops may be freed, we must make sure that all * callers are done before leaving this function. @@ -3021,7 +3013,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) if (IS_ENABLED(CONFIG_PREEMPTION)) synchronize_rcu_tasks(); - free_ops: ftrace_trampoline_free(ops); } @@ -3188,7 +3179,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count) /* if we can't allocate this size, try something smaller */ if (!order) return -ENOMEM; - order >>= 1; + order--; goto again; } @@ -5662,8 +5653,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file) if (filter_hash) { orig_hash = &iter->ops->func_hash->filter_hash; - if (iter->tr && !list_empty(&iter->tr->mod_trace)) - iter->hash->flags |= FTRACE_HASH_FL_MOD; + if (iter->tr) { + if (list_empty(&iter->tr->mod_trace)) + iter->hash->flags &= ~FTRACE_HASH_FL_MOD; + else + iter->hash->flags |= FTRACE_HASH_FL_MOD; + } } else orig_hash = &iter->ops->func_hash->notrace_hash; @@ -6883,7 +6878,7 @@ void __init ftrace_init(void) } pr_info("ftrace: allocating %ld entries in %ld pages\n", - count, count / ENTRIES_PER_PAGE + 1); + count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); last_ftrace_enabled = ftrace_enabled = 1; diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c index 18b0f1cbb947f..c736487fc0e48 100644 --- a/kernel/trace/kprobe_event_gen_test.c +++ b/kernel/trace/kprobe_event_gen_test.c @@ -35,6 +35,49 @@ static struct trace_event_file *gen_kprobe_test; static struct trace_event_file *gen_kretprobe_test; +#define KPROBE_GEN_TEST_FUNC "do_sys_open" + +/* X86 */ +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_32) +#define KPROBE_GEN_TEST_ARG0 "dfd=%ax" +#define KPROBE_GEN_TEST_ARG1 "filename=%dx" +#define KPROBE_GEN_TEST_ARG2 "flags=%cx" +#define KPROBE_GEN_TEST_ARG3 "mode=+4($stack)" + +/* ARM64 */ +#elif defined(CONFIG_ARM64) +#define KPROBE_GEN_TEST_ARG0 "dfd=%x0" +#define KPROBE_GEN_TEST_ARG1 "filename=%x1" +#define KPROBE_GEN_TEST_ARG2 "flags=%x2" 
+#define KPROBE_GEN_TEST_ARG3 "mode=%x3" + +/* ARM */ +#elif defined(CONFIG_ARM) +#define KPROBE_GEN_TEST_ARG0 "dfd=%r0" +#define KPROBE_GEN_TEST_ARG1 "filename=%r1" +#define KPROBE_GEN_TEST_ARG2 "flags=%r2" +#define KPROBE_GEN_TEST_ARG3 "mode=%r3" + +/* RISCV */ +#elif defined(CONFIG_RISCV) +#define KPROBE_GEN_TEST_ARG0 "dfd=%a0" +#define KPROBE_GEN_TEST_ARG1 "filename=%a1" +#define KPROBE_GEN_TEST_ARG2 "flags=%a2" +#define KPROBE_GEN_TEST_ARG3 "mode=%a3" + +/* others */ +#else +#define KPROBE_GEN_TEST_ARG0 NULL +#define KPROBE_GEN_TEST_ARG1 NULL +#define KPROBE_GEN_TEST_ARG2 NULL +#define KPROBE_GEN_TEST_ARG3 NULL +#endif + +static bool trace_event_file_is_valid(struct trace_event_file *input) +{ + return input && !IS_ERR(input); +} + /* * Test to make sure we can create a kprobe event, then add more * fields. @@ -58,23 +101,23 @@ static int __init test_gen_kprobe_cmd(void) * fields. */ ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test", - "do_sys_open", - "dfd=%ax", "filename=%dx"); + KPROBE_GEN_TEST_FUNC, + KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1); if (ret) - goto free; + goto out; /* Use kprobe_event_add_fields to add the rest of the fields */ - ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)"); + ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3); if (ret) - goto free; + goto out; /* * This actually creates the event. */ ret = kprobe_event_gen_cmd_end(&cmd); if (ret) - goto free; + goto out; /* * Now get the gen_kprobe_test event file. We need to prevent @@ -97,13 +140,13 @@ static int __init test_gen_kprobe_cmd(void) goto delete; } out: + kfree(buf); return ret; delete: + if (trace_event_file_is_valid(gen_kprobe_test)) + gen_kprobe_test = NULL; /* We got an error after creating the event, delete it */ ret = kprobe_event_delete("gen_kprobe_test"); - free: - kfree(buf); - goto out; } @@ -128,17 +171,17 @@ static int __init test_gen_kretprobe_cmd(void) * Define the kretprobe event. */ ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test", - "do_sys_open", + KPROBE_GEN_TEST_FUNC, "$retval"); if (ret) - goto free; + goto out; /* * This actually creates the event. */ ret = kretprobe_event_gen_cmd_end(&cmd); if (ret) - goto free; + goto out; /* * Now get the gen_kretprobe_test event file. 
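With the per-architecture macros above, the command the test builds through kprobe_event_gen_cmd_start() and kprobe_event_add_fields() is the same probe spec one could write to the tracefs kprobe_events file by hand. An illustrative user-space rendering of the x86 variant (the format is assumed from the macro values; the test itself does not print this):

#include <stdio.h>

int main(void)
{
        char cmd[256];

        snprintf(cmd, sizeof(cmd), "p:kprobes/%s %s %s %s %s %s",
                 "gen_kprobe_test", "do_sys_open",
                 "dfd=%ax", "filename=%dx", "flags=%cx", "mode=+4($stack)");
        puts(cmd);
        return 0;
}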
We need to @@ -162,13 +205,13 @@ static int __init test_gen_kretprobe_cmd(void) goto delete; } out: + kfree(buf); return ret; delete: + if (trace_event_file_is_valid(gen_kretprobe_test)) + gen_kretprobe_test = NULL; /* We got an error after creating the event, delete it */ ret = kprobe_event_delete("gen_kretprobe_test"); - free: - kfree(buf); - goto out; } @@ -182,10 +225,12 @@ static int __init kprobe_event_gen_test_init(void) ret = test_gen_kretprobe_cmd(); if (ret) { - WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr, - "kprobes", - "gen_kretprobe_test", false)); - trace_put_event_file(gen_kretprobe_test); + if (trace_event_file_is_valid(gen_kretprobe_test)) { + WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr, + "kprobes", + "gen_kretprobe_test", false)); + trace_put_event_file(gen_kretprobe_test); + } WARN_ON(kprobe_event_delete("gen_kretprobe_test")); } @@ -194,24 +239,30 @@ static int __init kprobe_event_gen_test_init(void) static void __exit kprobe_event_gen_test_exit(void) { - /* Disable the event or you can't remove it */ - WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, - "kprobes", - "gen_kprobe_test", false)); + if (trace_event_file_is_valid(gen_kprobe_test)) { + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, + "kprobes", + "gen_kprobe_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(gen_kprobe_test); + } - /* Now give the file and instance back */ - trace_put_event_file(gen_kprobe_test); /* Now unregister and free the event */ WARN_ON(kprobe_event_delete("gen_kprobe_test")); - /* Disable the event or you can't remove it */ - WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, - "kprobes", - "gen_kretprobe_test", false)); + if (trace_event_file_is_valid(gen_kretprobe_test)) { + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr, + "kprobes", + "gen_kretprobe_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(gen_kretprobe_test); + } - /* Now give the file and instance back */ - trace_put_event_file(gen_kretprobe_test); /* Now unregister and free the event */ WARN_ON(kprobe_event_delete("gen_kretprobe_test")); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 6deac666ba3e4..49ebb8c662682 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -414,6 +414,7 @@ struct rb_irq_work { struct irq_work work; wait_queue_head_t waiters; wait_queue_head_t full_waiters; + long wait_index; bool waiters_pending; bool full_waiters_pending; bool wakeup_full; @@ -516,6 +517,7 @@ struct ring_buffer_per_cpu { local_t committing; local_t commits; local_t pages_touched; + local_t pages_lost; local_t pages_read; long last_pages_touch; size_t shortest_full; @@ -770,10 +772,18 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) { size_t read; + size_t lost; size_t cnt; read = local_read(&buffer->buffers[cpu]->pages_read); + lost = local_read(&buffer->buffers[cpu]->pages_lost); cnt = local_read(&buffer->buffers[cpu]->pages_touched); + + if (WARN_ON_ONCE(cnt < lost)) + return 0; + + cnt -= lost; + /* The reader can read an empty page, but not more than that */ if (cnt < read) { WARN_ON_ONCE(read > cnt + 1); @@ -783,6 +793,21 @@ size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) return cnt - read; } +static __always_inline bool full_hit(struct 
trace_buffer *buffer, int cpu, int full) +{ + struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + size_t nr_pages; + size_t dirty; + + nr_pages = cpu_buffer->nr_pages; + if (!nr_pages || !full) + return true; + + dirty = ring_buffer_nr_dirty_pages(buffer, cpu); + + return (dirty * 100) > (full * nr_pages); +} + /* * rb_wake_up_waiters - wake up tasks waiting for ring buffer input * @@ -794,12 +819,44 @@ static void rb_wake_up_waiters(struct irq_work *work) struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); wake_up_all(&rbwork->waiters); - if (rbwork->wakeup_full) { + if (rbwork->full_waiters_pending || rbwork->wakeup_full) { rbwork->wakeup_full = false; + rbwork->full_waiters_pending = false; wake_up_all(&rbwork->full_waiters); } } +/** + * ring_buffer_wake_waiters - wake up any waiters on this ring buffer + * @buffer: The ring buffer to wake waiters on + * + * When a file that represents a ring buffer is closing, + * it is prudent to wake up any waiters that are on it. + */ +void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) +{ + struct ring_buffer_per_cpu *cpu_buffer; + struct rb_irq_work *rbwork; + + if (cpu == RING_BUFFER_ALL_CPUS) { + + /* Wake up individual ones too. One level recursion */ + for_each_buffer_cpu(buffer, cpu) + ring_buffer_wake_waiters(buffer, cpu); + + rbwork = &buffer->irq_work; + } else { + cpu_buffer = buffer->buffers[cpu]; + rbwork = &cpu_buffer->irq_work; + } + + rbwork->wait_index++; + /* make sure the waiters see the new index */ + smp_wmb(); + + rb_wake_up_waiters(&rbwork->work); +} + /** * ring_buffer_wait - wait for input to the ring buffer * @buffer: buffer to wait on @@ -815,6 +872,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) struct ring_buffer_per_cpu *cpu_buffer; DEFINE_WAIT(wait); struct rb_irq_work *work; + long wait_index; int ret = 0; /* @@ -833,6 +891,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) work = &cpu_buffer->irq_work; } + wait_index = READ_ONCE(work->wait_index); while (true) { if (full) @@ -877,26 +936,29 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) !ring_buffer_empty_cpu(buffer, cpu)) { unsigned long flags; bool pagebusy; - size_t nr_pages; - size_t dirty; + bool done; if (!full) break; raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; - nr_pages = cpu_buffer->nr_pages; - dirty = ring_buffer_nr_dirty_pages(buffer, cpu); + done = !pagebusy && full_hit(buffer, cpu, full); + if (!cpu_buffer->shortest_full || - cpu_buffer->shortest_full < full) + cpu_buffer->shortest_full > full) cpu_buffer->shortest_full = full; raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); - if (!pagebusy && - (!nr_pages || (dirty * 100) > full * nr_pages)) + if (done) break; } schedule(); + + /* Make sure to see the new wait index */ + smp_rmb(); + if (wait_index != work->wait_index) + break; } if (full) @@ -913,6 +975,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) * @cpu: the cpu buffer to wait on * @filp: the file descriptor * @poll_table: The poll descriptor + * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS * * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon * as data is added to any of the @buffer's cpu buffers. Otherwise @@ -922,14 +985,15 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) * zero otherwise.
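All of the wakeup conditions above reduce to the new full_hit() predicate: a waiter asking for N percent is satisfied once dirty pages exceed N percent of the buffer. Stand-alone restatement of the arithmetic:

#include <stdbool.h>
#include <stddef.h>

static bool full_hit(size_t nr_pages, size_t dirty, int full)
{
        if (!nr_pages || !full)         /* no threshold requested */
                return true;
        return dirty * 100 > (size_t)full * nr_pages;
}

/* e.g. full_hit(64, 33, 50) is true: 3300 > 3200 */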
*/ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, - struct file *filp, poll_table *poll_table) + struct file *filp, poll_table *poll_table, int full) { struct ring_buffer_per_cpu *cpu_buffer; struct rb_irq_work *work; - if (cpu == RING_BUFFER_ALL_CPUS) + if (cpu == RING_BUFFER_ALL_CPUS) { work = &buffer->irq_work; - else { + full = 0; + } else { if (!cpumask_test_cpu(cpu, buffer->cpumask)) return -EINVAL; @@ -937,8 +1001,14 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, work = &cpu_buffer->irq_work; } - poll_wait(filp, &work->waiters, poll_table); - work->waiters_pending = true; + if (full) { + poll_wait(filp, &work->full_waiters, poll_table); + work->full_waiters_pending = true; + } else { + poll_wait(filp, &work->waiters, poll_table); + work->waiters_pending = true; + } + /* * There's a tight race between setting the waiters_pending and * checking if the ring buffer is empty. Once the waiters_pending bit @@ -954,6 +1024,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, */ smp_mb(); + if (full) + return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; + if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) return EPOLLIN | EPOLLRDNORM; @@ -1595,9 +1668,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) free_buffer_page(cpu_buffer->reader_page); - rb_head_page_deactivate(cpu_buffer); - if (head) { + rb_head_page_deactivate(cpu_buffer); + list_for_each_entry_safe(bpage, tmp, head, list) { list_del_init(&bpage->list); free_buffer_page(bpage); @@ -1833,6 +1906,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) */ local_add(page_entries, &cpu_buffer->overrun); local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); + local_inc(&cpu_buffer->pages_lost); } /* @@ -2323,6 +2397,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, */ local_add(entries, &cpu_buffer->overrun); local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); + local_inc(&cpu_buffer->pages_lost); /* * The entries will be zeroed out when we move the @@ -2491,6 +2566,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, /* Mark the rest of the page with padding */ rb_event_set_padding(event); + /* Make sure the padding is visible before the write update */ + smp_wmb(); + /* Set the write back to the previous setting */ local_sub(length, &tail_page->write); return; @@ -2502,6 +2580,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, /* time delta must be non zero */ event->time_delta = 1; + /* Make sure the padding is visible before the tail_page->write update */ + smp_wmb(); + /* Set write to end of buffer */ length = (tail + length) - BUF_PAGE_SIZE; local_sub(length, &tail_page->write); @@ -2987,10 +3068,6 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, static __always_inline void rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) { - size_t nr_pages; - size_t dirty; - size_t full; - if (buffer->irq_work.waiters_pending) { buffer->irq_work.waiters_pending = false; /* irq_work_queue() supplies it's own memory barriers */ @@ -3014,10 +3091,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); - full = cpu_buffer->shortest_full; - nr_pages = cpu_buffer->nr_pages; - dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu); - if (full && nr_pages && 
(dirty * 100) <= full * nr_pages) + if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) return; cpu_buffer->irq_work.wakeup_full = true; @@ -4316,6 +4390,33 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); + /* + * The writer has preempt disable, wait for it. But not forever + * Although, 1 second is pretty much "forever" + */ +#define USECS_WAIT 1000000 + for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { + /* If the write is past the end of page, a writer is still updating it */ + if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) + break; + + udelay(1); + + /* Get the latest version of the reader write value */ + smp_rmb(); + } + + /* The writer is not moving forward? Something is wrong */ + if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) + reader = NULL; + + /* + * Make sure we see any padding after the write update + * (see rb_reset_tail()) + */ + smp_rmb(); + + return reader; } @@ -4891,6 +4992,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) local_set(&cpu_buffer->committing, 0); local_set(&cpu_buffer->commits, 0); local_set(&cpu_buffer->pages_touched, 0); + local_set(&cpu_buffer->pages_lost, 0); local_set(&cpu_buffer->pages_read, 0); cpu_buffer->last_pages_touch = 0; cpu_buffer->shortest_full = 0; @@ -5341,7 +5443,15 @@ int ring_buffer_read_page(struct trace_buffer *buffer, unsigned int pos = 0; unsigned int size; - if (full) + /* + * If a full page is expected, this can still be returned + * if there's been a previous partial read and the + * rest of the page can be read and the commit page is off + * the reader page. + */ + if (full && + (!read || (len < (commit - read)) || + cpu_buffer->reader_page == cpu_buffer->commit_page)) goto out_unlock; if (len > (commit - read)) diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c index edd912cd14aaf..a6a2813afb87f 100644 --- a/kernel/trace/synth_event_gen_test.c +++ b/kernel/trace/synth_event_gen_test.c @@ -120,15 +120,13 @@ static int __init test_gen_synth_cmd(void) /* Now generate a gen_synth_test event */ ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals)); - out: + free: + kfree(buf); return ret; delete: /* We got an error after creating the event, delete it */ synth_event_delete("gen_synth_test"); - free: - kfree(buf); - - goto out; + goto free; } /* @@ -227,15 +225,13 @@ static int __init test_empty_synth_event(void) /* Now trace an empty_synth_test event */ ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals)); - out: + free: + kfree(buf); return ret; delete: /* We got an error after creating the event, delete it */ synth_event_delete("empty_synth_test"); - free: - kfree(buf); - - goto out; + goto free; } static struct synth_field_desc create_synth_test_fields[] = { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 50200898410d5..8637eab2986ee 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1197,12 +1197,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr) { void *cond_data = NULL; + local_irq_disable(); arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) cond_data = tr->cond_snapshot->cond_data; arch_spin_unlock(&tr->max_lock); + local_irq_enable(); return cond_data; } @@ -1338,9 +1340,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, goto fail_unlock; } + local_irq_disable(); arch_spin_lock(&tr->max_lock); tr->cond_snapshot = cond_snapshot; 
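The new loop in rb_get_reader_page() above replaces an unbounded dependency on the writer with a bounded wait. Its shape, reduced to a sketch in kernel context (writer_still_updating() is a hypothetical stand-in for the rb_page_write(reader) <= BUF_PAGE_SIZE check):

#define USECS_WAIT 1000000      /* ~1s, effectively "forever" for a writer */

static void *bounded_wait_for_writer(void *reader)
{
        int nr_loops;

        for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
                if (!reader || !writer_still_updating(reader))
                        break;
                udelay(1);
                smp_rmb();      /* pick up the latest write position */
        }
        if (nr_loops == USECS_WAIT)
                reader = NULL;  /* writer stuck: fail instead of hanging */
        return reader;
}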
arch_spin_unlock(&tr->max_lock); + local_irq_enable(); mutex_unlock(&trace_types_lock); @@ -1367,6 +1371,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr) { int ret = 0; + local_irq_disable(); arch_spin_lock(&tr->max_lock); if (!tr->cond_snapshot) @@ -1377,6 +1382,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr) } arch_spin_unlock(&tr->max_lock); + local_irq_enable(); return ret; } @@ -2198,6 +2204,11 @@ static size_t tgid_map_max; #define SAVED_CMDLINES_DEFAULT 128 #define NO_CMDLINE_MAP UINT_MAX +/* + * Preemption must be disabled before acquiring trace_cmdline_lock. + * The various trace_arrays' max_lock must be acquired in a context + * where interrupt is disabled. + */ static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; struct saved_cmdlines_buffer { unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; @@ -2410,7 +2421,11 @@ static int trace_save_cmdline(struct task_struct *tsk) * the lock, but we also don't want to spin * nor do we want to disable interrupts, * so if we miss here, then better luck next time. + * + * This is called within the scheduler and wake up, so interrupts + * had better been disabled and run queue lock been held. */ + lockdep_assert_preemption_disabled(); if (!arch_spin_trylock(&trace_cmdline_lock)) return 0; @@ -5470,9 +5485,11 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, char buf[64]; int r; + preempt_disable(); arch_spin_lock(&trace_cmdline_lock); r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); arch_spin_unlock(&trace_cmdline_lock); + preempt_enable(); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } @@ -5497,10 +5514,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val) return -ENOMEM; } + preempt_disable(); arch_spin_lock(&trace_cmdline_lock); savedcmd_temp = savedcmd; savedcmd = s; arch_spin_unlock(&trace_cmdline_lock); + preempt_enable(); free_saved_cmdlines_buffer(savedcmd_temp); return 0; @@ -5953,10 +5972,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) #ifdef CONFIG_TRACER_SNAPSHOT if (t->use_max_tr) { + local_irq_disable(); arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) ret = -EBUSY; arch_spin_unlock(&tr->max_lock); + local_irq_enable(); if (ret) goto out; } @@ -5987,12 +6008,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) if (tr->current_trace->reset) tr->current_trace->reset(tr); +#ifdef CONFIG_TRACER_MAX_TRACE + had_max_tr = tr->current_trace->use_max_tr; + /* Current trace needs to be nop_trace before synchronize_rcu */ tr->current_trace = &nop_trace; -#ifdef CONFIG_TRACER_MAX_TRACE - had_max_tr = tr->allocated_snapshot; - if (had_max_tr && !t->use_max_tr) { /* * We need to make sure that the update_max_tr sees that @@ -6004,14 +6025,14 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) synchronize_rcu(); free_snapshot(tr); } -#endif -#ifdef CONFIG_TRACER_MAX_TRACE - if (t->use_max_tr && !had_max_tr) { + if (t->use_max_tr && !tr->allocated_snapshot) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; } +#else + tr->current_trace = &nop_trace; #endif if (t->init) { @@ -6242,7 +6263,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl return EPOLLIN | EPOLLRDNORM; else return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, - filp, poll_table); + filp, poll_table, iter->tr->buffer_percent); } static __poll_t @@ -6350,7 +6371,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ret = 
print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { - /* don't print partial lines */ + /* + * If one print_trace_line() fills the entire trace_seq in one shot, + * trace_seq_to_user() will return -EBUSY because save_len == 0. + * In this case, we need to consume it; otherwise, the loop will peek + * at this event next time, resulting in an infinite loop. + */ + if (save_len == 0) { + iter->seq.full = 0; + trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); + trace_consume(iter); + break; + } + + /* In other cases, don't print partial lines */ iter->seq.seq.len = save_len; break; } @@ -7030,10 +7064,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, goto out; } + local_irq_disable(); arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) ret = -EBUSY; arch_spin_unlock(&tr->max_lock); + local_irq_enable(); if (ret) goto out; @@ -8533,9 +8569,6 @@ buffer_percent_write(struct file *filp, const char __user *ubuf, if (val > 100) return -EINVAL; - if (!val) - val = 1; - tr->buffer_percent = val; (*ppos)++; @@ -9644,6 +9677,8 @@ void __init early_trace_init(void) static_key_enable(&tracepoint_printk_key.key); } tracer_alloc_buffers(); + + init_events(); } void __init trace_init(void) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 8d67f7f448400..37f616bf5fa93 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1673,6 +1673,7 @@ extern void trace_event_enable_cmd_record(bool enable); extern void trace_event_enable_tgid_record(bool enable); extern int event_trace_init(void); +extern int init_events(void); extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); extern int event_trace_del_tracer(struct trace_array *tr); extern void __trace_early_add_events(struct trace_array *tr); diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index 5fa49cfd2bb6f..d312a52a10a5b 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -70,6 +70,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type) if (ret) break; } + tracing_reset_all_online_cpus(); mutex_unlock(&event_mutex); return ret; @@ -165,6 +166,7 @@ int dyn_events_release_all(struct dyn_event_operations *type) break; } out: + tracing_reset_all_online_cpus(); mutex_unlock(&event_mutex); return ret; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 826ecf01e380c..bac13f24a96e5 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2572,7 +2572,10 @@ static int probe_remove_event_call(struct trace_event_call *call) * TRACE_REG_UNREGISTER. */ if (file->flags & EVENT_FILE_FL_ENABLED) - return -EBUSY; + goto busy; + + if (file->flags & EVENT_FILE_FL_WAS_ENABLED) + tr->clear_trace = true; /* * The do_for_each_event_file_safe() is * a double loop. After finding the call for this @@ -2585,6 +2588,12 @@ static int probe_remove_event_call(struct trace_event_call *call) __trace_remove_event_call(call); return 0; + busy: + /* No need to clear the trace now */ + list_for_each_entry(tr, &ftrace_trace_arrays, list) { + tr->clear_trace = false; + } + return -EBUSY; } /* Remove an event_call */ diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index fd54168294456..ccc99cd23f3c4 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -417,7 +417,7 @@ struct action_data { * event param, and is passed to the synthetic event * invocation.
*/ - unsigned int var_ref_idx[TRACING_MAP_VARS_MAX]; + unsigned int var_ref_idx[SYNTH_FIELDS_MAX]; struct synth_event *synth_event; bool use_trace_keyword; char *synth_event_name; @@ -1646,6 +1646,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; hist_field->fn = hist_field_log2; hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); + if (!hist_field->operands[0]) + goto free; hist_field->size = hist_field->operands[0]->size; hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); if (!hist_field->type) @@ -1846,7 +1848,9 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, return ref_field; } } - + /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */ + if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX) + return NULL; ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); if (ref_field) { if (init_var_ref(ref_field, var_field, system, event_name)) { @@ -3113,6 +3117,7 @@ static int parse_action_params(struct trace_array *tr, char *params, while (params) { if (data->n_params >= SYNTH_FIELDS_MAX) { hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); + ret = -EINVAL; goto out; } @@ -3449,6 +3454,10 @@ static int trace_action_create(struct hist_trigger_data *hist_data, lockdep_assert_held(&event_mutex); + /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */ + if (data->n_params > SYNTH_FIELDS_MAX) + return -EINVAL; + if (data->use_trace_keyword) synth_event_name = data->synth_event_name; else @@ -5827,7 +5836,7 @@ static int event_hist_trigger_func(struct event_command *cmd_ops, /* Just return zero, not the number of registered triggers */ ret = 0; out: - if (ret == 0) + if (ret == 0 && glob[0]) hist_err_clear(); return ret; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 881df991742ab..18291ab356570 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -791,10 +791,9 @@ static int register_synth_event(struct synth_event *event) } ret = set_synth_event_print_fmt(call); - if (ret < 0) { + /* unregister_trace_event() will be called inside */ + if (ret < 0) trace_remove_event_call(call); - goto err; - } out: return ret; err: diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 000e9dc224c61..b3ee8d9b6b62a 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -1378,7 +1378,7 @@ static struct trace_event *events[] __initdata = { NULL }; -__init static int init_events(void) +__init int init_events(void) { struct trace_event *event; int i, ret; @@ -1396,4 +1396,3 @@ __init static int init_events(void) return 0; } -early_initcall(init_events); diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c index f4938040c2286..3aa55b8075608 100644 --- a/kernel/trace/trace_preemptirq.c +++ b/kernel/trace/trace_preemptirq.c @@ -94,15 +94,15 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr) this_cpu_write(tracing_irq_cpu, 0); } - lockdep_hardirqs_on_prepare(CALLER_ADDR0); - lockdep_hardirqs_on(CALLER_ADDR0); + lockdep_hardirqs_on_prepare(caller_addr); + lockdep_hardirqs_on(caller_addr); } EXPORT_SYMBOL(trace_hardirqs_on_caller); NOKPROBE_SYMBOL(trace_hardirqs_on_caller); __visible void trace_hardirqs_off_caller(unsigned long caller_addr) { - lockdep_hardirqs_off(CALLER_ADDR0); + lockdep_hardirqs_off(caller_addr); if (!this_cpu_read(tracing_irq_cpu)) { 
this_cpu_write(tracing_irq_cpu, 1); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index fdf5fa4bf4448..0cc2a62e88f9e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3047,10 +3047,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) if (WARN_ON(!work->func)) return false; - if (!from_cancel) { - lock_map_acquire(&work->lockdep_map); - lock_map_release(&work->lockdep_map); - } + lock_map_acquire(&work->lockdep_map); + lock_map_release(&work->lockdep_map); if (start_flush_work(work, &barr, from_cancel)) { wait_for_completion(&barr.done); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ce796ca869c22..19c28a34c5f1d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -298,8 +298,10 @@ config FRAME_WARN int "Warn for stack frames larger than" range 0 8192 default 2048 if GCC_PLUGIN_LATENT_ENTROPY - default 1280 if (!64BIT && PARISC) - default 1024 if (!64BIT && !PARISC) + default 2048 if PARISC + default 1536 if (!64BIT && XTENSA) + default 1280 if KASAN && !64BIT + default 1024 if !64BIT default 2048 if 64BIT help Tell gcc to warn at build time for stack frames larger than this. @@ -1801,8 +1803,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT If unsure, say N. config FUNCTION_ERROR_INJECTION - def_bool y + bool "Fault-injection of functions" depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES + help + Add fault injections into various functions that are annotated with + ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return + value of these functions. This is useful to test error paths of code. + + If unsure, say N. config FAULT_INJECTION bool "Fault-injection framework" @@ -1907,7 +1915,6 @@ config KCOV depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS select DEBUG_FS select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC - select SKB_EXTENSIONS if NET help KCOV exposes kernel code coverage information in a form suitable for coverage-guided fuzzing (randomized testing).
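The workqueue change above makes the lockdep annotation unconditional, so the flush-from-cancel path is checked as well. The annotation is just an acquire/release pair on the work's pseudo-lock, which lets lockdep flag patterns like the following (my_lock, my_work and the callbacks are hypothetical, kernel context assumed):

static void my_work_fn(struct work_struct *work)
{
        mutex_lock(&my_lock);           /* my_lock taken inside the work */
        /* ... */
        mutex_unlock(&my_lock);
}

static void teardown(void)
{
        mutex_lock(&my_lock);
        /* cancel_work_sync() flushes, which now always acquires the work's
         * lockdep_map: taking it under my_lock is a reported inversion. */
        cancel_work_sync(&my_work);
        mutex_unlock(&my_lock);
}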
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 9e14ae02306bc..71bdc167a9ee7 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -440,6 +440,7 @@ static int object_cpu_offline(unsigned int cpu) struct debug_percpu_free *percpu_pool; struct hlist_node *tmp; struct debug_obj *obj; + unsigned long flags; /* Remote access is safe as the CPU is dead already */ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); @@ -447,6 +448,12 @@ static int object_cpu_offline(unsigned int cpu) hlist_del(&obj->node); kmem_cache_free(obj_cache, obj); } + + raw_spin_lock_irqsave(&pool_lock, flags); + obj_pool_used -= percpu_pool->obj_free; + debug_objects_freed += percpu_pool->obj_free; + raw_spin_unlock_irqrestore(&pool_lock, flags); + percpu_pool->obj_free = 0; return 0; @@ -1316,6 +1323,8 @@ static int __init debug_objects_replace_static_objects(void) hlist_add_head(&obj->node, &objects); } + debug_objects_allocated += i; + /* * debug_objects_mem_init() is now called early that only one CPU is up * and interrupts have been disabled, so it is safe to replace the @@ -1384,6 +1393,7 @@ void __init debug_objects_mem_init(void) debug_objects_enabled = 0; kmem_cache_destroy(obj_cache); pr_warn("out of memory.\n"); + return; } else debug_objects_selftest(); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 921d0a654243c..10a50c03074ee 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -207,10 +207,11 @@ static int ddebug_change(const struct ddebug_query *query, continue; #ifdef CONFIG_JUMP_LABEL if (dp->flags & _DPRINTK_FLAGS_PRINT) { - if (!(modifiers->flags & _DPRINTK_FLAGS_PRINT)) + if (!(newflags & _DPRINTK_FLAGS_PRINT)) static_branch_disable(&dp->key.dd_key_true); - } else if (modifiers->flags & _DPRINTK_FLAGS_PRINT) + } else if (newflags & _DPRINTK_FLAGS_PRINT) { static_branch_enable(&dp->key.dd_key_true); + } #endif dp->flags = newflags; v2pr_info("changed %s:%d [%s]%s =%s\n", @@ -379,10 +380,6 @@ static int ddebug_parse_query(char *words[], int nwords, return -EINVAL; } - if (modname) - /* support $modname.dyndbg= */ - query->module = modname; - for (i = 0; i < nwords; i += 2) { char *keyword = words[i]; char *arg = words[i+1]; @@ -423,6 +420,13 @@ static int ddebug_parse_query(char *words[], int nwords, if (rc) return rc; } + if (!query->module && modname) + /* + * support $modname.dyndbg=, when + * not given in the query itself + */ + query->module = modname; + vpr_info_dq(query, "parsed"); return 0; } @@ -548,35 +552,6 @@ static int ddebug_exec_queries(char *query, const char *modname) return nfound; } -/** - * dynamic_debug_exec_queries - select and change dynamic-debug prints - * @query: query-string described in admin-guide/dynamic-debug-howto - * @modname: string containing module name, usually &module.mod_name - * - * This uses the >/proc/dynamic_debug/control reader, allowing module - * authors to modify their dynamic-debug callsites. The modname is - * canonically struct module.mod_name, but can also be null or a - * module-wildcard, for example: "drm*". 
- */ -int dynamic_debug_exec_queries(const char *query, const char *modname) -{ - int rc; - char *qry; /* writable copy of query */ - - if (!query) { - pr_err("non-null query/command string expected\n"); - return -EINVAL; - } - qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL); - if (!qry) - return -ENOMEM; - - rc = ddebug_exec_queries(qry, modname); - kfree(qry); - return rc; -} -EXPORT_SYMBOL_GPL(dynamic_debug_exec_queries); - #define PREFIX_SIZE 64 static int remaining(int wrote) diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c index 5f4b07b56cd9c..9738664386088 100644 --- a/lib/fonts/fonts.c +++ b/lib/fonts/fonts.c @@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w, if (res > 20) c += 20 - res; - if ((font_w & (1 << (f->width - 1))) && - (font_h & (1 << (f->height - 1)))) + if ((font_w & (1U << (f->width - 1))) && + (font_h & (1U << (f->height - 1)))) c += 1000; if (c > cc) { diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 1b0a349fbcd92..6e30113303ba6 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -467,20 +467,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) -{ - char *from = kmap_atomic(page); - memcpy(to, from + offset, len); - kunmap_atomic(from); -} - -static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len) -{ - char *to = kmap_atomic(page); - memcpy(to + offset, from, len); - kunmap_atomic(to); -} - static void memzero_page(struct page *page, size_t offset, size_t len) { char *addr = kmap_atomic(page); @@ -1836,24 +1822,38 @@ int import_single_range(int rw, void __user *buf, size_t len, } EXPORT_SYMBOL(import_single_range); -int iov_iter_for_each_range(struct iov_iter *i, size_t bytes, - int (*f)(struct kvec *vec, void *context), - void *context) +/** + * iov_iter_restore() - Restore a &struct iov_iter to the same state as when + * iov_iter_save_state() was called. + * + * @i: &struct iov_iter to restore + * @state: state to restore from + * + * Used after iov_iter_save_state() to restore @i, if operations may + * have advanced it. + * + * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC + */ +void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) { - struct kvec w; - int err = -EINVAL; - if (!bytes) - return 0; - - iterate_all_kinds(i, bytes, v, -EINVAL, ({ - w.iov_base = kmap(v.bv_page) + v.bv_offset; - w.iov_len = v.bv_len; - err = f(&w, context); - kunmap(v.bv_page); - err;}), ({ - w = v; - err = f(&w, context);}) - ) - return err; + if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) && + !iov_iter_is_kvec(i)) + return; + i->iov_offset = state->iov_offset; + i->count = state->count; + /* + * For the *vec iters, nr_segs + iov is constant - if we increment + * the vec, then we also decrement the nr_segs count. Hence we don't + * need to track both of these, just one is enough and we can deduce + * the other from that. ITER_KVEC and ITER_IOVEC are the same struct + * size, so we can just increment the iov pointer as they are unionized. + * ITER_BVEC _may_ be the same size on some archs, but on others it is + * not. Be safe and handle it separately.
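The invariant the comment above relies on, that the vector pointer plus nr_segs stays constant while an iterator advances, can be demonstrated in isolation (simplified structures, not the kernel types):

#include <assert.h>
#include <stddef.h>

struct seg   { const char *base; size_t len; };
struct iter  { const struct seg *v; size_t nr_segs; size_t off; };
struct state { size_t nr_segs; size_t off; };

static void advance_one_seg(struct iter *i)
{
        i->v++;
        i->nr_segs--;
        i->off = 0;
}

int main(void)
{
        struct seg segs[3] = { { "a", 1 }, { "b", 1 }, { "c", 1 } };
        struct iter i = { segs, 3, 0 };
        struct state s = { i.nr_segs, i.off };  /* save */

        advance_one_seg(&i);
        advance_one_seg(&i);

        i.v -= s.nr_segs - i.nr_segs;           /* restore: rewind the pointer */
        i.nr_segs = s.nr_segs;
        i.off = s.off;
        assert(i.v == segs && i.nr_segs == 3);
        return 0;
}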
+ */ + BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); + if (iov_iter_is_bvec(i)) + i->bvec -= state->nr_segs - i->nr_segs; + else + i->iov -= state->nr_segs - i->nr_segs; + i->nr_segs = state->nr_segs; } -EXPORT_SYMBOL(iov_iter_for_each_range); diff --git a/lib/lockref.c b/lib/lockref.c index 5b34bbd3eba81..81ac5f3552428 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -24,7 +24,6 @@ } \ if (!--retry) \ break; \ - cpu_relax(); \ } \ } while (0) diff --git a/lib/nlattr.c b/lib/nlattr.c index fe60f9ae9db17..aa8fc4371e930 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -369,6 +370,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype, if (type <= 0 || type > maxtype) return 0; + type = array_index_nospec(type, maxtype + 1); pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); @@ -584,6 +586,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, } continue; } + type = array_index_nospec(type, maxtype + 1); if (policy) { int err = validate_nla(nla, maxtype, policy, validate, extack, depth); diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c index 21016b32d3131..2b24ea6c94979 100644 --- a/lib/notifier-error-inject.c +++ b/lib/notifier-error-inject.c @@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set, +DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set, "%lld\n"); static struct dentry *debugfs_create_errno(const char *name, umode_t mode, diff --git a/lib/once.c b/lib/once.c index 59149bf3bfb4a..351f66aad310a 100644 --- a/lib/once.c +++ b/lib/once.c @@ -66,3 +66,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key, once_disable_jump(once_key, mod); } EXPORT_SYMBOL(__do_once_done); + +static DEFINE_MUTEX(once_mutex); + +bool __do_once_slow_start(bool *done) + __acquires(once_mutex) +{ + mutex_lock(&once_mutex); + if (*done) { + mutex_unlock(&once_mutex); + /* Keep sparse happy by restoring an even lock count on + * this mutex. In case we return here, we don't call into + * __do_once_done but return early in the DO_ONCE_SLOW() macro. + */ + __acquire(once_mutex); + return false; + } + + return true; +} +EXPORT_SYMBOL(__do_once_slow_start); + +void __do_once_slow_done(bool *done, struct static_key_true *once_key, + struct module *mod) + __releases(once_mutex) +{ + *done = true; + mutex_unlock(&once_mutex); + once_disable_jump(once_key, mod); +} +EXPORT_SYMBOL(__do_once_slow_done); diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 2baa275a6ddf4..76550d2e2edc7 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -1114,6 +1114,7 @@ static int __init test_firmware_init(void) rc = misc_register(&test_fw_misc_device); if (rc) { + __test_firmware_config_free(); kfree(test_fw_config); pr_err("could not register misc device: %d\n", rc); return rc; diff --git a/lib/ubsan.c b/lib/ubsan.c index adf8dcf3c84e6..ee14c46cac897 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -151,16 +151,7 @@ static void ubsan_epilogue(void) current->in_ubsan--; - if (panic_on_warn) { - /* - * This thread may hit another WARN() in the panic path. - * Resetting this prevents additional WARN() from panicking the - * system on this thread. Other threads are blocked by the - * panic_mutex in panic(). 
- */ - panic_on_warn = 0; - panic("panic_on_warn set ...\n"); - } + check_panic_on_warn("UBSAN"); } static void handle_overflow(struct overflow_data *data, void *lhs, diff --git a/lib/usercopy.c b/lib/usercopy.c index 7413dd300516e..7ee63df042d7e 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -3,6 +3,7 @@ #include #include #include +#include /* out-of-line parts */ @@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n unsigned long res = n; might_fault(); if (!should_fail_usercopy() && likely(access_ok(from, n))) { + /* + * Ensure that bad access_ok() speculation will not + * lead to nasty side effects *after* the copy is + * finished: + */ + barrier_nospec(); instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile index c415a685d61bb..e814061d6aa01 100644 --- a/lib/vdso/Makefile +++ b/lib/vdso/Makefile @@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set) endif quiet_cmd_vdso_check = VDSOCHK $@ - cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \ + cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \ then (echo >&2 "$@: dynamic relocations are not supported"; \ rm -f $@; /bin/false); fi diff --git a/mm/compaction.c b/mm/compaction.c index 8dfbe86bd74f7..b58021666e1a3 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1245,7 +1245,7 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage) } static void -fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated) +fast_isolate_around(struct compact_control *cc, unsigned long pfn) { unsigned long start_pfn, end_pfn; struct page *page; @@ -1266,21 +1266,13 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long if (!page) return; - /* Scan before */ - if (start_pfn != pfn) { - isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false); - if (cc->nr_freepages >= cc->nr_migratepages) - return; - } - - /* Scan after */ - start_pfn = pfn + nr_isolated; - if (start_pfn < end_pfn) - isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); + isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ if (cc->nr_freepages < cc->nr_migratepages) set_pageblock_skip(page); + + return; } /* Search orders in round-robin fashion */ @@ -1456,7 +1448,7 @@ fast_isolate_freepages(struct compact_control *cc) return cc->free_pfn; low_pfn = page_to_pfn(page); - fast_isolate_around(cc, low_pfn, nr_isolated); + fast_isolate_around(cc, low_pfn); return low_pfn; } diff --git a/mm/filemap.c b/mm/filemap.c index 125b69f59caad..3a983bc1a71c9 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3303,7 +3303,7 @@ ssize_t generic_perform_write(struct file *file, unsigned long offset; /* Offset into pagecache page */ unsigned long bytes; /* Bytes to write to page */ size_t copied; /* Bytes copied from user */ - void *fsdata; + void *fsdata = NULL; offset = (pos & (PAGE_SIZE - 1)); bytes = min_t(unsigned long, PAGE_SIZE - offset, diff --git a/mm/frame_vector.c b/mm/frame_vector.c index 10f82d5643b6d..0e589a9a88012 100644 --- a/mm/frame_vector.c +++ b/mm/frame_vector.c @@ -37,7 +37,6 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int ret = 0; - int err; int locked; if (nr_frames == 0) @@ -74,32 +73,14 @@ int 
get_vaddr_frames(unsigned long start, unsigned int nr_frames, vec->is_pfns = false; ret = pin_user_pages_locked(start, nr_frames, gup_flags, (struct page **)(vec->ptrs), &locked); - goto out; + if (likely(ret > 0)) + goto out; } - vec->got_ref = false; - vec->is_pfns = true; - do { - unsigned long *nums = frame_vector_pfns(vec); - - while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { - err = follow_pfn(vma, start, &nums[ret]); - if (err) { - if (ret == 0) - ret = err; - goto out; - } - start += PAGE_SIZE; - ret++; - } - /* - * We stop if we have enough pages or if VMA doesn't completely - * cover the tail page. - */ - if (ret >= nr_frames || start < vma->vm_end) - break; - vma = find_vma_intersection(mm, start, start + 1); - } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); + /* This used to (racily) return non-refcounted pfns. Let people know */ + WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping"); + vec->nr_frames = 0; + out: if (locked) mmap_read_unlock(mm); diff --git a/mm/gup.c b/mm/gup.c index 6cb7d8ae56f66..11307a8b20d58 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -405,6 +405,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); + + /* + * Considering PTE level hugetlb, like continuous-PTE hugetlb on + * ARM64 architecture. + */ + if (is_vm_hugetlb_page(vma)) { + page = follow_huge_pmd_pte(vma, address, flags); + if (page) + return page; + return no_page_table(vma, flags); + } + retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); @@ -560,7 +572,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) { - page = follow_huge_pmd(mm, address, pmd, flags); + page = follow_huge_pmd_pte(vma, address, flags); if (page) return page; return no_page_table(vma, flags); @@ -1615,7 +1627,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm, */ if (is_migrate_cma_page(head)) { if (PageHuge(head)) { - if (!isolate_huge_page(head, &cma_page_list)) + if (isolate_hugetlb(head, &cma_page_list)) isolation_error_count++; } else { if (!PageLRU(head) && drain_allow) { @@ -2128,8 +2140,28 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL -static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, - unsigned int flags, struct page **pages, int *nr) +/* + * Fast-gup relies on pte change detection to avoid concurrent pgtable + * operations. + * + * To pin the page, fast-gup needs to do below in order: + * (1) pin the page (by prefetching pte), then (2) check pte not changed. + * + * For the rest of pgtable operations where pgtable updates can be racy + * with fast-gup, we need to do (1) clear pte, then (2) check whether page + * is pinned. + * + * Above will work for all pte-level operations, including THP split. + * + * For THP collapse, it's a bit more complicated because fast-gup may be + * walking a pgtable page that is being freed (pte is still valid but pmd + * can be cleared already). To avoid race in such condition, we need to + * also check pmd here to make sure pmd doesn't change (corresponds to + * pmdp_collapse_flush() in the THP collapse code path). 
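+ * + * Schematically, the two sides pair up as follows: + * + * fast-gup pgtable update + * -------- -------------- + * (1) read pte, pin page (1) clear pte/pmd + * (2) recheck pmd and pte (2) check whether page is pinned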
+ */ +static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned int flags, + struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; @@ -2169,7 +2201,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, if (!head) goto pte_unmap; - if (unlikely(pte_val(pte) != pte_val(*ptep))) { + if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || + unlikely(pte_val(pte) != pte_val(*ptep))) { put_compound_head(head, 1, flags); goto pte_unmap; } @@ -2214,8 +2247,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, * get_user_pages_fast_only implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. */ -static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, - unsigned int flags, struct page **pages, int *nr) +static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned int flags, + struct page **pages, int *nr) { return 0; } @@ -2522,7 +2556,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, flags, pages, nr)) return 0; - } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) + } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); @@ -2542,7 +2576,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo next = pud_addr_end(addr, end); if (unlikely(!pud_present(pud))) return 0; - if (unlikely(pud_huge(pud))) { + if (unlikely(pud_huge(pud) || pud_devmap(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, flags, pages, nr)) return 0; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c42c76447e103..81949f6d29af5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2387,11 +2387,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, page = alloc_buddy_huge_page_with_mpol(h, vma, addr); if (!page) goto out_uncharge_cgroup; + spin_lock(&hugetlb_lock); if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { SetPagePrivate(page); h->resv_huge_pages--; } - spin_lock(&hugetlb_lock); list_add(&page->lru, &h->hugepage_activelist); /* Fall through */ } @@ -4337,6 +4337,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); bool new_page = false; + u32 hash = hugetlb_fault_mutex_hash(mapping, idx); /* * Currently, we are forced to kill the process in the event the @@ -4346,7 +4347,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", current->pid); - return ret; + goto out; } /* @@ -4365,7 +4366,6 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, * Check for page in userfault range */ if (userfaultfd_missing(vma)) { - u32 hash; struct vm_fault vmf = { .vma = vma, .address = haddr, @@ -4380,17 +4380,14 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, }; /* - * hugetlb_fault_mutex and i_mmap_rwsem must be - * dropped before handling userfault. Reacquire - * after handling fault to make calling code simpler. + * vma_lock and hugetlb_fault_mutex must be dropped + * before handling userfault. Also mmap_lock will + * be dropped during handling userfault, any vma + * operation should be careful from here. 
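+ * + * In other words, hugetlb_no_page() returns the handle_userfault() + * result directly instead of retaking the locks, and hugetlb_fault() + * propagates that return value without unlocking again (see the + * hugetlb_fault() hunk below).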
*/ - hash = hugetlb_fault_mutex_hash(mapping, idx); mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); - ret = handle_userfault(&vmf, VM_UFFD_MISSING); - i_mmap_lock_read(mapping); - mutex_lock(&hugetlb_fault_mutex_table[hash]); - goto out; + return handle_userfault(&vmf, VM_UFFD_MISSING); } page = alloc_huge_page(vma, haddr, 0); @@ -4497,6 +4494,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, unlock_page(page); out: + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + i_mmap_unlock_read(mapping); return ret; backout: @@ -4592,10 +4591,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); - if (huge_pte_none(entry)) { - ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); - goto out_mutex; - } + if (huge_pte_none(entry)) + /* + * hugetlb_no_page will drop vma lock and hugetlb fault + * mutex internally, which makes us return immediately. + */ + return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); ret = 0; @@ -5584,12 +5585,13 @@ follow_huge_pd(struct vm_area_struct *vma, } struct page * __weak -follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmd, int flags) +follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags) { + struct hstate *h = hstate_vma(vma); + struct mm_struct *mm = vma->vm_mm; struct page *page = NULL; spinlock_t *ptl; - pte_t pte; + pte_t *ptep, pte; /* FOLL_GET and FOLL_PIN are mutually exclusive. */ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == @@ -5597,17 +5599,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, return NULL; retry: - ptl = pmd_lockptr(mm, pmd); - spin_lock(ptl); - /* - * make sure that the address range covered by this pmd is not - * unmapped from other threads.
- */ - if (!pmd_huge(*pmd)) - goto out; - pte = huge_ptep_get((pte_t *)pmd); + ptep = huge_pte_offset(mm, address, huge_page_size(h)); + if (!ptep) + return NULL; + + ptl = huge_pte_lock(h, mm, ptep); + pte = huge_ptep_get(ptep); if (pte_present(pte)) { - page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); + page = pte_page(pte) + + ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); /* * try_grab_page() should always succeed here, because: a) we * hold the pmd (ptl) lock, and b) we've just checked that the @@ -5623,7 +5623,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, } else { if (is_hugetlb_entry_migration(pte)) { spin_unlock(ptl); - __migration_entry_wait(mm, (pte_t *)pmd, ptl); + __migration_entry_wait(mm, ptep, ptl); goto retry; } /* @@ -5655,14 +5655,14 @@ follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int fla return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); } -bool isolate_huge_page(struct page *page, struct list_head *list) +int isolate_hugetlb(struct page *page, struct list_head *list) { - bool ret = true; + int ret = 0; spin_lock(&hugetlb_lock); if (!PageHeadHuge(page) || !page_huge_active(page) || !get_page_unless_zero(page)) { - ret = false; + ret = -EBUSY; goto unlock; } clear_page_huge_active(page); diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 00a53f1355aec..2f5e96ac4d008 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -95,16 +95,8 @@ static void end_report(unsigned long *flags) pr_err("==================================================================\n"); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irqrestore(&report_lock, *flags); - if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) { - /* - * This thread may hit another WARN() in the panic path. - * Resetting this prevents additional WARN() from panicking the - * system on this thread. Other threads are blocked by the - * panic_mutex in panic(). - */ - panic_on_warn = 0; - panic("panic_on_warn set ...\n"); - } + if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) + check_panic_on_warn("KASAN"); kasan_enable_current(); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 969e57dde65f9..b77186ec70e93 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1144,14 +1144,17 @@ static void collapse_huge_page(struct mm_struct *mm, pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ /* - * After this gup_fast can't run anymore. This also removes - * any huge TLB entry from the CPU so we won't allow - * huge and small TLB entries for the same virtual address - * to avoid the risk of CPU bugs in that area. + * This removes any huge TLB entry from the CPU so we won't allow + * huge and small TLB entries for the same virtual address to + * avoid the risk of CPU bugs in that area. + * + * Parallel fast GUP is fine since fast GUP will back off when + * it detects PMD is changed. 
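+ * + * (The back-off is the pmd_val(*pmdp) recheck added to gup_pte_range() + * in the mm/gup.c hunk above; the tlb_remove_table_sync_one() call + * below then serializes against any fast GUP walker that sampled the + * old PMD value.)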
*/ _pmd = pmdp_collapse_flush(vma, address, pmd); spin_unlock(pmd_ptl); mmu_notifier_invalidate_range_end(&range); + tlb_remove_table_sync_one(); spin_lock(pte_ptl); isolated = __collapse_huge_page_isolate(vma, address, pte, @@ -1441,6 +1444,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) spinlock_t *ptl; int count = 0; int i; + struct mmu_notifier_range range; if (!vma || !vma->vm_file || vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE) @@ -1467,6 +1471,19 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (!pmd) goto drop_hpage; + /* + * We need to lock the mapping so that from here on, only GUP-fast and + * hardware page walks can access the parts of the page tables that + * we're operating on. + */ + i_mmap_lock_write(vma->vm_file->f_mapping); + + /* + * This spinlock should be unnecessary: Nobody else should be accessing + * the page tables under spinlock protection here, only + * lockless_pages_from_mm() and the hardware page walker can access page + * tables while all the high-level locks are held in write mode. + */ start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); /* step 1: check all mapped PTEs are to the right huge page */ @@ -1513,12 +1530,23 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) } /* step 4: collapse pmd */ - ptl = pmd_lock(vma->vm_mm, pmd); + /* we make no change to anon, but protect concurrent anon page lookup */ + if (vma->anon_vma) + anon_vma_lock_write(vma->anon_vma); + + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr, + haddr + HPAGE_PMD_SIZE); + mmu_notifier_invalidate_range_start(&range); _pmd = pmdp_collapse_flush(vma, haddr, pmd); - spin_unlock(ptl); mm_dec_nr_ptes(mm); + tlb_remove_table_sync_one(); + mmu_notifier_invalidate_range_end(&range); pte_free(mm, pmd_pgtable(_pmd)); + if (vma->anon_vma) + anon_vma_unlock_write(vma->anon_vma); + i_mmap_unlock_write(vma->vm_file->f_mapping); + drop_hpage: unlock_page(hpage); put_page(hpage); @@ -1526,6 +1554,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) abort: pte_unmap_unlock(start_pte, ptl); + i_mmap_unlock_write(vma->vm_file->f_mapping); goto drop_hpage; } @@ -1575,7 +1604,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * An alternative would be drop the check, but check that page * table is clear before calling pmdp_collapse_flush() under * ptl. It has higher chance to recover THP for the VMA, but - * has higher cost too. + * has higher cost too. It would also probably require locking + * the anon_vma. 
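+ * + * (collapse_pte_mapped_thp() above shows what that locking would look + * like: anon_vma_lock_write() held around pmdp_collapse_flush().)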
*/ if (vma->anon_vma) continue; @@ -1597,12 +1627,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) */ if (mmap_write_trylock(mm)) { if (!khugepaged_test_exit(mm)) { - spinlock_t *ptl = pmd_lock(mm, pmd); + struct mmu_notifier_range range; + + mmu_notifier_range_init(&range, + MMU_NOTIFY_CLEAR, 0, + NULL, mm, addr, + addr + HPAGE_PMD_SIZE); + mmu_notifier_invalidate_range_start(&range); /* assume page table is clear */ _pmd = pmdp_collapse_flush(vma, addr, pmd); - spin_unlock(ptl); mm_dec_nr_ptes(mm); + tlb_remove_table_sync_one(); pte_free(mm, pmd_pgtable(_pmd)); + mmu_notifier_invalidate_range_end(&range); } mmap_write_unlock(mm); } else { diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5bfae0686199e..4801751cb6b6d 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1123,7 +1123,7 @@ EXPORT_SYMBOL(kmemleak_no_scan); void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, gfp_t gfp) { - if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_alloc(__va(phys), size, min_count, gfp); } EXPORT_SYMBOL(kmemleak_alloc_phys); @@ -1137,7 +1137,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys); */ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size) { - if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_free_part(__va(phys), size); } EXPORT_SYMBOL(kmemleak_free_part_phys); @@ -1149,7 +1149,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys); */ void __ref kmemleak_not_leak_phys(phys_addr_t phys) { - if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_not_leak(__va(phys)); } EXPORT_SYMBOL(kmemleak_not_leak_phys); @@ -1161,7 +1161,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys); */ void __ref kmemleak_ignore_phys(phys_addr_t phys) { - if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_ignore(__va(phys)); } EXPORT_SYMBOL(kmemleak_ignore_phys); diff --git a/mm/maccess.c b/mm/maccess.c index 3bd70405f2d84..f6ea117a69eb5 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -83,7 +83,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) return src - unsafe_addr; Efault: pagefault_enable(); - dst[-1] = '\0'; + dst[0] = '\0'; return -EFAULT; } #else /* HAVE_GET_KERNEL_NOFAULT */ diff --git a/mm/madvise.c b/mm/madvise.c index 77e1dc2d4e186..f71fc88f0b331 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -434,8 +434,11 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, continue; } - /* Do not interfere with other mappings of this page */ - if (page_mapcount(page) != 1) + /* + * Do not interfere with other mappings of this page and + * non-LRU page. 
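+ * + * (A page that is not on the LRU cannot go through the deactivation + * and reclaim paths this routine feeds, so it is skipped up front.)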
+ */ + if (!PageLRU(page) || page_mapcount(page) != 1) continue; VM_BUG_ON_PAGE(PageTransCompound(page), page); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 92ab008777183..c62d997c8ca1d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4866,6 +4866,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, unsigned int efd, cfd; struct fd efile; struct fd cfile; + struct dentry *cdentry; const char *name; char *endp; int ret; @@ -4916,6 +4917,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, if (ret < 0) goto out_put_cfile; + /* + * The control file must be a regular cgroup1 file. As a regular cgroup + * file can't be renamed, it's safe to access its name afterwards. + */ + cdentry = cfile.file->f_path.dentry; + if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { + ret = -EINVAL; + goto out_put_cfile; + } + /* * Determine the event callbacks and set them in @event. This used * to be done via struct cftype but cgroup core no longer knows @@ -4924,7 +4935,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, * * DO NOT ADD NEW FILES. */ - name = cfile.file->f_path.dentry->d_name.name; + name = cdentry->d_name.name; if (!strcmp(name, "memory.usage_in_bytes")) { event->register_event = mem_cgroup_usage_register_event; @@ -4948,7 +4959,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, * automatically removed on cgroup destruction but the removal is * asynchronous, so take an extra ref on @css. */ - cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, + cfile_css = css_tryget_online_from_dir(cdentry->d_parent, &memory_cgrp_subsys); ret = -EINVAL; if (IS_ERR(cfile_css)) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index aef267c6a7246..b21dd4a793926 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1763,7 +1763,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist) bool lru = PageLRU(page); if (PageHuge(page)) { - isolated = isolate_huge_page(page, pagelist); + isolated = !isolate_hugetlb(page, pagelist); } else { if (lru) isolated = !isolate_lru_page(page); diff --git a/mm/memory.c b/mm/memory.c index cc50fa0f4590d..cbc0a163d7057 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -823,6 +823,17 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma if (likely(!page_maybe_dma_pinned(page))) return 1; + /* + * The vma->anon_vma of the child process may be NULL + * because the entire vma does not contain anonymous pages. + * A BUG will occur when the copy_present_page() passes + * a copy of a non-anonymous page of that vma to the + * page_add_new_anon_rmap() to set up new anonymous rmap. + * Return 1 if the page is not an anonymous page. 
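+ * + * (Returning 1 makes copy_present_pte() fall back to the ordinary + * share-and-write-protect path, which is safe here because only + * anonymous pages take part in the pinned-page copy scheme.)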
+ */ + if (!PageAnon(page)) + return 1; + new_page = *prealloc; if (!new_page) return -EAGAIN; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 6275b1c05f111..f0633f9a91166 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1288,7 +1288,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (PageHuge(page)) { pfn = page_to_pfn(head) + compound_nr(head) - 1; - isolate_huge_page(head, &source); + isolate_hugetlb(head, &source); continue; } else if (PageTransHuge(page)) pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f9f47449e8dda..6c98585f20dfe 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -622,8 +622,9 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ if (flags & (MPOL_MF_MOVE_ALL) || - (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { - if (!isolate_huge_page(page, qp->pagelist) && + (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 && + !hugetlb_pmd_shared(pte))) { + if (isolate_hugetlb(page, qp->pagelist) && (flags & MPOL_MF_STRICT)) /* * Failed to isolate page but allow migrating pages diff --git a/mm/memremap.c b/mm/memremap.c index 2455bac895066..299aad0d26e56 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -348,6 +348,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) WARN(1, "File system DAX not supported\n"); return ERR_PTR(-EINVAL); } + params.pgprot = pgprot_decrypted(params.pgprot); break; case MEMORY_DEVICE_GENERIC: break; diff --git a/mm/migrate.c b/mm/migrate.c index 495bdac5cf929..fcb7eb6a6ecae 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -164,7 +164,7 @@ void putback_movable_page(struct page *page) * * This function shall be used whenever the isolated pageset has been * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() - * and isolate_huge_page(). + * and isolate_hugetlb(). 
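+ * + * Note the changed convention: isolate_hugetlb() returns 0 on success + * and -EBUSY on failure, where the old isolate_huge_page() returned + * true/false (see the mm/hugetlb.c hunk above).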
*/ void putback_movable_pages(struct list_head *l) { @@ -1657,8 +1657,9 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, if (PageHuge(page)) { if (PageHead(page)) { - isolate_huge_page(page, pagelist); - err = 1; + err = isolate_hugetlb(page, pagelist); + if (!err) + err = 1; } } else { struct page *head; @@ -2473,13 +2474,14 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, migrate->dst[migrate->npages] = 0; migrate->src[migrate->npages++] = mpfn; } - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(ptep - 1, ptl); /* Only flush the TLB if we actually modified any entries */ if (unmapped) flush_tlb_range(walk->vma, start, end); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(ptep - 1, ptl); + return 0; } diff --git a/mm/mmap.c b/mm/mmap.c index b69c9711bb269..33ebda8385b95 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1856,7 +1856,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, if (!arch_validate_flags(vma->vm_flags)) { error = -EINVAL; if (file) - goto unmap_and_free_vma; + goto close_and_free_vma; else goto free_vma; } @@ -1900,6 +1900,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr, return addr; +close_and_free_vma: + if (vma->vm_ops && vma->vm_ops->close) + vma->vm_ops->close(vma); unmap_and_free_vma: vma->vm_file = NULL; fput(file); @@ -2664,6 +2667,7 @@ static void unmap_region(struct mm_struct *mm, { struct vm_area_struct *next = vma_next(mm, prev); struct mmu_gather tlb; + struct vm_area_struct *cur_vma; lru_add_drain(); tlb_gather_mmu(&tlb, mm, start, end); @@ -2678,8 +2682,12 @@ static void unmap_region(struct mm_struct *mm, * concurrent flush in this region has to be coming through the rmap, * and we synchronize against that using the rmap lock. */ - if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) - tlb_flush_mmu(&tlb); + for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) { + if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) { + tlb_flush_mmu(&tlb); + break; + } + } free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING); diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 03c33c93a582b..205fdbb5792a9 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -139,7 +139,7 @@ static void tlb_remove_table_smp_sync(void *arg) /* Simply deliver the interrupt */ } -static void tlb_remove_table_sync_one(void) +void tlb_remove_table_sync_one(void) { /* * This isn't an RCU grace period and hence the page-tables cannot be @@ -163,8 +163,6 @@ static void tlb_remove_table_free(struct mmu_table_batch *batch) #else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */ -static void tlb_remove_table_sync_one(void) { } - static void tlb_remove_table_free(struct mmu_table_batch *batch) { __tlb_remove_table_free(batch); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 43ff22ce76324..1fd41b91a1a88 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4322,6 +4322,30 @@ void fs_reclaim_release(gfp_t gfp_mask) EXPORT_SYMBOL_GPL(fs_reclaim_release); #endif +/* + * Zonelists may change due to hotplug during allocation. Detect when zonelists + * have been rebuilt so allocation retries. Reader side does not lock and + * retries the allocation if zonelist changes. Writer side is protected by the + * embedded spin_lock. 
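+ * + * Reader side, as used by __alloc_pages_slowpath() below (sketch): + * + * restart: + * cookie = zonelist_iter_begin(); + * ... attempt allocation ... + * if (check_retry_zonelist(cookie)) + * goto restart;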
+ */ +static DEFINE_SEQLOCK(zonelist_update_seq); + +static unsigned int zonelist_iter_begin(void) +{ + if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) + return read_seqbegin(&zonelist_update_seq); + + return 0; +} + +static unsigned int check_retry_zonelist(unsigned int seq) +{ + if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) + return read_seqretry(&zonelist_update_seq, seq); + + return seq; +} + /* Perform direct synchronous page reclaim */ static unsigned long __perform_reclaim(gfp_t gfp_mask, unsigned int order, @@ -4629,6 +4653,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int compaction_retries; int no_progress_loops; unsigned int cpuset_mems_cookie; + unsigned int zonelist_iter_cookie; int reserve_flags; /* @@ -4639,11 +4664,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) gfp_mask &= ~__GFP_ATOMIC; -retry_cpuset: +restart: compaction_retries = 0; no_progress_loops = 0; compact_priority = DEF_COMPACT_PRIORITY; cpuset_mems_cookie = read_mems_allowed_begin(); + zonelist_iter_cookie = zonelist_iter_begin(); /* * The fast path uses conservative alloc_flags to succeed only until @@ -4802,9 +4828,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, goto retry; - /* Deal with possible cpuset update races before we start OOM killing */ - if (check_retry_cpuset(cpuset_mems_cookie, ac)) - goto retry_cpuset; + /* + * Deal with possible cpuset update races or zonelist updates to avoid + * an unnecessary OOM kill. + */ + if (check_retry_cpuset(cpuset_mems_cookie, ac) || + check_retry_zonelist(zonelist_iter_cookie)) + goto restart; /* Reclaim has failed us, start killing things */ page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); @@ -4824,9 +4854,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, } nopage: - /* Deal with possible cpuset update races before we fail */ - if (check_retry_cpuset(cpuset_mems_cookie, ac)) - goto retry_cpuset; + /* + * Deal with possible cpuset update races or zonelist updates to avoid + * an unnecessary OOM kill. + */ + if (check_retry_cpuset(cpuset_mems_cookie, ac) || + check_retry_zonelist(zonelist_iter_cookie)) + goto restart; /* * Make sure that __GFP_NOFAIL request doesn't leak out and make sure @@ -5020,9 +5054,12 @@ static inline void free_the_page(struct page *page, unsigned int order) void __free_pages(struct page *page, unsigned int order) { + /* get PageHead before we drop reference */ + int head = PageHead(page); + if (put_page_testzero(page)) free_the_page(page, order); - else if (!PageHead(page)) + else if (!head) while (order-- > 0) free_the_page(page + (1 << order), order); } @@ -5129,6 +5166,18 @@ void *page_frag_alloc(struct page_frag_cache *nc, /* reset page count bias and offset to start of new frag */ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; offset = size - fragsz; + if (unlikely(offset < 0)) { + /* + * The caller is trying to allocate a fragment + * with fragsz > PAGE_SIZE but the cache isn't big + * enough to satisfy the request; this may + * happen in low memory conditions. + * We don't release the cache page because + * it could make memory pressure worse, + * so we simply return NULL here.
+ */ + return NULL; + } } nc->pagecnt_bias--; @@ -5924,9 +5973,8 @@ static void __build_all_zonelists(void *data) int nid; int __maybe_unused cpu; pg_data_t *self = data; - static DEFINE_SPINLOCK(lock); - spin_lock(&lock); + write_seqlock(&zonelist_update_seq); #ifdef CONFIG_NUMA memset(node_load, 0, sizeof(node_load)); @@ -5959,7 +6007,7 @@ static void __build_all_zonelists(void *data) #endif } - spin_unlock(&lock); + write_sequnlock(&zonelist_update_seq); } static noinline void __init diff --git a/mm/slub.c b/mm/slub.c index b395ef0645444..b0f637519ac99 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5559,7 +5559,8 @@ static char *create_unique_id(struct kmem_cache *s) char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); char *p = name; - BUG_ON(!name); + if (!name) + return ERR_PTR(-ENOMEM); *p++ = ':'; /* @@ -5617,6 +5618,8 @@ static int sysfs_slab_add(struct kmem_cache *s) * for the symlinks. */ name = create_unique_id(s); + if (IS_ERR(name)) + return PTR_ERR(name); } s->kobj.kset = kset; diff --git a/mm/swapfile.c b/mm/swapfile.c index 5af6b0f770de6..d87d6971afc9d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1104,6 +1104,7 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size) goto check_out; pr_debug("scan_swap_map of si %d failed to find offset\n", si->type); + cond_resched(); spin_lock(&swap_avail_lock); nextsi: diff --git a/mm/vmscan.c b/mm/vmscan.c index f2817e80a1ab3..51ccd80e70b6a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2439,8 +2439,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) enum lru_list lru; unsigned long nr_reclaimed = 0; unsigned long nr_to_reclaim = sc->nr_to_reclaim; + bool proportional_reclaim; struct blk_plug plug; - bool scan_adjusted; get_scan_count(lruvec, sc, nr); @@ -2458,8 +2458,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) * abort proportional reclaim if either the file or anon lru has already * dropped to zero at the first pass. 
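+ * (The flag was renamed from scan_adjusted to proportional_reclaim + * because it is now computed once before the loop and never changed + * afterwards.)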
*/ - scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() && - sc->priority == DEF_PRIORITY); + proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && + sc->priority == DEF_PRIORITY); blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || @@ -2479,7 +2479,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) cond_resched(); - if (nr_reclaimed < nr_to_reclaim || scan_adjusted) + if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) continue; /* @@ -2530,8 +2530,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned); - - scan_adjusted = true; } blk_finish_plug(&plug); sc->nr_reclaimed += nr_reclaimed; diff --git a/net/802/mrp.c b/net/802/mrp.c index 35e04cc5390c4..c10a432a5b435 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -606,7 +606,10 @@ static void mrp_join_timer(struct timer_list *t) spin_unlock(&app->lock); mrp_queue_xmit(app); - mrp_join_timer_arm(app); + spin_lock(&app->lock); + if (likely(app->active)) + mrp_join_timer_arm(app); + spin_unlock(&app->lock); } static void mrp_periodic_timer_arm(struct mrp_applicant *app) @@ -620,11 +623,12 @@ static void mrp_periodic_timer(struct timer_list *t) struct mrp_applicant *app = from_timer(app, t, periodic_timer); spin_lock(&app->lock); - mrp_mad_event(app, MRP_EVENT_PERIODIC); - mrp_pdu_queue(app); + if (likely(app->active)) { + mrp_mad_event(app, MRP_EVENT_PERIODIC); + mrp_pdu_queue(app); + mrp_periodic_timer_arm(app); + } spin_unlock(&app->lock); - - mrp_periodic_timer_arm(app); } static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) @@ -872,6 +876,7 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl) app->dev = dev; app->app = appl; app->mad = RB_ROOT; + app->active = true; spin_lock_init(&app->lock); skb_queue_head_init(&app->queue); rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); @@ -900,6 +905,9 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) RCU_INIT_POINTER(port->applicants[appl->type], NULL); + spin_lock_bh(&app->lock); + app->active = false; + spin_unlock_bh(&app->lock); /* Delete timer and generate a final TX event to flush out * all pending messages before the applicant is gone. 
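+ * + * Clearing app->active under app->lock pairs with the checks in + * mrp_join_timer() and mrp_periodic_timer() above, so neither timer + * can re-arm itself once teardown has begun.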
*/ diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 8f528e783a6c5..e070a0b8e5ca3 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -118,7 +118,7 @@ struct p9_conn { struct list_head unsent_req_list; struct p9_req_t *rreq; struct p9_req_t *wreq; - char tmp_buf[7]; + char tmp_buf[P9_HDRSZ]; struct p9_fcall rc; int wpos; int wsize; @@ -200,11 +200,15 @@ static void p9_conn_cancel(struct p9_conn *m, int err) list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { list_move(&req->req_list, &cancel_list); + req->status = REQ_STATUS_ERROR; } list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { list_move(&req->req_list, &cancel_list); + req->status = REQ_STATUS_ERROR; } + spin_unlock(&m->client->lock); + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); list_del(&req->req_list); @@ -212,7 +216,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err) req->t_err = err; p9_client_cb(m->client, req, REQ_STATUS_ERROR); } - spin_unlock(&m->client->lock); } static __poll_t @@ -288,7 +291,7 @@ static void p9_read_work(struct work_struct *work) if (!m->rc.sdata) { m->rc.sdata = m->tmp_buf; m->rc.offset = 0; - m->rc.capacity = 7; /* start by reading header */ + m->rc.capacity = P9_HDRSZ; /* start by reading header */ } clear_bit(Rpending, &m->wsched); @@ -311,7 +314,7 @@ static void p9_read_work(struct work_struct *work) p9_debug(P9_DEBUG_TRANS, "got new header\n"); /* Header size */ - m->rc.size = 7; + m->rc.size = P9_HDRSZ; err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0); if (err) { p9_debug(P9_DEBUG_ERROR, @@ -820,11 +823,14 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) goto out_free_ts; if (!(ts->rd->f_mode & FMODE_READ)) goto out_put_rd; + /* prevent workers from hanging on IO when fd is a pipe */ + ts->rd->f_flags |= O_NONBLOCK; ts->wr = fget(wfd); if (!ts->wr) goto out_put_rd; if (!(ts->wr->f_mode & FMODE_WRITE)) goto out_put_wr; + ts->wr->f_flags |= O_NONBLOCK; client->trans = ts; client->status = Connected; @@ -846,8 +852,10 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket) struct file *file; p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); - if (!p) + if (!p) { + sock_release(csocket); return -ENOMEM; + } csocket->sk->sk_allocation = GFP_NOIO; file = sock_alloc_file(csocket, 0, NULL); diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 432ac5a16f2e0..6c8a33f98f093 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -231,6 +231,14 @@ static void p9_xen_response(struct work_struct *work) continue; } + if (h.size > req->rc.capacity) { + dev_warn(&priv->dev->dev, + "requested packet size too big: %d for tag %d with capacity %zd\n", + h.size, h.tag, req->rc.capacity); + req->status = REQ_STATUS_ERROR; + goto recv_error; + } + memcpy(&req->rc, &h, sizeof(h)); req->rc.offset = 0; @@ -240,6 +248,7 @@ static void p9_xen_response(struct work_struct *work) masked_prod, &masked_cons, XEN_9PFS_RING_SIZE(ring)); +recv_error: virt_mb(); cons += h.size; ring->intf->in_cons = cons; diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 829db9eba0cb9..aaf64b9539150 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff, if (!page) return -ENOMEM; - for (p = page, len = 0; len < nbytes; p++, len++) { + for (p = page, len = 0; len < nbytes; p++) { if (get_user(*p, buff++)) { free_page((unsigned long)page); return -EFAULT; } + len 
+= 1; if (*p == '\0' || *p == '\n') break; } diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index cff4944d5b663..7601ce9143c18 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -1010,6 +1010,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type); hci_dev_unlock(hdev); + hci_dev_put(hdev); if (!hcon) return -ENOENT; diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 4ef6a54403aa2..2f87f57e7a4fd 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -736,7 +736,7 @@ static int __init bt_init(void) err = bt_sysfs_init(); if (err < 0) - return err; + goto cleanup_led; err = sock_register(&bt_sock_family_ops); if (err) @@ -772,6 +772,8 @@ static int __init bt_init(void) sock_unregister(PF_BLUETOOTH); cleanup_sysfs: bt_sysfs_cleanup(); +cleanup_led: + bt_leds_cleanup(); return err; } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2cb0cf035476b..08c473aa0113d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1623,6 +1623,7 @@ static int hci_dev_do_open(struct hci_dev *hdev) hdev->flush(hdev); if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } @@ -3799,7 +3800,8 @@ int hci_register_dev(struct hci_dev *hdev) hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { + if (!hdev->suspend_notifier.notifier_call && + !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { hdev->suspend_notifier.notifier_call = hci_suspend_notifier; error = register_pm_notifier(&hdev->suspend_notifier); if (error) @@ -4482,15 +4484,27 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); } -static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) +static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) { - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - /* ACL tx timeout must be longer than maximum - * link supervision timeout (40.9 seconds) */ - if (!cnt && time_after(jiffies, hdev->acl_last_tx + - HCI_ACL_TX_TIMEOUT)) - hci_link_tx_to(hdev, ACL_LINK); + unsigned long last_tx; + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return; + + switch (type) { + case LE_LINK: + last_tx = hdev->le_last_tx; + break; + default: + last_tx = hdev->acl_last_tx; + break; } + + /* tx timeout must be longer than maximum link supervision timeout + * (40.9 seconds) + */ + if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) + hci_link_tx_to(hdev, type); } /* Schedule SCO */ @@ -4548,7 +4562,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) struct sk_buff *skb; int quote; - __check_timeout(hdev, cnt); + __check_timeout(hdev, cnt, ACL_LINK); while (hdev->acl_cnt && (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { @@ -4591,8 +4605,6 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) int quote; u8 type; - __check_timeout(hdev, cnt); - BT_DBG("%s", hdev->name); if (hdev->dev_type == HCI_AMP) @@ -4600,6 +4612,8 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) else type = ACL_LINK; + __check_timeout(hdev, cnt, type); + while (hdev->block_cnt > 0 && (chan = hci_chan_sent(hdev, type, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; @@ -4673,7 +4687,7 @@ static void hci_sched_le(struct hci_dev *hdev) cnt = hdev->le_pkts ? 
hdev->le_cnt : hdev->acl_cnt; - __check_timeout(hdev, cnt); + __check_timeout(hdev, cnt, LE_LINK); tmp = cnt; while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { @@ -4897,7 +4911,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; else *req_complete = bt_cb(skb)->hci.req_complete; - kfree_skb(skb); + dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 954b29605c942..eb111504afc60 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4307,6 +4307,19 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; + switch (ev->link_type) { + case SCO_LINK: + case ESCO_LINK: + break; + default: + /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type + * for HCI_Synchronous_Connection_Complete is limited to + * either SCO or eSCO + */ + bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); + return; + } + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); hci_dev_lock(hdev); diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index b69d88b88d2e4..ccd2c377bf83c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -48,6 +48,9 @@ void hci_conn_add_sysfs(struct hci_conn *conn) BT_DBG("conn %p", conn); + if (device_is_registered(&conn->dev)) + return; + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); if (device_add(&conn->dev) < 0) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 0c38af2ff2097..cf56582d298ad 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -61,6 +61,9 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err); static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event); +static void l2cap_retrans_timeout(struct work_struct *work); +static void l2cap_monitor_timeout(struct work_struct *work); +static void l2cap_ack_timeout(struct work_struct *work); static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type) { @@ -476,6 +479,9 @@ struct l2cap_chan *l2cap_chan_create(void) write_unlock(&chan_list_lock); INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); + INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); + INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); + INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); chan->state = BT_OPEN; @@ -1980,7 +1986,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) continue; - if (c->psm == psm) { + if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { int src_match, dst_match; int src_any, dst_any; @@ -3316,10 +3322,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan) chan->rx_state = L2CAP_RX_STATE_RECV; chan->tx_state = L2CAP_TX_STATE_XMIT; - INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); - INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); - INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); - skb_queue_head_init(&chan->srej_q); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); @@ -3758,7 +3760,8 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); - if (test_bit(FLAG_EFS_ENABLE, 
&chan->flags)) { + if (remote_efs && + test_bit(FLAG_EFS_ENABLE, &chan->flags)) { chan->remote_id = efs.id; chan->remote_stype = efs.stype; chan->remote_msdu = le16_to_cpu(efs.msdu); @@ -4303,6 +4306,12 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, } } + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) { + err = -EBADSLT; + goto unlock; + } + err = 0; l2cap_chan_lock(chan); @@ -4332,6 +4341,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, } l2cap_chan_unlock(chan); + l2cap_chan_put(chan); unlock: mutex_unlock(&conn->chan_lock); @@ -4439,7 +4449,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, chan->ident = cmd->ident; l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); - chan->num_conf_rsp++; + if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) + chan->num_conf_rsp++; /* Reset config buffer. */ chan->conf_len = 0; @@ -5799,6 +5810,19 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), scid, mtu, mps); + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + chan = NULL; + goto response; + } + /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, LE_LINK); @@ -5979,6 +6003,18 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, psm = req->psm; + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + goto response; + } + BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); memset(&pdu, 0, sizeof(pdu)); @@ -6865,6 +6901,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { + struct l2cap_ctrl local_control; int err = 0; bool skb_in_use = false; @@ -6889,15 +6926,32 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, chan->buffer_seq = chan->expected_tx_seq; skb_in_use = true; + /* l2cap_reassemble_sdu may free skb, hence invalidate + * control, so make a copy in advance to use it after + * l2cap_reassemble_sdu returns and to avoid the race + * condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but + * it was freed by skb_free_datagram. 
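+ * + * The same copy-before-reassembly pattern is applied to txseq in + * l2cap_stream_rx() below.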
+ */ + local_control = *control; err = l2cap_reassemble_sdu(chan, skb, control); if (err) break; - if (control->final) { + if (local_control.final) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); + local_control.final = 0; + l2cap_retransmit_all(chan, &local_control); l2cap_ertm_send(chan); } } @@ -7277,11 +7331,27 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb) { + /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store + * the txseq field in advance to use it after l2cap_reassemble_sdu + * returns and to avoid the race condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but it was freed by + * skb_free_datagram. + */ + u16 txseq = control->txseq; + BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, chan->rx_state); - if (l2cap_classify_txseq(chan, control->txseq) == - L2CAP_TXSEQ_EXPECTED) { + if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { l2cap_pass_to_tx(chan, control); BT_DBG("buffer_seq %d->%d", chan->buffer_seq, @@ -7304,8 +7374,8 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, } } - chan->last_acked_seq = control->txseq; - chan->expected_tx_seq = __next_seq(chan, control->txseq); + chan->last_acked_seq = txseq; + chan->expected_tx_seq = __next_seq(chan, txseq); return 0; } @@ -7561,6 +7631,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, return; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); } else { BT_DBG("unknown cid 0x%4.4x", cid); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 7324764384b67..8d6fce9005bdd 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -590,7 +590,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) ret = rfcomm_dlc_send_frag(d, frag); if (ret < 0) { - kfree_skb(frag); + dev_kfree_skb_irq(frag); goto unlock; } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 2983e926fe3cc..7df14a0e380cb 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -231,6 +231,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size, if (user_size > size) return ERR_PTR(-EMSGSIZE); + size = SKB_DATA_ALIGN(size); data = kzalloc(size + headroom + tailroom, GFP_USER); if (!data) return ERR_PTR(-ENOMEM); @@ -441,9 +442,6 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) { struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb; - if (!skb->len) - return -EINVAL; - if (!__skb) return 0; diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 10a2c7bca7199..f3c7cfba31e1b 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ /* - Bridged-and-DNAT'ed traffic doesn't * require ip_forwarding. 
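+ * (The skb_dst_drop() calls added here release any dst already + * attached to the skb before skb_dst_set()/skb_dst_set_noref() + * installs the new route, so the old reference is not leaked.)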
*/ if (rt->dst.dev == dev) { + skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); goto bridged_dnat; } @@ -413,6 +414,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ kfree_skb(skb); return 0; } + skb_dst_drop(skb); skb_dst_set_noref(skb, &rt->dst); } @@ -869,6 +871,7 @@ static unsigned int ip_sabotage_in(void *priv, if (nf_bridge && !nf_bridge->in_prerouting && !netif_is_l3_master(skb->dev) && !netif_is_l3_slave(skb->dev)) { + nf_bridge_info_free(skb); state->okfn(state->net, state->sk, skb); return NF_STOLEN; } diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index e4e0c836c3f51..6b07f30675bb0 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc kfree_skb(skb); return 0; } + skb_dst_drop(skb); skb_dst_set_noref(skb, &rt->dst); } diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 852f4b54e8811..1dc5db07650c9 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -855,26 +855,37 @@ EXPORT_SYMBOL_GPL(br_vlan_get_proto); int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) { + struct switchdev_attr attr = { + .orig_dev = br->dev, + .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL, + .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP, + .u.vlan_protocol = ntohs(proto), + }; int err = 0; struct net_bridge_port *p; struct net_bridge_vlan *vlan; struct net_bridge_vlan_group *vg; - __be16 oldproto; + __be16 oldproto = br->vlan_proto; if (br->vlan_proto == proto) return 0; + err = switchdev_port_attr_set(br->dev, &attr); + if (err && err != -EOPNOTSUPP) + return err; + /* Add VLANs for the new proto to the device filter. */ list_for_each_entry(p, &br->port_list, list) { vg = nbp_vlan_group(p); list_for_each_entry(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; err = vlan_vid_add(p->dev, proto, vlan->vid); if (err) goto err_filt; } } - oldproto = br->vlan_proto; br->vlan_proto = proto; recalculate_group_addr(br); @@ -883,20 +894,32 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) /* Delete VLANs for the old proto from the device filter. 
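+ * Entries flagged BR_VLFLAG_ADDED_BY_SWITCHDEV are skipped here, as in + * the add and rollback loops above, since those VLANs are managed by + * the switchdev driver rather than by the software VID filter.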
*/ list_for_each_entry(p, &br->port_list, list) { vg = nbp_vlan_group(p); - list_for_each_entry(vlan, &vg->vlan_list, vlist) + list_for_each_entry(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; vlan_vid_del(p->dev, oldproto, vlan->vid); + } } return 0; err_filt: - list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) + attr.u.vlan_protocol = ntohs(oldproto); + switchdev_port_attr_set(br->dev, &attr); + + list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; vlan_vid_del(p->dev, proto, vlan->vid); + } list_for_each_entry_continue_reverse(p, &br->port_list, list) { vg = nbp_vlan_group(p); - list_for_each_entry(vlan, &vg->vlan_list, vlist) + list_for_each_entry(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; vlan_vid_del(p->dev, proto, vlan->vid); + } } return err; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 310740cc684ad..06b80b5843819 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -999,8 +999,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, goto free_iterate; } - if (repl->valid_hooks != t->valid_hooks) + if (repl->valid_hooks != t->valid_hooks) { + ret = -EINVAL; goto free_unlock; + } if (repl->num_counters && repl->num_counters != t->private->nentries) { ret = -EINVAL; diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 2809cbd6b7f74..d8cb4b2a076b4 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -269,11 +269,15 @@ int cfctrl_linkup_request(struct cflayer *layer, default: pr_warn("Request setup of bad link type = %d\n", param->linktype); + cfpkt_destroy(pkt); return -EINVAL; } req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) + if (!req) { + cfpkt_destroy(pkt); return -ENOMEM; + } + req->client_layer = user_layer; req->cmd = CFCTRL_CMD_LINK_SETUP; req->param = *param; diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 42dc080a4dbbc..806fb4d84fd3e 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -315,9 +315,6 @@ static int chnl_net_open(struct net_device *dev) if (result == 0) { pr_debug("connect timeout\n"); - caif_disconnect_client(dev_net(dev), &priv->chnl); - priv->state = CAIF_DISCONNECTED; - pr_debug("state disconnected\n"); result = -ETIMEDOUT; goto error; } diff --git a/net/can/af_can.c b/net/can/af_can.c index 1c95ede2c9a6e..79f24c6f43c8c 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -451,7 +451,7 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id, /* insert new receiver (dev,canid,mask) -> (func,data) */ - if (dev && dev->type != ARPHRD_CAN) + if (dev && (dev->type != ARPHRD_CAN || !can_get_ml_priv(dev))) return -ENODEV; if (dev && !net_eq(net, dev_net(dev))) @@ -680,7 +680,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) { + if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CAN_MTU)) { pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n", dev->type, skb->len); goto free_skb; @@ -706,7 +706,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) { + if (unlikely(dev->type != 
ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CANFD_MTU)) { pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n", dev->type, skb->len); goto free_skb; diff --git a/net/can/bcm.c b/net/can/bcm.c index e918a0f3cda28..afa82adaf6cd5 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -274,6 +274,7 @@ static void bcm_can_tx(struct bcm_op *op) struct sk_buff *skb; struct net_device *dev; struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; + int err; /* no target device? => exit */ if (!op->ifindex) @@ -298,11 +299,11 @@ static void bcm_can_tx(struct bcm_op *op) /* send with loopback */ skb->dev = dev; can_skb_set_owner(skb, op->sk); - can_send(skb, 1); + err = can_send(skb, 1); + if (!err) + op->frames_abs++; - /* update statistics */ op->currframe++; - op->frames_abs++; /* reached last frame? */ if (op->currframe >= op->nframes) diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c index f33c473279278..ca4ad6cdd5cbf 100644 --- a/net/can/j1939/address-claim.c +++ b/net/can/j1939/address-claim.c @@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb) * leaving this function. */ ecu = j1939_ecu_get_by_name_locked(priv, name); + + if (ecu && ecu->addr == skcb->addr.sa) { + /* The ISO 11783-5 standard, in "4.5.2 - Address claim + * requirements", states: + * d) No CF shall begin, or resume, transmission on the + * network until 250 ms after it has successfully claimed + * an address except when responding to a request for + * address-claimed. + * + * But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim + * prioritization" show that the CF begins the transmission + * after 250 ms from the first AC (address-claimed) message + * even if it sends another AC message during that time window + * to resolve the address contention with another CF. + * + * As stated in "4.4.2.3 - Address-claimed message": + * In order to successfully claim an address, the CF sending + * an address claimed message shall not receive a contending + * claim from another CF for at least 250 ms. + * + * As stated in "4.4.3.2 - NAME management (NM) message": + * 1) A commanding CF can + * d) request that a CF with a specified NAME transmit + * the address-claimed message with its current NAME. + * 2) A target CF shall + * d) send an address-claimed message in response to a + * request for a matching NAME + * + * Taking the above arguments into account, the 250 ms wait is + * requested only during network initialization. + * + * Do not restart the timer on AC message if both the NAME and + * the address match and so if the address has already been + * claimed (timer has expired) or the AC message has been sent + * to resolve the contention with another CF (timer is still + * running). 
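
The long comment above reduces to a small rule: when an address-claimed message repeats both the NAME and the source address we already track, the 250 ms claim timer must not be rearmed, whether it already expired (claim settled) or is still running (contention being resolved). A condensed, hypothetical restatement of that decision, with made-up ecu_* helpers standing in for the j1939 ECU table:

struct ecu;

/* assumed helpers modelling the ECU table used above */
struct ecu *ecu_find_by_name(unsigned long long name);
struct ecu *ecu_create(unsigned long long name);
unsigned char ecu_addr(const struct ecu *ecu);
int addr_is_unicast(unsigned char sa);
void ecu_start_claim_timer(struct ecu *ecu, unsigned int ms);

static void ac_process(unsigned long long name, unsigned char sa)
{
	struct ecu *ecu = ecu_find_by_name(name);

	/* same NAME, same address: nothing to (re)arm */
	if (ecu && ecu_addr(ecu) == sa)
		return;

	if (!ecu && addr_is_unicast(sa))
		ecu = ecu_create(name);
	if (ecu)
		ecu_start_claim_timer(ecu, 250);
}
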
+ */ + goto out_ecu_put; + } + if (!ecu && j1939_address_is_unicast(skcb->addr.sa)) ecu = j1939_ecu_create_locked(priv, name); diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c index ca75d1b8f415c..9da8fbc81c04a 100644 --- a/net/can/j1939/main.c +++ b/net/can/j1939/main.c @@ -332,6 +332,9 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb) /* re-claim the CAN_HDR from the SKB */ cf = skb_push(skb, J1939_CAN_HDR); + /* initialize header structure */ + memset(cf, 0, J1939_CAN_HDR); + /* make it a full can frame again */ skb_put(skb, J1939_CAN_FTR + (8 - dlc)); diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 2830a12a4dd1b..57d6aac7f4353 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -338,10 +338,12 @@ static void j1939_session_skb_drop_old(struct j1939_session *session) __skb_unlink(do_skb, &session->skb_queue); /* drop ref taken in j1939_session_skb_queue() */ skb_unref(do_skb); + spin_unlock_irqrestore(&session->skb_queue.lock, flags); kfree_skb(do_skb); + } else { + spin_unlock_irqrestore(&session->skb_queue.lock, flags); } - spin_unlock_irqrestore(&session->skb_queue.lock, flags); } void j1939_session_skb_queue(struct j1939_session *session, @@ -1085,10 +1087,6 @@ static bool j1939_session_deactivate(struct j1939_session *session) bool active; j1939_session_list_lock(priv); - /* This function should be called with a session ref-count of at - * least 2. - */ - WARN_ON_ONCE(kref_read(&session->kref) < 2); active = j1939_session_deactivate_locked(session); j1939_session_list_unlock(priv); diff --git a/net/core/dev.c b/net/core/dev.c index 34b5aab42b912..b7646d4e079b4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3631,7 +3631,7 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, int skb_csum_hwoffload_help(struct sk_buff *skb, const netdev_features_t features) { - if (unlikely(skb->csum_not_inet)) + if (unlikely(skb_csum_is_sctp(skb))) return !!(features & NETIF_F_SCTP_CRC) ? 
0 : skb_crc32c_csum_help(skb); @@ -10320,24 +10320,16 @@ void netdev_run_todo(void) void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, const struct net_device_stats *netdev_stats) { -#if BITS_PER_LONG == 64 - BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); - memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); - /* zero out counters that only exist in rtnl_link_stats64 */ - memset((char *)stats64 + sizeof(*netdev_stats), 0, - sizeof(*stats64) - sizeof(*netdev_stats)); -#else - size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); - const unsigned long *src = (const unsigned long *)netdev_stats; + size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); + const atomic_long_t *src = (atomic_long_t *)netdev_stats; u64 *dst = (u64 *)stats64; BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); for (i = 0; i < n; i++) - dst[i] = src[i]; + dst[i] = (unsigned long)atomic_long_read(&src[i]); /* zero out counters that only exist in rtnl_link_stats64 */ memset((char *)stats64 + n * sizeof(u64), 0, sizeof(*stats64) - n * sizeof(u64)); -#endif } EXPORT_SYMBOL(netdev_stats_to_stats64); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 54fb18b4f55e4..993420da29307 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include @@ -25,26 +26,6 @@ static int dev_ifname(struct net *net, struct ifreq *ifr) return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex); } -static gifconf_func_t *gifconf_list[NPROTO]; - -/** - * register_gifconf - register a SIOCGIF handler - * @family: Address family - * @gifconf: Function handler - * - * Register protocol dependent address dumping routines. The handler - * that is passed must not be freed or reused until it has been replaced - * by another handler. - */ -int register_gifconf(unsigned int family, gifconf_func_t *gifconf) -{ - if (family >= NPROTO) - return -EINVAL; - gifconf_list[family] = gifconf; - return 0; -} -EXPORT_SYMBOL(register_gifconf); - /* * Perform a SIOCGIFCONF call. This structure will change * size eventually, and there is nothing I can do about it. @@ -57,7 +38,6 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size) char __user *pos; int len; int total; - int i; /* * Fetch the caller's info block. @@ -72,19 +52,15 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size) total = 0; for_each_netdev(net, dev) { - for (i = 0; i < NPROTO; i++) { - if (gifconf_list[i]) { - int done; - if (!pos) - done = gifconf_list[i](dev, NULL, 0, size); - else - done = gifconf_list[i](dev, pos + total, - len - total, size); - if (done < 0) - return -EFAULT; - total += done; - } - } + int done; + if (!pos) + done = inet_gifconf(dev, NULL, 0, size); + else + done = inet_gifconf(dev, pos + total, + len - total, size); + if (done < 0) + return -EFAULT; + total += done; } /* diff --git a/net/core/filter.c b/net/core/filter.c index 4c22e6d1da746..a5df0cf46bbf8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2125,8 +2125,17 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, { unsigned int mlen = skb_network_offset(skb); + if (unlikely(skb->len <= mlen)) { + kfree_skb(skb); + return -ERANGE; + } + if (mlen) { __skb_pull(skb, mlen); + if (unlikely(!skb->len)) { + kfree_skb(skb); + return -ERANGE; + } /* At ingress, the mac header has already been pulled once. 
* At egress, skb_pospull_rcsum has to be done in case that @@ -2146,7 +2155,7 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, u32 flags) { /* Verify that a link layer header is carried */ - if (unlikely(skb->mac_header >= skb->network_header)) { + if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) { kfree_skb(skb); return -ERANGE; } @@ -3192,15 +3201,18 @@ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) { + void *old_data; + /* skb_ensure_writable() is not needed here, as we're * already working on an uncloned skb. */ if (unlikely(!pskb_may_pull(skb, off + len))) return -ENOMEM; - skb_postpull_rcsum(skb, skb->data + off, len); - memmove(skb->data + len, skb->data, off); + old_data = skb->data; __skb_pull(skb, len); + skb_postpull_rcsum(skb, old_data + off, len); + memmove(skb->data, old_data, off); return 0; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index f9baa9b1c77f7..b8d082f557183 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -263,7 +263,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb, key->ct_zone = ct->zone.id; #endif #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - key->ct_mark = ct->mark; + key->ct_mark = READ_ONCE(ct->mark); #endif cl = nf_ct_labels_find(ct); @@ -1485,7 +1485,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow) } EXPORT_SYMBOL(flow_get_u32_dst); -/* Sort the source and destination IP (and the ports if the IP are the same), +/* Sort the source and destination IP and the ports, * to have consistent hash within the two directions */ static inline void __flow_hash_consistentify(struct flow_keys *keys) @@ -1494,13 +1494,12 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) switch (keys->control.addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - addr_diff = (__force u32)keys->addrs.v4addrs.dst - - (__force u32)keys->addrs.v4addrs.src; - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if ((__force u32)keys->addrs.v4addrs.dst < + (__force u32)keys->addrs.v4addrs.src) swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst); + + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; @@ -1508,13 +1507,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) addr_diff = memcmp(&keys->addrs.v6addrs.dst, &keys->addrs.v6addrs.src, sizeof(keys->addrs.v6addrs.dst)); - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if (addr_diff < 0) { for (i = 0; i < 4; i++) swap(keys->addrs.v6addrs.src.s6_addr32[i], keys->addrs.v6addrs.dst.s6_addr32[i]); + } + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 434c5aab83ea2..f6f580e9d2820 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -373,7 +373,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, write_lock_bh(&tbl->lock); neigh_flush_dev(tbl, dev, skip_perm); pneigh_ifdown_and_unlock(tbl, dev); - pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev)); + pneigh_queue_purge(&tbl->proxy_queue, dev ? 
dev_net(dev) : NULL); if (skb_queue_empty_lockless(&tbl->proxy_queue)) del_timer_sync(&tbl->proxy_timer); return 0; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index cbff7d94b993e..e05dd4f3279a8 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -135,6 +135,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data) static int ops_init(const struct pernet_operations *ops, struct net *net) { + struct net_generic *ng; int err = -ENOMEM; void *data = NULL; @@ -153,6 +154,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net) if (!err) return 0; + if (ops->id && ops->size) { + ng = rcu_dereference_protected(net->gen, + lockdep_is_held(&pernet_ops_rwsem)); + ng->ptr[*ops->id] = NULL; + } + cleanup: kfree(data); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 635cabcf8794f..668a9d0fbbc6e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2115,6 +2115,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) insp = list; } else { /* Eaten partially. */ + if (skb_is_gso(skb) && !list->head_frag && + skb_headlen(list)) + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; if (skb_shared(list)) { /* Sucks! We need to fork list. :-( */ @@ -3685,7 +3688,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, skb_shinfo(skb)->frag_list = NULL; - do { + while (list_skb) { nskb = list_skb; list_skb = list_skb->next; @@ -3729,8 +3732,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, if (skb_needs_linearize(nskb, features) && __skb_linearize(nskb)) goto err_linearize; - - } while (list_skb); + } skb->truesize = skb->truesize - delta_truesize; skb->data_len = skb->data_len - delta_len; @@ -3809,23 +3811,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, int i = 0; int pos; - if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && - (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { - /* gso_size is untrusted, and we have a frag_list with a linear - * non head_frag head. - * - * (we assume checking the first list_skb member suffices; - * i.e if either of the list_skb members have non head_frag - * head, then the first one has too). - * - * If head_skb's headlen does not fit requested gso_size, it - * means that the frag_list members do NOT terminate on exact - * gso_size boundaries. Hence we cannot perform skb_frag_t page - * sharing. Therefore we must fallback to copying the frag_list - * skbs; we do so by disabling SG. - */ - if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) - features &= ~NETIF_F_SG; + if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && + mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { + struct sk_buff *check_skb; + + for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { + if (skb_headlen(check_skb) && !check_skb->head_frag) { + /* gso_size is untrusted, and we have a frag_list with + * a linear non head_frag item. + * + * If head_skb's headlen does not fit requested gso_size, + * it means that the frag_list members do NOT terminate + * on exact gso_size boundaries. Hence we cannot perform + * skb_frag_t page sharing. Therefore we must fallback to + * copying the frag_list skbs; we do so by disabling SG. 
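
The skb_segment() rework above turns a check of only the first frag_list member into a scan of the whole list: with an untrusted (SKB_GSO_DODGY) gso_size, any member that has a linear, non-head_frag area must force the copy path. A sketch of just that predicate, reusing the kernel helpers named above:

static bool must_copy_frag_list(struct sk_buff *head_skb,
				unsigned int mss,
				struct sk_buff *list_skb)
{
	struct sk_buff *p;

	if (!(skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY))
		return false;
	if (mss == GSO_BY_FRAGS || mss == skb_headlen(head_skb))
		return false;

	/* check every member, not just the first one */
	for (p = list_skb; p; p = p->next)
		if (skb_headlen(p) && !p->head_frag)
			return true;	/* can't share pages, disable SG */

	return false;
}
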
+ */ + features &= ~NETIF_F_SG; + break; + } + } } __skb_push(head_skb, doffset); @@ -3986,9 +3990,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, SKB_GSO_CB(nskb)->csum_start = skb_headroom(nskb) + doffset; } else { - skb_copy_bits(head_skb, offset, - skb_put(nskb, len), - len); + if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) + goto err; } continue; } @@ -4257,9 +4260,6 @@ static const u8 skb_ext_type_len[] = { #if IS_ENABLED(CONFIG_MPTCP) [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), #endif -#if IS_ENABLED(CONFIG_KCOV) - [SKB_EXT_KCOV_HANDLE] = SKB_EXT_CHUNKSIZEOF(u64), -#endif }; static __always_inline unsigned int skb_ext_total_length(void) @@ -4276,9 +4276,6 @@ static __always_inline unsigned int skb_ext_total_length(void) #endif #if IS_ENABLED(CONFIG_MPTCP) skb_ext_type_len[SKB_EXT_MPTCP] + -#endif -#if IS_ENABLED(CONFIG_KCOV) - skb_ext_type_len[SKB_EXT_KCOV_HANDLE] + #endif 0; } diff --git a/net/core/sock_map.c b/net/core/sock_map.c index cbf4184fabc98..ee5d3f49b0b5b 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -358,11 +358,13 @@ static void sock_map_free(struct bpf_map *map) sk = xchg(psk, NULL); if (sk) { + sock_hold(sk); lock_sock(sk); rcu_read_lock(); sock_map_unref(sk, psk); rcu_read_unlock(); release_sock(sk); + sock_put(sk); } } diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index b065f0a103ed0..49f9c2c4ffd5a 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -18,6 +18,22 @@ DEFINE_SPINLOCK(reuseport_lock); static DEFINE_IDA(reuseport_ida); +void reuseport_has_conns_set(struct sock *sk) +{ + struct sock_reuseport *reuse; + + if (!rcu_access_pointer(sk->sk_reuseport_cb)) + return; + + spin_lock_bh(&reuseport_lock); + reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + if (likely(reuse)) + reuse->has_conns = 1; + spin_unlock_bh(&reuseport_lock); +} +EXPORT_SYMBOL(reuseport_has_conns_set); + static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) { unsigned int size = sizeof(struct sock_reuseport) + diff --git a/net/core/stream.c b/net/core/stream.c index a166a32b411fa..d7c5413d16d57 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -159,7 +159,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) *timeo_p = current_timeo; } out: - remove_wait_queue(sk_sleep(sk), &wait); + if (!sock_flag(sk, SOCK_DEAD)) + remove_wait_queue(sk_sleep(sk), &wait); return err; do_error: @@ -195,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk) /* First the read buffer. */ __skb_queue_purge(&sk->sk_receive_queue); + /* Next, the error queue. + * We need to use queue lock, because other threads might + * add packets to the queue without socket lock being held. + */ + skb_queue_purge(&sk->sk_error_queue); + /* Next, the write queue. */ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 2455b0c0e4866..a2a8b952b3c55 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -130,6 +130,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * This unhashes the socket and releases the local port, if necessary. 
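
The sock_map_free() change above is a lifetime fix: once the slot has been xchg()'d to NULL, the map no longer owns a reference to the socket, so it could be freed while this CPU is still unlinking it. Holding a temporary reference across the locked section pins it. The shape of the pattern, with the psock unlinking elided:

static void map_free_slot(struct sock **psk)
{
	struct sock *sk = xchg(psk, NULL);

	if (!sk)
		return;

	sock_hold(sk);	/* keep sk alive: the map's ref is gone now */
	lock_sock(sk);
	/* ... drop psock state while the socket cannot go away ... */
	release_sock(sk);
	sock_put(sk);	/* drop the temporary reference */
}
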
*/ dccp_set_state(sk, DCCP_CLOSED); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 2be5c69824f94..c563f9b325d05 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -541,11 +541,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL); /* Clone pktoptions received with SYN, if we own the req */ if (*own_req && ireq->pktopts) { - newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC); + newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk); consume_skb(ireq->pktopts); ireq->pktopts = NULL; - if (newnp->pktoptions) - skb_set_owner_r(newnp->pktoptions, newsk); } return newsk; @@ -605,7 +603,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) --ANK (980728) */ if (np->rxopt.all) - opt_skb = skb_clone(skb, GFP_ATOMIC); + opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) @@ -669,7 +667,6 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &DCCP_SKB_CB(opt_skb)->header.h6)) { - skb_set_owner_r(opt_skb, sk); memmove(IP6CB(opt_skb), &DCCP_SKB_CB(opt_skb)->header.h6, sizeof(struct inet6_skb_parm)); @@ -957,6 +954,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, late_failure: dccp_set_state(sk, DCCP_CLOSED); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); __sk_dst_reset(sk); failure: inet->inet_dport = 0; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 4820dbcedfa2d..230ddf45dff0d 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -22,7 +22,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb, if (!skb->dev) return NULL; - pskb_trim_rcsum(skb, skb->len - len); + if (pskb_trim_rcsum(skb, skb->len - len)) + return NULL; skb->offload_fwd_mark = true; diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 80d2a00d30977..12bf740e2fb31 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1966,7 +1966,8 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) } else { /* Driver expects to be called at twice the frequency in rc */ int n = rc * 2, interval = HZ / n; - u64 count = n * id.data, i = 0; + u64 count = mul_u32_u32(n, id.data); + u64 i = 0; do { rtnl_lock(); @@ -2051,7 +2052,8 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) return n_stats; if (n_stats > S32_MAX / sizeof(u64)) return -ENOMEM; - WARN_ON_ONCE(!n_stats); + if (WARN_ON_ONCE(!n_stats)) + return -EOPNOTSUPP; if (copy_from_user(&stats, useraddr, sizeof(stats))) return -EFAULT; diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index fec1b014c0a26..84e6ef4f35252 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -219,7 +219,9 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb->dev = master->dev; skb_reset_mac_header(skb); skb_reset_mac_len(skb); + spin_lock_bh(&hsr->seqnr_lock); hsr_forward_skb(skb, master); + spin_unlock_bh(&hsr->seqnr_lock); } else { atomic_long_inc(&dev->tx_dropped); dev_kfree_skb_any(skb); @@ -232,7 +234,7 @@ static const struct header_ops hsr_header_ops = { .parse = eth_header_parse, }; -static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto) +static struct 
sk_buff *hsr_init_skb(struct hsr_port *master) { struct hsr_priv *hsr = master->hsr; struct sk_buff *skb; @@ -244,8 +246,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto) * being, for PRP it is a trailer and for HSR it is a * header */ - skb = dev_alloc_skb(sizeof(struct hsr_tag) + - sizeof(struct hsr_sup_tag) + + skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) + sizeof(struct hsr_sup_payload) + hlen + tlen); if (!skb) @@ -253,10 +254,9 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto) skb_reserve(skb, hlen); skb->dev = master->dev; - skb->protocol = htons(proto); skb->priority = TC_PRIO_CONTROL; - if (dev_hard_header(skb, skb->dev, proto, + if (dev_hard_header(skb, skb->dev, ETH_P_PRP, hsr->sup_multicast_addr, skb->dev->dev_addr, skb->len) <= 0) goto out; @@ -278,12 +278,9 @@ static void send_hsr_supervision_frame(struct hsr_port *master, { struct hsr_priv *hsr = master->hsr; __u8 type = HSR_TLV_LIFE_CHECK; - struct hsr_tag *hsr_tag = NULL; struct hsr_sup_payload *hsr_sp; struct hsr_sup_tag *hsr_stag; - unsigned long irqflags; struct sk_buff *skb; - u16 proto; *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); if (hsr->announce_count < 3 && hsr->prot_version == 0) { @@ -292,39 +289,25 @@ static void send_hsr_supervision_frame(struct hsr_port *master, hsr->announce_count++; } - if (!hsr->prot_version) - proto = ETH_P_PRP; - else - proto = ETH_P_HSR; - - skb = hsr_init_skb(master, proto); + skb = hsr_init_skb(master); if (!skb) { WARN_ONCE(1, "HSR: Could not send supervision frame\n"); return; } - if (hsr->prot_version > 0) { - hsr_tag = skb_put(skb, sizeof(struct hsr_tag)); - hsr_tag->encap_proto = htons(ETH_P_PRP); - set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE); - } - hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag)); set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf)); set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version); /* From HSRv1 on we have separate supervision sequence numbers. */ - spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); + spin_lock_bh(&hsr->seqnr_lock); if (hsr->prot_version > 0) { hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); hsr->sup_sequence_nr++; - hsr_tag->sequence_nr = htons(hsr->sequence_nr); - hsr->sequence_nr++; } else { hsr_stag->sequence_nr = htons(hsr->sequence_nr); hsr->sequence_nr++; } - spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); hsr_stag->HSR_TLV_type = type; /* TODO: Why 12 in HSRv0? */ @@ -335,11 +318,13 @@ static void send_hsr_supervision_frame(struct hsr_port *master, hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); - if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) + if (skb_put_padto(skb, ETH_ZLEN)) { + spin_unlock_bh(&hsr->seqnr_lock); return; + } hsr_forward_skb(skb, master); - + spin_unlock_bh(&hsr->seqnr_lock); return; } @@ -349,12 +334,9 @@ static void send_prp_supervision_frame(struct hsr_port *master, struct hsr_priv *hsr = master->hsr; struct hsr_sup_payload *hsr_sp; struct hsr_sup_tag *hsr_stag; - unsigned long irqflags; struct sk_buff *skb; - struct prp_rct *rct; - u8 *tail; - skb = hsr_init_skb(master, ETH_P_PRP); + skb = hsr_init_skb(master); if (!skb) { WARN_ONCE(1, "PRP: Could not send supervision frame\n"); return; @@ -366,7 +348,7 @@ static void send_prp_supervision_frame(struct hsr_port *master, set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0)); /* From HSRv1 on we have separate supervision sequence numbers. 
*/ - spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); + spin_lock_bh(&hsr->seqnr_lock); hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); hsr->sup_sequence_nr++; hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD; @@ -376,20 +358,13 @@ static void send_prp_supervision_frame(struct hsr_port *master, hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); - if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) { - spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); + if (skb_put_padto(skb, ETH_ZLEN)) { + spin_unlock_bh(&hsr->seqnr_lock); return; } - tail = skb_tail_pointer(skb) - HSR_HLEN; - rct = (struct prp_rct *)tail; - rct->PRP_suffix = htons(ETH_P_PRP); - set_prp_LSDU_size(rct, HSR_V1_SUP_LSDUSIZE); - rct->sequence_nr = htons(hsr->sequence_nr); - hsr->sequence_nr++; - spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); - hsr_forward_skb(skb, master); + spin_unlock_bh(&hsr->seqnr_lock); } /* Announce (supervision frame) timer function @@ -468,7 +443,7 @@ void hsr_dev_setup(struct net_device *dev) dev->header_ops = &hsr_header_ops; dev->netdev_ops = &hsr_device_ops; SET_NETDEV_DEVTYPE(dev, &hsr_type); - dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; dev->needs_free_netdev = true; diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index baf4765be6d78..aec48e670fb69 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -108,15 +108,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, struct hsr_port *port) { if (!frame->skb_std) { - if (frame->skb_hsr) { + if (frame->skb_hsr) frame->skb_std = create_stripped_skb_hsr(frame->skb_hsr, frame); - } else { - /* Unexpected */ - WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", - __FILE__, __LINE__, port->dev->name); + else + netdev_warn_once(port->dev, + "Unexpected frame received in hsr_get_untagged_frame()\n"); + + if (!frame->skb_std) return NULL; - } } return skb_clone(frame->skb_std, GFP_ATOMIC); @@ -186,6 +186,7 @@ static struct sk_buff *prp_fill_rct(struct sk_buff *skb, set_prp_LSDU_size(trailer, lsdu_size); trailer->sequence_nr = htons(frame->sequence_nr); trailer->PRP_suffix = htons(ETH_P_PRP); + skb->protocol = eth_hdr(skb)->h_proto; return skb; } @@ -226,6 +227,7 @@ static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; hsr_ethhdr->ethhdr.h_proto = htons(proto_version ? 
ETH_P_HSR : ETH_P_PRP); + skb->protocol = hsr_ethhdr->ethhdr.h_proto; return skb; } @@ -303,17 +305,18 @@ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev, struct hsr_node *node_src) { bool was_multicast_frame; - int res; + int res, recv_len; was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST); hsr_addr_subst_source(node_src, skb); skb_pull(skb, ETH_HLEN); + recv_len = skb->len; res = netif_rx(skb); if (res == NET_RX_DROP) { dev->stats.rx_dropped++; } else { dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += recv_len; if (was_multicast_frame) dev->stats.multicast++; } @@ -434,7 +437,6 @@ static void handle_std_frame(struct sk_buff *skb, { struct hsr_port *port = frame->port_rcv; struct hsr_priv *hsr = port->hsr; - unsigned long irqflags; frame->skb_hsr = NULL; frame->skb_prp = NULL; @@ -444,17 +446,20 @@ static void handle_std_frame(struct sk_buff *skb, frame->is_from_san = true; } else { /* Sequence nr for the master node */ - spin_lock_irqsave(&hsr->seqnr_lock, irqflags); + lockdep_assert_held(&hsr->seqnr_lock); frame->sequence_nr = hsr->sequence_nr; hsr->sequence_nr++; - spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags); } } int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb, struct hsr_frame_info *frame) { - if (proto == htons(ETH_P_PRP) || + struct hsr_port *port = frame->port_rcv; + struct hsr_priv *hsr = port->hsr; + + /* HSRv0 supervisory frames double as a tag so treat them as tagged. */ + if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) || proto == htons(ETH_P_HSR)) { /* Check if skb contains hsr_ethhdr */ if (skb->mac_len < sizeof(struct hsr_ethhdr)) @@ -544,11 +549,13 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) { struct hsr_frame_info frame; + rcu_read_lock(); if (fill_frame_info(&frame, skb, port) < 0) goto out_drop; hsr_register_frame_in(frame.node_src, port, frame.sequence_nr); hsr_forward_do(&frame); + rcu_read_unlock(); /* Gets called for ingress frames as well as egress from master port. * So check and increment stats for master port only here. */ @@ -563,6 +570,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) return; out_drop: + rcu_read_unlock(); port->dev->stats.tx_dropped++; kfree_skb(skb); } diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 805f974923b92..20cb6b7dbc694 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -159,6 +159,7 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, return NULL; ether_addr_copy(new_node->macaddress_A, addr); + spin_lock_init(&new_node->seq_out_lock); /* We are only interested in time diffs here, so use current jiffies * as initialization. (0 could trigger an spurious ring error warning). 
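
The pre-existing comment above (initializing the node's time-in fields to current jiffies) leans on the usual wrapping-counter trick: jiffies comparisons are done by signed subtraction, so they survive wraparound, but only if "never seen" entries start at now rather than 0. A self-contained illustration of why 0 misbehaves:

#include <stdbool.h>
#include <stdint.h>

/* userspace restatement of the kernel's time_after(a, b) */
static bool after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;	/* true if a is later than b */
}

/* With last_seen = 0 once the counter is more than 0x7fffffff past
 * zero, after(now, last_seen + timeout) comes out false, so an entry
 * that was never updated looks recently seen; seeding last_seen = now
 * at creation avoids the spurious result.
 */
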
@@ -311,6 +312,7 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) goto done; ether_addr_copy(node_real->macaddress_B, ethhdr->h_source); + spin_lock_bh(&node_real->seq_out_lock); for (i = 0; i < HSR_PT_PORTS; i++) { if (!node_curr->time_in_stale[i] && time_after(node_curr->time_in[i], node_real->time_in[i])) { @@ -321,6 +323,7 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i])) node_real->seq_out[i] = node_curr->seq_out[i]; } + spin_unlock_bh(&node_real->seq_out_lock); node_real->addr_B_port = port_rcv->type; spin_lock_bh(&hsr->list_lock); @@ -413,13 +416,17 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, u16 sequence_nr) { + spin_lock_bh(&node->seq_out_lock); if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) && time_is_after_jiffies(node->time_out[port->type] + - msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) + msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) { + spin_unlock_bh(&node->seq_out_lock); return 1; + } node->time_out[port->type] = jiffies; node->seq_out[port->type] = sequence_nr; + spin_unlock_bh(&node->seq_out_lock); return 0; } diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index d9628e7a5f051..5a771cb3f0325 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -69,6 +69,8 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup); struct hsr_node { struct list_head mac_list; + /* Protect R/W access to seq_out */ + spinlock_t seq_out_lock; unsigned char macaddress_A[ETH_ALEN]; unsigned char macaddress_B[ETH_ALEN]; /* Local slave through which AddrB frames are received from this node */ diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index c25f7617770c8..d4c275e56d825 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -201,8 +201,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len) int err = 0; struct net_device *dev = NULL; - if (len < sizeof(*uaddr)) - return -EINVAL; + err = ieee802154_sockaddr_check_size(uaddr, len); + if (err < 0) + return err; uaddr = (struct sockaddr_ieee802154 *)_uaddr; if (uaddr->family != AF_IEEE802154) @@ -272,6 +273,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) err = -EMSGSIZE; goto out_dev; } + if (!size) { + err = 0; + goto out_dev; + } hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; @@ -494,11 +499,14 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) ro->bound = 0; - if (len < sizeof(*addr)) + err = ieee802154_sockaddr_check_size(addr, len); + if (err < 0) goto out; - if (addr->family != AF_IEEE802154) + if (addr->family != AF_IEEE802154) { + err = -EINVAL; goto out; + } ieee802154_addr_from_sa(&haddr, &addr->addr); dev = ieee802154_get_dev(sock_net(sk), &haddr); @@ -565,8 +573,9 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr, struct dgram_sock *ro = dgram_sk(sk); int err = 0; - if (len < sizeof(*addr)) - return -EINVAL; + err = ieee802154_sockaddr_check_size(addr, len); + if (err < 0) + return err; if (addr->family != AF_IEEE802154) return -EINVAL; @@ -605,6 +614,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) struct ieee802154_mac_cb *cb; struct dgram_sock *ro = dgram_sk(sk); struct ieee802154_addr dst_addr; + DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name); int hlen, tlen; int err; @@ -613,10 +623,20 @@ static int 
dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) return -EOPNOTSUPP; } - if (!ro->connected && !msg->msg_name) - return -EDESTADDRREQ; - else if (ro->connected && msg->msg_name) - return -EISCONN; + if (msg->msg_name) { + if (ro->connected) + return -EISCONN; + if (msg->msg_namelen < IEEE802154_MIN_NAMELEN) + return -EINVAL; + err = ieee802154_sockaddr_check_size(daddr, msg->msg_namelen); + if (err < 0) + return err; + ieee802154_addr_from_sa(&dst_addr, &daddr->addr); + } else { + if (!ro->connected) + return -EDESTADDRREQ; + dst_addr = ro->dst_addr; + } if (!ro->bound) dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); @@ -652,16 +672,6 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) cb = mac_cb_init(skb); cb->type = IEEE802154_FC_TYPE_DATA; cb->ackreq = ro->want_ack; - - if (msg->msg_name) { - DECLARE_SOCKADDR(struct sockaddr_ieee802154*, - daddr, msg->msg_name); - - ieee802154_addr_from_sa(&dst_addr, &daddr->addr); - } else { - dst_addr = ro->dst_addr; - } - cb->secen = ro->secen; cb->secen_override = ro->secen_override; cb->seclevel = ro->seclevel; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 87983e70f03f3..23b06063e1a51 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -403,6 +403,16 @@ config INET_IPCOMP If unsure, say Y. +config INET_TABLE_PERTURB_ORDER + int "INET: Source port perturbation table size (as power of 2)" if EXPERT + default 16 + help + Source port perturbation table size (as power of 2) for + RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm. + + The default is almost always what you want. + Only change this if you know what you are doing. + config INET_XFRM_TUNNEL tristate select INET_TUNNEL diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index a733ce1a3f8f4..8dab0d311aba3 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -158,7 +158,7 @@ void inet_sock_destruct(struct sock *sk) kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); - dst_release(sk->sk_rx_dst); + dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1)); sk_refcnt_debug_dec(sk); } EXPORT_SYMBOL(inet_sock_destruct); @@ -1017,7 +1017,6 @@ static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon const struct proto_ops inet_stream_ops = { .family = PF_INET, - .flags = PROTO_CMSG_DATA_ONLY, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, @@ -1726,12 +1725,7 @@ static const struct net_protocol igmp_protocol = { }; #endif -/* thinking of making this const? Don't. - * early_demux can change based on sysctl. - */ -static struct net_protocol tcp_protocol = { - .early_demux = tcp_v4_early_demux, - .early_demux_handler = tcp_v4_early_demux, +static const struct net_protocol tcp_protocol = { .handler = tcp_v4_rcv, .err_handler = tcp_v4_err, .no_policy = 1, @@ -1739,12 +1733,7 @@ static struct net_protocol tcp_protocol = { .icmp_strict_tag_validation = 1, }; -/* thinking of making this const? Don't. - * early_demux can change based on sysctl. 
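
The dgram_sendmsg() rework above moves all msg_name handling to the top of the function: reject a named destination on a connected socket, require at least the minimum sockaddr length, and run the family-specific size check before ieee802154_addr_from_sa() reads variable-length fields. A generic, self-contained sketch of that kind of check (the struct layout here is hypothetical):

#include <errno.h>
#include <stddef.h>

struct gen_addr {
	unsigned short family;
	unsigned char mode;	/* selects short vs. extended address */
	unsigned char data[8];
};

/* never parse past what the caller actually supplied */
static int addr_check_size(const struct gen_addr *a, size_t len)
{
	size_t need = offsetof(struct gen_addr, data);

	if (len < need)			/* fixed part, incl. mode, present? */
		return -EINVAL;
	need += a->mode ? 8 : 2;	/* extended vs. short address */
	return len < need ? -EINVAL : 0;
}
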
- */ -static struct net_protocol udp_protocol = { - .early_demux = udp_v4_early_demux, - .early_demux_handler = udp_v4_early_demux, +static const struct net_protocol udp_protocol = { .handler = udp_rcv, .err_handler = udp_err, .no_policy = 1, diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 4a8550c49202d..112c6e892d305 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len } inet->inet_daddr = fl4->daddr; inet->inet_dport = usin->sin_port; - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); inet->inet_id = prandom_u32(); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 8f17538755507..88b6120878cd9 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1244,7 +1244,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr) return ret; } -static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) { struct in_device *in_dev = __in_dev_get_rtnl(dev); const struct in_ifaddr *ifa; @@ -2766,8 +2766,6 @@ void __init devinet_init(void) INIT_HLIST_HEAD(&inet_addr_lst[i]); register_pernet_subsys(&devinet_ops); - - register_gifconf(PF_INET, inet_gifconf); register_netdevice_notifier(&ip_netdev_notifier); queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0); diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 3450c9ba2728c..84257678160a3 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -312,6 +312,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ xo->seq.low += skb_shinfo(skb)->gso_segs; } + if (xo->seq.low < seq) + xo->seq.hi++; + esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32)); ip_hdr(skb)->tot_len = htons(skb->len); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index af8a4255cf1ba..5f786ef662ead 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -830,6 +830,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, return -EINVAL; } + if (!cfg->fc_table) + cfg->fc_table = RT_TABLE_MAIN; + return 0; errout: return err; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 3824b7abecf7e..4e94796ccdbd1 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -423,6 +424,7 @@ static struct fib_info *fib_find_info(struct fib_info *nfi) nfi->fib_prefsrc == fi->fib_prefsrc && nfi->fib_priority == fi->fib_priority && nfi->fib_type == fi->fib_type && + nfi->fib_tb_id == fi->fib_tb_id && memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(u32) * RTAX_MAX) == 0 && !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) && @@ -887,13 +889,15 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi, return 1; } + if (fi->nh) { + if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_mp) + return 1; + return 0; + } + if (cfg->fc_oif || cfg->fc_gw_family) { struct fib_nh *nh; - /* cannot match on nexthop object attributes */ - if (fi->nh) - return 1; - nh = fib_info_nh(fi, 0); if (cfg->fc_encap) { if (fib_encap_match(net, cfg->fc_encap_type, @@ -1018,6 +1022,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) if (type > RTAX_MAX) return false; + type = array_index_nospec(type, RTAX_MAX + 1); if (type == RTAX_CC_ALGO) { 
char tmp[TCP_CA_NAME_MAX]; bool ecn_ca = false; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index a28f525e2c474..d11fb16234a6a 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1331,8 +1331,10 @@ int fib_table_insert(struct net *net, struct fib_table *tb, /* The alias was already inserted, so the node must exist. */ l = l ? l : fib_find_node(t, &tp, key); - if (WARN_ON_ONCE(!l)) + if (WARN_ON_ONCE(!l)) { + err = -ENOENT; goto out_free_new_fa; + } if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) == new_fa) { diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 4d97133240036..9ed59147ef66b 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -147,10 +147,14 @@ static int inet_csk_bind_conflict(const struct sock *sk, */ sk_for_each_bound(sk2, &tb->owners) { - if (sk != sk2 && - (!sk->sk_bound_dev_if || - !sk2->sk_bound_dev_if || - sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { + int bound_dev_if2; + + if (sk == sk2) + continue; + bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if); + if ((!sk->sk_bound_dev_if || + !bound_dev_if2 || + sk->sk_bound_dev_if == bound_dev_if2)) { if (reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN) { if ((!relax || @@ -912,11 +916,25 @@ void inet_csk_prepare_forced_close(struct sock *sk) } EXPORT_SYMBOL(inet_csk_prepare_forced_close); +static int inet_ulp_can_listen(const struct sock *sk) +{ + const struct inet_connection_sock *icsk = inet_csk(sk); + + if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone) + return -EINVAL; + + return 0; +} + int inet_csk_listen_start(struct sock *sk, int backlog) { struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); - int err = -EADDRINUSE; + int err; + + err = inet_ulp_can_listen(sk); + if (unlikely(err)) + return err; reqsk_queue_alloc(&icsk->icsk_accept_queue); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index feb7f072f2b26..2615b72118d1f 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -571,8 +571,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) spin_lock(lock); if (osk) { WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); - ret = sk_nulls_del_node_init_rcu(osk); - } else if (found_dup_sk) { + ret = sk_hashed(osk); + if (ret) { + /* Before deleting the node, we insert a new one to make + * sure that the look-up-sk process would not miss either + * of them and that at least one node would exist in ehash + * table all the time. Otherwise there's a tiny chance + * that lookup process could find nothing in ehash table. + */ + __sk_nulls_add_node_tail_rcu(sk, list); + sk_nulls_del_node_init_rcu(osk); + } + goto unlock; + } + if (found_dup_sk) { *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); if (*found_dup_sk) ret = false; @@ -581,6 +593,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) if (ret) __sk_nulls_add_node_rcu(sk, list); +unlock: spin_unlock(lock); return ret; @@ -721,13 +734,13 @@ EXPORT_SYMBOL_GPL(inet_unhash); * Note that we use 32bit integers (vs RFC 'short integers') * because 2^16 is not a multiple of num_ephemeral and this * property might be used by clever attacker. + * * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though - * attacks were since demonstrated, thus we use 65536 instead to really - * give more isolation and privacy, at the expense of 256kB of kernel - * memory. 
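
The comment being rewritten here, together with the INET_TABLE_PERTURB_ORDER knob added above, describes RFC 6056, section 3.3.4, Algorithm 4: a keyed hash picks the starting offset, and a second hash indexes a table of perturbation counters so that connections to different peers do not expose one global sequence. A simplified, self-contained sketch of the selection step (seeding and the busy-port retry loop are elided):

#include <stdint.h>

#define PERTURB_ORDER	16
#define PERTURB_SIZE	(1u << PERTURB_ORDER)

static uint32_t table_perturb[PERTURB_SIZE];	/* randomly seeded once */

static uint16_t pick_port(uint32_t f_hash, uint32_t g_hash,
			  uint16_t lo, uint16_t hi)
{
	uint32_t remaining = (uint32_t)hi - lo + 1;
	uint32_t index = g_hash & (PERTURB_SIZE - 1);
	uint32_t offset = table_perturb[index] + f_hash;

	table_perturb[index] += 2;	/* advance for the next connect */
	return lo + (offset % remaining);
	/* the caller probes offset, offset + 2, ... until a free port */
}
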
+ * attacks were since demonstrated, thus we use 65536 by default instead + * to really give more isolation and privacy, at the expense of 256kB + * of kernel memory. */ -#define INET_TABLE_PERTURB_SHIFT 16 -#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT) +#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER) static u32 *table_perturb; int __inet_hash_connect(struct inet_timewait_death_row *death_row, @@ -771,8 +784,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, if (likely(remaining > 1)) remaining &= ~1U; - net_get_random_once(table_perturb, - INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb)); + get_random_slow_once(table_perturb, + INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb)); index = port_offset & (INET_TABLE_PERTURB_SIZE - 1); offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c411c87ae865f..a00102d7c7fd4 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -81,10 +81,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw) } EXPORT_SYMBOL_GPL(inet_twsk_put); -static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw, - struct hlist_nulls_head *list) +static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw, + struct hlist_nulls_head *list) { - hlist_nulls_add_head_rcu(&tw->tw_node, list); + hlist_nulls_add_tail_rcu(&tw->tw_node, list); } static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw, @@ -120,7 +120,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, spin_lock(lock); - inet_twsk_add_node_rcu(tw, &ehead->chain); + inet_twsk_add_node_tail_rcu(tw, &ehead->chain); /* Step 3: Remove SK from hash chain */ if (__sk_nulls_del_node_init_rcu(sk)) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 6ab5c50aa7a87..65ead8a749337 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1493,24 +1493,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct ip_tunnel_parm *p = &t->parms; __be16 o_flags = p->o_flags; - if (t->erspan_ver <= 2) { - if (t->erspan_ver != 0 && !t->collect_md) - o_flags |= TUNNEL_KEY; - - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) - goto nla_put_failure; - - if (t->erspan_ver == 1) { - if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) - goto nla_put_failure; - } else if (t->erspan_ver == 2) { - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) - goto nla_put_failure; - if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) - goto nla_put_failure; - } - } - if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || nla_put_be16(skb, IFLA_GRE_IFLAGS, gre_tnl_flags_to_gre_flags(p->i_flags)) || @@ -1551,6 +1533,34 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) return -EMSGSIZE; } +static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct ip_tunnel *t = netdev_priv(dev); + + if (t->erspan_ver <= 2) { + if (t->erspan_ver != 0 && !t->collect_md) + t->parms.o_flags |= TUNNEL_KEY; + + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) + goto nla_put_failure; + + if (t->erspan_ver == 1) { + if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) + goto nla_put_failure; + } else if (t->erspan_ver == 2) { + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) + goto nla_put_failure; + } + } + + return ipgre_fill_info(skb, dev); + +nla_put_failure: + 
return -EMSGSIZE; +} + static void erspan_setup(struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); @@ -1629,7 +1639,7 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = { .changelink = erspan_changelink, .dellink = ip_tunnel_dellink, .get_size = ipgre_get_size, - .fill_info = ipgre_fill_info, + .fill_info = erspan_fill_info, .get_link_net = ip_tunnel_get_link_net, }; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index b0c244af1e4d5..eccd7897e7aa6 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -309,14 +309,13 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, ip_hdr(hint)->tos == iph->tos; } -INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *)); -INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *)); +int tcp_v4_early_demux(struct sk_buff *skb); +int udp_v4_early_demux(struct sk_buff *skb); static int ip_rcv_finish_core(struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *dev, const struct sk_buff *hint) { const struct iphdr *iph = ip_hdr(skb); - int (*edemux)(struct sk_buff *skb); struct rtable *rt; int err; @@ -327,21 +326,29 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, goto drop_error; } - if (net->ipv4.sysctl_ip_early_demux && + if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) && !skb_dst(skb) && !skb->sk && !ip_is_fragment(iph)) { - const struct net_protocol *ipprot; - int protocol = iph->protocol; - - ipprot = rcu_dereference(inet_protos[protocol]); - if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { - err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux, - udp_v4_early_demux, skb); - if (unlikely(err)) - goto drop_error; - /* must reload iph, skb->head might have changed */ - iph = ip_hdr(skb); + switch (iph->protocol) { + case IPPROTO_TCP: + if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) { + tcp_v4_early_demux(skb); + + /* must reload iph, skb->head might have changed */ + iph = ip_hdr(skb); + } + break; + case IPPROTO_UDP: + if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) { + err = udp_v4_early_demux(skb); + if (unlikely(err)) + goto drop_error; + + /* must reload iph, skb->head might have changed */ + iph = ip_hdr(skb); + } + break; } } @@ -354,6 +361,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, iph->tos, dev); if (unlikely(err)) goto drop_error; + } else { + struct in_device *in_dev = __in_dev_get_rcu(dev); + + if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY)) + IPCB(skb)->flags |= IPSKB_NOPOLICY; } #ifdef CONFIG_IP_ROUTE_CLASSID diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c index 3205d5f7c8c94..4966ac2aaf87d 100644 --- a/net/ipv4/metrics.c +++ b/net/ipv4/metrics.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include +#include #include #include #include @@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, return -EINVAL; } + type = array_index_nospec(type, RTAX_MAX + 1); if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 1088564d4dbcb..77e3b67e8790b 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -424,7 +424,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) switch (ctinfo) { case IP_CT_NEW: - ct->mark = hash; + WRITE_ONCE(ct->mark, hash); break; case IP_CT_RELATED: case IP_CT_RELATED_REPLY: @@ -441,7 +441,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) #ifdef 
DEBUG nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); #endif - pr_debug("hash=%u ct_hash=%u ", hash, ct->mark); + pr_debug("hash=%u ct_hash=%u ", hash, READ_ONCE(ct->mark)); if (!clusterip_responsible(cipinfo->config, hash)) { pr_debug("not responsible\n"); return NF_DROP; diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index 03df986217b7b..9e6f0f1275e2c 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -83,6 +83,9 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, else oif = NULL; + if (priv->flags & NFTA_FIB_F_IIF) + fl4.flowi4_oif = l3mdev_master_ifindex_rcu(oif); + if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { nft_fib_store_result(dest, priv, nft_in(pkt)); diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 2a17dc9413ae9..7a0102a4b1de7 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -1346,7 +1346,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh, if (!err) { nh->nh_flags = fib_nh->fib_nh_flags; fib_info_update_nhc_saddr(net, &fib_nh->nh_common, - fib_nh->fib_nh_scope); + !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1); } else { fib_nh_release(net, fib_nh); } diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 41afc9155f311..542b66783493b 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -290,12 +290,11 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, struct tcp_request_sock *treq; struct request_sock *req; -#ifdef CONFIG_MPTCP if (sk_is_mptcp(sk)) - ops = &mptcp_subflow_request_sock_ops; -#endif + req = mptcp_subflow_reqsk_alloc(ops, sk, false); + else + req = inet_reqsk_alloc(ops, sk, false); - req = inet_reqsk_alloc(ops, sk, false); if (!req) return NULL; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 86f553864f98f..439970e02ac65 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -361,61 +361,6 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, return ret; } -static void proc_configure_early_demux(int enabled, int protocol) -{ - struct net_protocol *ipprot; -#if IS_ENABLED(CONFIG_IPV6) - struct inet6_protocol *ip6prot; -#endif - - rcu_read_lock(); - - ipprot = rcu_dereference(inet_protos[protocol]); - if (ipprot) - ipprot->early_demux = enabled ? ipprot->early_demux_handler : - NULL; - -#if IS_ENABLED(CONFIG_IPV6) - ip6prot = rcu_dereference(inet6_protos[protocol]); - if (ip6prot) - ip6prot->early_demux = enabled ? 
ip6prot->early_demux_handler : - NULL; -#endif - rcu_read_unlock(); -} - -static int proc_tcp_early_demux(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - int ret = 0; - - ret = proc_dointvec(table, write, buffer, lenp, ppos); - - if (write && !ret) { - int enabled = init_net.ipv4.sysctl_tcp_early_demux; - - proc_configure_early_demux(enabled, IPPROTO_TCP); - } - - return ret; -} - -static int proc_udp_early_demux(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - int ret = 0; - - ret = proc_dointvec(table, write, buffer, lenp, ppos); - - if (write && !ret) { - int enabled = init_net.ipv4.sysctl_udp_early_demux; - - proc_configure_early_demux(enabled, IPPROTO_UDP); - } - - return ret; -} - static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) @@ -685,14 +630,14 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_udp_early_demux, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_udp_early_demux + .proc_handler = proc_douintvec_minmax, }, { .procname = "tcp_early_demux", .data = &init_net.ipv4.sysctl_tcp_early_demux, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_tcp_early_demux + .proc_handler = proc_douintvec_minmax, }, { .procname = "nexthop_compat_mode", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bfeb05f62b94f..6a0560a735ce4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -432,6 +432,7 @@ void tcp_init_sock(struct sock *sk) /* There's a bubble in the pipe until at least the first ACK. */ tp->app_limited = ~0U; + tp->rate_app_limited = 1; /* See draft-stevens-tcpca-spec-01 for discussion of the * initialization of these values. @@ -2796,6 +2797,8 @@ int tcp_disconnect(struct sock *sk, int flags) tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd = TCP_INIT_CWND; tp->snd_cwnd_cnt = 0; + tp->is_cwnd_limited = 0; + tp->max_packets_out = 0; tp->window_clamp = 0; tp->delivered = 0; tp->delivered_ce = 0; @@ -2814,8 +2817,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); - dst_release(sk->sk_rx_dst); - sk->sk_rx_dst = NULL; + dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->segs_in = 0; @@ -2836,6 +2838,7 @@ int tcp_disconnect(struct sock *sk, int flags) tp->last_oow_ack_time = 0; /* There's a bubble in the pipe until at least the first ACK. */ tp->app_limited = ~0U; + tp->rate_app_limited = 1; tp->rack.mstamp = 0; tp->rack.advanced = 0; tp->rack.reo_wnd_steps = 1; @@ -3290,7 +3293,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, case TCP_REPAIR_OPTIONS: if (!tp->repair) err = -EINVAL; - else if (sk->sk_state == TCP_ESTABLISHED) + else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) err = tcp_repair_options_est(sk, optval, optlen); else err = -EPERM; @@ -4041,12 +4044,16 @@ static void __tcp_alloc_md5sig_pool(void) * to memory. See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool_populated = true; + /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() + * and tcp_get_md5sig_pool(). 
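The WRITE_ONCE()/READ_ONCE() pairing being added here, together with the pre-existing smp_wmb()/smp_rmb(), is the classic one-shot publication pattern: fill in the payload, order it before the flag, publish the flag exactly once. A minimal sketch of that pattern follows; struct my_pool, pool and pool_ready are hypothetical names, not from this patch:

        struct my_pool;                         /* hypothetical payload type */
        static struct my_pool *pool;
        static bool pool_ready;

        static void publish_pool(struct my_pool *p)
        {
                pool = p;                       /* initialise the payload first */
                smp_wmb();                      /* order payload before the flag */
                WRITE_ONCE(pool_ready, true);   /* pairs with READ_ONCE() below */
        }

        static struct my_pool *get_pool(void)
        {
                if (!READ_ONCE(pool_ready))     /* pairs with WRITE_ONCE() above */
                        return NULL;
                smp_rmb();                      /* order flag before payload reads */
                return pool;
        }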
+ */ + WRITE_ONCE(tcp_md5sig_pool_populated, true); } bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool_populated)) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { mutex_lock(&tcp_md5sig_mutex); if (!tcp_md5sig_pool_populated) { @@ -4057,7 +4064,8 @@ bool tcp_alloc_md5sig_pool(void) mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool_populated; + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + return READ_ONCE(tcp_md5sig_pool_populated); } EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@ -4073,7 +4081,8 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) { local_bh_disable(); - if (tcp_md5sig_pool_populated) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (READ_ONCE(tcp_md5sig_pool_populated)) { /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ smp_rmb(); return this_cpu_ptr(&tcp_md5sig_pool); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index eaf2308c355a6..926e29e84b40b 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -125,8 +126,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, tmp->sg.end = i; if (apply) { apply_bytes -= size; - if (!apply_bytes) + if (!apply_bytes) { + if (sge->length) + sk_msg_iter_var_prev(i); break; + } } } while (i != msg->sg.end); @@ -315,8 +319,8 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, { bool cork = false, enospc = sk_msg_full(msg); struct sock *sk_redir; - u32 tosend, delta = 0; - u32 eval = __SK_NONE; + u32 tosend, origsize, sent, delta = 0; + u32 eval; int ret; more_data: @@ -347,6 +351,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, tosend = msg->sg.size; if (psock->apply_bytes && psock->apply_bytes < tosend) tosend = psock->apply_bytes; + eval = __SK_NONE; switch (psock->eval) { case __SK_PASS: @@ -370,10 +375,12 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, cork = true; psock->cork = NULL; } - sk_msg_return(sk, msg, msg->sg.size); + sk_msg_return(sk, msg, tosend); release_sock(sk); + origsize = msg->sg.size; ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); + sent = origsize - msg->sg.size; if (eval == __SK_REDIRECT) sock_put(sk_redir); @@ -412,7 +419,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, msg->sg.data[msg->sg.start].page_link && msg->sg.data[msg->sg.start].length) { if (eval == __SK_REDIRECT) - sk_mem_charge(sk, msg->sg.size); + sk_mem_charge(sk, tosend - sent); goto more_data; } } @@ -636,10 +643,9 @@ struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock) */ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) { - int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; struct proto *prot = newsk->sk_prot; - if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE]) + if (is_insidevar(prot, tcp_bpf_prots)) newsk->sk_prot = sk->sk_prot_creator; } #endif /* CONFIG_BPF_STREAM_PARSER */ diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index 709d238018239..56dede4b59d95 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -375,6 +375,7 @@ static void tcp_cdg_init(struct sock *sk) struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); + ca->gradients = NULL; /* We silently fall back to window = 1 if allocation fails. 
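The explicit NULL store in tcp_cdg_init() matters because inet_csk_ca() hands back the same per-socket scratch area whichever congestion-control module last used it; after a setsockopt(TCP_CONGESTION) switch the buffer can contain stale bytes that the release hook would later hand to kfree() as a live pointer. A minimal sketch of the init/release discipline, with hypothetical names (my_cdg, hist; window stands in for tcp_cdg's module parameter):

        static unsigned int window = 8;         /* stand-in for the module param */

        struct my_cdg {
                u32 *hist;                      /* lives in the icsk_ca scratch area */
        };

        static void my_cdg_init(struct sock *sk)
        {
                struct my_cdg *ca = inet_csk_ca(sk);

                ca->hist = NULL;                /* scratch area may hold stale bytes
                                                 * from a previous CA module */
                if (window > 1)
                        ca->hist = kcalloc(window, sizeof(ca->hist[0]), GFP_NOWAIT);
        }

        static void my_cdg_release(struct sock *sk)
        {
                struct my_cdg *ca = inet_csk_ca(sk);

                kfree(ca->hist);                /* kfree(NULL) is a no-op */
                ca->hist = NULL;                /* harden against a double release */
        }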
*/ if (window > 1) ca->gradients = kcalloc(window, sizeof(ca->gradients[0]), @@ -388,6 +389,7 @@ static void tcp_cdg_release(struct sock *sk) struct cdg *ca = inet_csk_ca(sk); kfree(ca->gradients); + ca->gradients = NULL; } static struct tcp_congestion_ops tcp_cdg __read_mostly = { diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e62500d6fe0d0..541758cd0b81f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2175,7 +2175,8 @@ void tcp_enter_loss(struct sock *sk) */ static bool tcp_check_sack_reneging(struct sock *sk, int flag) { - if (flag & FLAG_SACK_RENEGING) { + if (flag & FLAG_SACK_RENEGING && + flag & FLAG_SND_UNA_ADVANCED) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), msecs_to_jiffies(10)); @@ -2496,6 +2497,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp) return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } +static bool tcp_is_non_sack_preventing_reopen(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { + /* Hold old state until something *above* high_seq + * is ACKed. For Reno it is MUST to prevent false + * fast retransmits (RFC2582). SACK TCP is safe. */ + if (!tcp_any_retrans_done(sk)) + tp->retrans_stamp = 0; + return true; + } + return false; +} + /* People celebrate: "We love our President!" */ static bool tcp_try_undo_recovery(struct sock *sk) { @@ -2518,14 +2534,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) } else if (tp->rack.reo_wnd_persist) { tp->rack.reo_wnd_persist--; } - if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { - /* Hold old state until something *above* high_seq - * is ACKed. For Reno it is MUST to prevent false - * fast retransmits (RFC2582). SACK TCP is safe. */ - if (!tcp_any_retrans_done(sk)) - tp->retrans_stamp = 0; + if (tcp_is_non_sack_preventing_reopen(sk)) return true; - } tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; return false; @@ -2561,6 +2571,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; + if (tcp_is_non_sack_preventing_reopen(sk)) + return true; if (frto_undo || tcp_is_sack(tp)) { tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; @@ -5766,7 +5778,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) trace_tcp_probe(sk, skb); tcp_mstamp_refresh(tp); - if (unlikely(!sk->sk_rx_dst)) + if (unlikely(!rcu_access_pointer(sk->sk_rx_dst))) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0d165ce2d80a7..8bd7b1ec3b6a3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -322,6 +322,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * if necessary. 
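Throughout this series sk->sk_rx_dst becomes an __rcu-annotated pointer, and the hunks around this point show all three access idioms, each stating its locking context. Condensed from those hunks:

        /* 1. Existence check only, no dereference, so no RCU section needed
         *    (tcp_rcv_established() above):
         */
        if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
                inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);

        /* 2. Lockless reader inside an RCU read-side section (early demux): */
        dst = rcu_dereference(sk->sk_rx_dst);

        /* 3. Owner with the socket lock held; the lockdep expression documents
         *    why a plain dereference is safe (tcp_v4_do_rcv() below):
         */
        dst = rcu_dereference_protected(sk->sk_rx_dst,
                                        lockdep_sock_is_held(sk));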
*/ tcp_set_state(sk, TCP_CLOSE); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; @@ -1670,15 +1672,18 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || !dst->ops->check(dst, 0)) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb); @@ -1753,7 +1758,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); @@ -1767,8 +1772,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { - u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); - u32 tail_gso_size, tail_gso_segs; + u32 limit, tail_gso_size, tail_gso_segs; struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; @@ -1871,11 +1875,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) __skb_push(skb, hdrlen); no_coalesce: + limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1); + /* Only socket owner can try to collapse/prune rx queues * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty. */ - limit += 64*1024; + limit += 64 * 1024; if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); @@ -2162,7 +2168,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; } } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 48fce999dc612..eefd032bc6dbd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1876,15 +1876,20 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; struct tcp_sock *tp = tcp_sk(sk); - /* Track the maximum number of outstanding packets in each - * window, and remember whether we were cwnd-limited then. + /* Track the strongest available signal of the degree to which the cwnd + * is fully utilized. If cwnd-limited then remember that fact for the + * current window. If not cwnd-limited then track the maximum number of + * outstanding packets in the current window. (If cwnd-limited then we + * chose to not update tp->max_packets_out to avoid an extra else + * clause with no functional impact.) 
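Restated, the rewritten tcp_cwnd_validate() test refreshes the snapshot in exactly three cases: the window recorded by cwnd_usage_seq has been fully acknowledged, the sender is cwnd-limited right now, or it was not cwnd-limited and packets_out set a new maximum. A hypothetical helper making that reading explicit (not part of the patch):

        static bool cwnd_usage_stale(const struct tcp_sock *tp,
                                     bool is_cwnd_limited)
        {
                return !before(tp->snd_una, tp->cwnd_usage_seq) || /* window acked */
                       is_cwnd_limited ||                          /* strongest signal */
                       (!tp->is_cwnd_limited &&
                        tp->packets_out > tp->max_packets_out);    /* new maximum */
        }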
*/ - if (!before(tp->snd_una, tp->max_packets_seq) || - tp->packets_out > tp->max_packets_out || - is_cwnd_limited) { - tp->max_packets_out = tp->packets_out; - tp->max_packets_seq = tp->snd_nxt; + if (!before(tp->snd_una, tp->cwnd_usage_seq) || + is_cwnd_limited || + (!tp->is_cwnd_limited && + tp->packets_out > tp->max_packets_out)) { tp->is_cwnd_limited = is_cwnd_limited; + tp->max_packets_out = tp->packets_out; + tp->cwnd_usage_seq = tp->snd_nxt; } if (tcp_is_cwnd_limited(sk)) { diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c index 7c27aa629af19..8e135af0d4f70 100644 --- a/net/ipv4/tcp_ulp.c +++ b/net/ipv4/tcp_ulp.c @@ -136,6 +136,10 @@ static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops) if (icsk->icsk_ulp_ops) goto out_err; + err = -ENOTCONN; + if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN) + goto out_err; + err = ulp_ops->init(sk); if (err) goto out_err; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index e498c7666ec62..b093daaa3deb9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -446,7 +446,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); /* Fall back to scoring if group has connections */ - if (result && !reuseport_has_conns(sk, false)) + if (result && !reuseport_has_conns(sk)) return result; result = result ? : sk; @@ -2193,7 +2193,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old; if (dst_hold_safe(dst)) { - old = xchg(&sk->sk_rx_dst, dst); + old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); dst_release(old); return old != dst; } @@ -2383,7 +2383,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp_sk_rx_dst_set(sk, dst); ret = udp_unicast_rcv_skb(sk, skb, uh); @@ -2541,7 +2541,7 @@ int udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c index 3eecba0874aa2..d70f683d3c495 100644 --- a/net/ipv4/udp_tunnel_core.c +++ b/net/ipv4/udp_tunnel_core.c @@ -194,6 +194,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); void udp_tunnel_sock_release(struct socket *sock) { rcu_assign_sk_user_data(sock->sk, NULL); + synchronize_rcu(); kernel_sock_shutdown(sock, SHUT_RDWR); sock_release(sock); } diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 8a22486cf2702..17ac45aa7194c 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -437,6 +437,7 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh, { struct ifaddrlblmsg *ifal = nlmsg_data(nlh); ifal->ifal_family = AF_INET6; + ifal->__ifal_reserved = 0; ifal->ifal_prefixlen = prefixlen; ifal->ifal_flags = 0; ifal->ifal_index = ifindex; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index d30c9d949c1b5..4df9dc9375c8e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -661,7 +661,6 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, const struct proto_ops inet6_stream_ops = { .family = PF_INET6, - .flags = PROTO_CMSG_DATA_ONLY, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 206f66310a88d..a30ff5d6808aa 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -51,7 +51,7 @@ static 
void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk) fl6->flowi6_mark = sk->sk_mark; fl6->fl6_dport = inet->inet_dport; fl6->fl6_sport = inet->inet_sport; - fl6->flowlabel = np->flow_label; + fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); fl6->flowi6_uid = sk->sk_uid; if (!fl6->flowi6_oif) @@ -256,7 +256,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, goto out; } - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); out: diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 1c3f02d05d2bf..7608be04d0f58 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -343,6 +343,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features xo->seq.low += skb_shinfo(skb)->gso_segs; } + if (xo->seq.low < seq) + xo->seq.hi++; + esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); len = skb->len - sizeof(struct ipv6hdr); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 9e0890738d93f..0010f9e54f13b 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1153,14 +1153,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu, dev->needed_headroom = dst_len; if (set_mtu) { - dev->mtu = rt->dst.dev->mtu - t_hlen; + int mtu = rt->dst.dev->mtu - t_hlen; + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu -= 8; + mtu -= 8; if (dev->type == ARPHRD_ETHER) - dev->mtu -= ETH_HLEN; + mtu -= ETH_HLEN; - if (dev->mtu < IPV6_MIN_MTU) - dev->mtu = IPV6_MIN_MTU; + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + WRITE_ONCE(dev->mtu, mtu); } } ip6_rt_put(rt); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 15ea3d082534d..4eb9fbfdce332 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -44,21 +44,25 @@ #include #include -INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *)); -INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *)); +void udp_v6_early_demux(struct sk_buff *); +void tcp_v6_early_demux(struct sk_buff *); static void ip6_rcv_finish_core(struct net *net, struct sock *sk, struct sk_buff *skb) { - void (*edemux)(struct sk_buff *skb); - - if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { - const struct inet6_protocol *ipprot; - - ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]); - if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) - INDIRECT_CALL_2(edemux, tcp_v6_early_demux, - udp_v6_early_demux, skb); + if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) && + !skb_dst(skb) && !skb->sk) { + switch (ipv6_hdr(skb)->nexthdr) { + case IPPROTO_TCP: + if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) + tcp_v6_early_demux(skb); + break; + case IPPROTO_UDP: + if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) + udp_v6_early_demux(skb); + break; + } } + if (!skb_valid_dst(skb)) ip6_route_input(skb); } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index fadad8e83521d..e427f5040a08e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -919,6 +919,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (err < 0) goto fail; + /* We prevent @rt from being freed. */ + rcu_read_lock(); + for (;;) { /* Prepare header of the next frame, * before previous one went down. 
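The rcu_read_lock() added to ip6_fragment() spans the whole fast-path loop so that @rt, obtained from the skb's dst, cannot be freed while fragments still reference it; the cost is that every exit path must drop the lock, which is what the two unlock sites in this hunk do. The shape of the change, with the loop body elided:

        rcu_read_lock();                /* keep @rt alive for the whole loop */
        for (;;) {
                /* build the next fragment and send it */
                err = output(net, sk, skb);
                if (err || !iter.frag)
                        break;
        }
        if (err == 0) {
                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS);
                rcu_read_unlock();      /* success exit drops the lock ... */
                return 0;
        }
        IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS);
        rcu_read_unlock();              /* ... and so does the failure exit */
        return err;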
*/ @@ -942,6 +945,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (err == 0) { IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS); + rcu_read_unlock(); return 0; } @@ -949,6 +953,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); + rcu_read_unlock(); return err; slow_path_clean: diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 3a2741569b847..0d4cab94c5dd2 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1476,8 +1476,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) struct net_device *tdev = NULL; struct __ip6_tnl_parm *p = &t->parms; struct flowi6 *fl6 = &t->fl.u.ip6; - unsigned int mtu; int t_hlen; + int mtu; memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); @@ -1524,12 +1524,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) dev->hard_header_len = tdev->hard_header_len + t_hlen; mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU); - dev->mtu = mtu - t_hlen; + mtu = mtu - t_hlen; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu -= 8; + mtu -= 8; - if (dev->mtu < IPV6_MIN_MTU) - dev->mtu = IPV6_MIN_MTU; + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + WRITE_ONCE(dev->mtu, mtu); } } } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 6fa118bf40cdd..2017257cb2784 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -417,6 +417,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, rtnl_lock(); lock_sock(sk); + /* Another thread has converted the socket into IPv4 with + * IPV6_ADDRFORM concurrently. + */ + if (unlikely(sk->sk_family != AF_INET6)) + goto unlock; + switch (optname) { case IPV6_ADDRFORM: @@ -976,6 +982,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; } +unlock: release_sock(sk); if (needs_rtnl) rtnl_unlock(); diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 92f3235fa2874..602743f6dcee0 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -37,6 +37,9 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv, if (ipv6_addr_type(&fl6->daddr) & IPV6_ADDR_LINKLOCAL) { lookup_flags |= RT6_LOOKUP_F_IFACE; fl6->flowi6_oif = get_ifindex(dev ? 
dev : pkt->skb->dev); + } else if ((priv->flags & NFTA_FIB_F_IIF) && + (netif_is_l3_master(dev) || netif_is_l3_slave(dev))) { + fl6->flowi6_oif = dev->ifindex; } if (ipv6_addr_type(&fl6->saddr) & IPV6_ADDR_UNICAST) @@ -193,7 +196,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) goto put_rt_err; - if (oif && oif != rt->rt6i_idev->dev) + if (oif && oif != rt->rt6i_idev->dev && + l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) != oif->ifindex) goto put_rt_err; nft_fib_store_result(dest, priv, rt->rt6i_idev->dev); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 31eb54e92b3f9..110254f44a468 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -539,6 +539,7 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct raw6_sock *rp) { + struct ipv6_txoptions *opt; struct sk_buff *skb; int err = 0; int offset; @@ -556,6 +557,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, offset = rp->offset; total_len = inet_sk(sk)->cork.base.length; + opt = inet6_sk(sk)->cork.opt; + total_len -= opt ? opt->opt_flen : 0; + if (offset >= total_len - 1) { err = -EINVAL; ip6_flush_pending_frames(sk); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index cdf215442d373..803d1aa83140c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -6405,10 +6405,16 @@ static void __net_exit ip6_route_net_exit(struct net *net) static int __net_init ip6_route_net_init_late(struct net *net) { #ifdef CONFIG_PROC_FS - proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops, - sizeof(struct ipv6_route_iter)); - proc_create_net_single("rt6_stats", 0444, net->proc_net, - rt6_stats_seq_show, NULL); + if (!proc_create_net("ipv6_route", 0, net->proc_net, + &ipv6_route_seq_ops, + sizeof(struct ipv6_route_iter))) + return -ENOMEM; + + if (!proc_create_net_single("rt6_stats", 0444, net->proc_net, + rt6_stats_seq_show, NULL)) { + remove_proc_entry("ipv6_route", net->proc_net); + return -ENOMEM; + } #endif return 0; } diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index d2f8138e5a73a..2278c0234c497 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -135,6 +135,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } + if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) { + err = -EINVAL; + goto out_unlock; + } + if (hinfo) { err = seg6_hmac_info_del(net, hmackeyid); if (err) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3c92e8cacbbab..1ce486a9bc076 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1123,10 +1123,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) if (tdev && !netif_is_l3_master(tdev)) { int t_hlen = tunnel->hlen + sizeof(struct iphdr); + int mtu; - dev->mtu = tdev->mtu - t_hlen; - if (dev->mtu < IPV6_MIN_MTU) - dev->mtu = IPV6_MIN_MTU; + mtu = tdev->mtu - t_hlen; + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + WRITE_ONCE(dev->mtu, mtu); } } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8d91f36cb11bc..e4ae5362cb51b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) if (dst && dst_hold_safe(dst)) { const struct rt6_info *rt = (const struct rt6_info *)dst; - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); } @@ 
-269,6 +269,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl6.flowi6_proto = IPPROTO_TCP; fl6.daddr = sk->sk_v6_daddr; fl6.saddr = saddr ? *saddr : np->saddr; + fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = usin->sin6_port; @@ -339,6 +340,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, late_failure: tcp_set_state(sk, TCP_CLOSE); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); failure: inet->inet_dport = 0; sk->sk_route_caps = 0; @@ -1404,14 +1407,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * /* Clone pktoptions received with SYN, if we own the req */ if (ireq->pktopts) { - newnp->pktoptions = skb_clone(ireq->pktopts, - sk_gfp_mask(sk, GFP_ATOMIC)); + newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk); consume_skb(ireq->pktopts); ireq->pktopts = NULL; - if (newnp->pktoptions) { + if (newnp->pktoptions) tcp_v6_restore_cb(newnp->pktoptions); - skb_set_owner_r(newnp->pktoptions, newsk); - } } } else { if (!req_unhash && found_dup_sk) { @@ -1479,18 +1479,21 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) --ANK (980728) */ if (np->rxopt.all) - opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); + opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } @@ -1558,7 +1561,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (np->repflow) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { - skb_set_owner_r(opt_skb, sk); tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { @@ -1815,7 +1817,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) goto discard_it; } -INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) +void tcp_v6_early_demux(struct sk_buff *skb) { const struct ipv6hdr *hdr; const struct tcphdr *th; @@ -1842,7 +1844,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie); @@ -2166,12 +2168,7 @@ struct proto tcpv6_prot = { }; EXPORT_SYMBOL_GPL(tcpv6_prot); -/* thinking of making this const? Don't. - * early_demux can change based on sysctl. 
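The warning in the comment being deleted below no longer applies: early demux is now dispatched by the protocol switch in ip6_rcv_finish_core() and gated by the per-netns sysctls, so nothing writes to the protocol ops after registration and they can be const, i.e. placed in .rodata. A sketch of what that buys a caller; my_protocol, my_rcv, my_err and IPPROTO_MY are made up for illustration (253 is an RFC 3692 experimental protocol number):

        #define IPPROTO_MY 253

        static int my_rcv(struct sk_buff *skb);                 /* hypothetical */
        static int my_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                          u8 type, u8 code, int offset, __be32 info);

        static const struct inet6_protocol my_protocol = {
                .handler     = my_rcv,
                .err_handler = my_err,
                .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
        };

        static int __init my_init(void)
        {
                /* inet6_add_protocol() already takes a const pointer, so the
                 * ops can live in .rodata once nothing patches them at runtime.
                 */
                return inet6_add_protocol(&my_protocol, IPPROTO_MY);
        }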
- */ -static struct inet6_protocol tcpv6_protocol = { - .early_demux = tcp_v6_early_demux, - .early_demux_handler = tcp_v6_early_demux, +static const struct inet6_protocol tcpv6_protocol = { .handler = tcp_v6_rcv, .err_handler = tcp_v6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4e90e5a529455..1805cc5f7418b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -179,7 +179,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); /* Fall back to scoring if group has connections */ - if (result && !reuseport_has_conns(sk, false)) + if (result && !reuseport_has_conns(sk)) return result; result = result ? : sk; @@ -941,7 +941,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp6_sk_rx_dst_set(sk, dst); if (!uh->check && !udp_sk(sk)->no_check6_rx) { @@ -1027,7 +1027,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net, return NULL; } -INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) +void udp_v6_early_demux(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); const struct udphdr *uh; @@ -1055,7 +1055,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); @@ -1640,12 +1640,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname, return ipv6_getsockopt(sk, level, optname, optval, optlen); } -/* thinking of making this const? Don't. - * early_demux can change based on sysctl. 
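For IPv6 the cached rx dst is validated against rx_dst_cookie: the cookie (from rt6_get_cookie(), as set in inet6_sk_rx_dst_set() above) records the route's generation, so dst_check() returns NULL once routing state has changed and demux falls back to a full lookup. The read side, abbreviated; the final noref step is from the surrounding function rather than this hunk:

        dst = rcu_dereference(sk->sk_rx_dst);
        if (dst)
                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
        if (dst)
                skb_dst_set_noref(skb, dst);    /* borrow the dst, no refcount */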
- */ -static struct inet6_protocol udpv6_protocol = { - .early_demux = udp_v6_early_demux, - .early_demux_handler = udp_v6_early_demux, +static const struct inet6_protocol udpv6_protocol = { .handler = udpv6_rcv, .err_handler = udpv6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index af7a4b8b1e9c4..247296e3294bd 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -289,9 +289,13 @@ int __init xfrm6_init(void) if (ret) goto out_state; - register_pernet_subsys(&xfrm6_net_ops); + ret = register_pernet_subsys(&xfrm6_net_ops); + if (ret) + goto out_protocol; out: return ret; +out_protocol: + xfrm6_protocol_fini(); out_state: xfrm6_state_fini(); out_policy: diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 18469f1f707e5..32b516ab9c475 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -161,7 +161,8 @@ static void kcm_rcv_ready(struct kcm_sock *kcm) /* Buffer limit is okay now, add to ready list */ list_add_tail(&kcm->wait_rx_list, &kcm->mux->kcm_rx_waiters); - kcm->rx_wait = true; + /* paired with lockless reads in kcm_rfree() */ + WRITE_ONCE(kcm->rx_wait, true); } static void kcm_rfree(struct sk_buff *skb) @@ -177,7 +178,7 @@ static void kcm_rfree(struct sk_buff *skb) /* For reading rx_wait and rx_psock without holding lock */ smp_mb__after_atomic(); - if (!kcm->rx_wait && !kcm->rx_psock && + if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) && sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) { spin_lock_bh(&mux->rx_lock); kcm_rcv_ready(kcm); @@ -220,7 +221,7 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head) struct sk_buff *skb; struct kcm_sock *kcm; - while ((skb = __skb_dequeue(head))) { + while ((skb = skb_dequeue(head))) { /* Reset destructor to avoid calling kcm_rcv_ready */ skb->destructor = sock_rfree; skb_orphan(skb); @@ -236,7 +237,8 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head) if (kcm_queue_rcv_skb(&kcm->sk, skb)) { /* Should mean socket buffer full */ list_del(&kcm->wait_rx_list); - kcm->rx_wait = false; + /* paired with lockless reads in kcm_rfree() */ + WRITE_ONCE(kcm->rx_wait, false); /* Commit rx_wait to read in kcm_free */ smp_wmb(); @@ -279,10 +281,12 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock, kcm = list_first_entry(&mux->kcm_rx_waiters, struct kcm_sock, wait_rx_list); list_del(&kcm->wait_rx_list); - kcm->rx_wait = false; + /* paired with lockless reads in kcm_rfree() */ + WRITE_ONCE(kcm->rx_wait, false); psock->rx_kcm = kcm; - kcm->rx_psock = psock; + /* paired with lockless reads in kcm_rfree() */ + WRITE_ONCE(kcm->rx_psock, psock); spin_unlock_bh(&mux->rx_lock); @@ -309,7 +313,8 @@ static void unreserve_rx_kcm(struct kcm_psock *psock, spin_lock_bh(&mux->rx_lock); psock->rx_kcm = NULL; - kcm->rx_psock = NULL; + /* paired with lockless reads in kcm_rfree() */ + WRITE_ONCE(kcm->rx_psock, NULL); /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with * kcm_rfree @@ -1079,53 +1084,18 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) return err; } -static struct sk_buff *kcm_wait_data(struct sock *sk, int flags, - long timeo, int *err) -{ - struct sk_buff *skb; - - while (!(skb = skb_peek(&sk->sk_receive_queue))) { - if (sk->sk_err) { - *err = sock_error(sk); - return NULL; - } - - if (sock_flag(sk, SOCK_DONE)) - return NULL; - - if ((flags & MSG_DONTWAIT) || !timeo) { - *err = -EAGAIN; - return NULL; - } - - sk_wait_data(sk, &timeo, NULL); - - /* Handle 
signals */ - if (signal_pending(current)) { - *err = sock_intr_errno(timeo); - return NULL; - } - } - - return skb; -} - static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { + int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct kcm_sock *kcm = kcm_sk(sk); int err = 0; - long timeo; struct strp_msg *stm; int copied = 0; struct sk_buff *skb; - timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); - - lock_sock(sk); - - skb = kcm_wait_data(sk, flags, timeo, &err); + skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; @@ -1156,14 +1126,11 @@ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, /* Finished with message */ msg->msg_flags |= MSG_EOR; KCM_STATS_INCR(kcm->stats.rx_msgs); - skb_unlink(skb, &sk->sk_receive_queue); - kfree_skb(skb); } } out: - release_sock(sk); - + skb_free_datagram(sk, skb); return copied ? : err; } @@ -1171,9 +1138,9 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { + int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct kcm_sock *kcm = kcm_sk(sk); - long timeo; struct strp_msg *stm; int err = 0; ssize_t copied; @@ -1181,11 +1148,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, /* Only support splice for SOCKSEQPACKET */ - timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); - - lock_sock(sk); - - skb = kcm_wait_data(sk, flags, timeo, &err); + skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto err_out; @@ -1213,13 +1176,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, * finish reading the message. */ - release_sock(sk); - + skb_free_datagram(sk, skb); return copied; err_out: - release_sock(sk); - + skb_free_datagram(sk, skb); return err; } @@ -1239,7 +1200,8 @@ static void kcm_recv_disable(struct kcm_sock *kcm) if (!kcm->rx_psock) { if (kcm->rx_wait) { list_del(&kcm->wait_rx_list); - kcm->rx_wait = false; + /* paired with lockless reads in kcm_rfree() */ + WRITE_ONCE(kcm->rx_wait, false); } requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue); @@ -1792,7 +1754,8 @@ static void kcm_done(struct kcm_sock *kcm) if (kcm->rx_wait) { list_del(&kcm->wait_rx_list); - kcm->rx_wait = false; + /* paired with lockless reads in kcm_rfree() */ + WRITE_ONCE(kcm->rx_wait, false); } /* Move any pending receive messages to other kcm sockets */ requeue_rx_msgs(mux, &sk->sk_receive_queue); @@ -1837,10 +1800,10 @@ static int kcm_release(struct socket *sock) kcm = kcm_sk(sk); mux = kcm->mux; + lock_sock(sk); sock_orphan(sk); kfree_skb(kcm->seq_skb); - lock_sock(sk); /* Purge queue under lock to avoid race condition with tx_work trying * to act when queue is nonempty. If tx_work runs after this point * it will just return. 
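The hand-rolled kcm_wait_data() loop above is replaced by the generic datagram helper, which handles sleeping, signals, sk_err and SOCK_DONE itself and dequeues under the receive-queue spinlock rather than peeking, matching the skb_dequeue() change in requeue_rx_msgs() earlier in this file. Minimal consumer shape, assuming this tree's skb_recv_datagram() signature (the separate noblock argument was folded into flags in later kernels):

        int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int err = 0, copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;             /* -EAGAIN, -EINTR, or sock_error() */

        /* ... copy the payload out, set copied ... */

        skb_free_datagram(sk, skb);     /* consumes the skb, reclaims rmem */
        return copied;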
diff --git a/net/key/af_key.c b/net/key/af_key.c index 05e2710988883..8bc7d399987b2 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2909,7 +2909,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) break; if (!aalg->pfkey_supported) continue; - if (aalg_tmpl_set(t, aalg) && aalg->available) + if (aalg_tmpl_set(t, aalg)) sz += sizeof(struct sadb_comb); } return sz + sizeof(struct sadb_prop); @@ -2927,7 +2927,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!ealg->pfkey_supported) continue; - if (!(ealg_tmpl_set(t, ealg) && ealg->available)) + if (!(ealg_tmpl_set(t, ealg))) continue; for (k = 1; ; k++) { @@ -2938,16 +2938,17 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!aalg->pfkey_supported) continue; - if (aalg_tmpl_set(t, aalg) && aalg->available) + if (aalg_tmpl_set(t, aalg)) sz += sizeof(struct sadb_comb); } } return sz + sizeof(struct sadb_prop); } -static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) +static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) { struct sadb_prop *p; + int sz = 0; int i; p = skb_put(skb, sizeof(struct sadb_prop)); @@ -2975,13 +2976,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) c->sadb_comb_soft_addtime = 20*60*60; c->sadb_comb_hard_usetime = 8*60*60; c->sadb_comb_soft_usetime = 7*60*60; + sz += sizeof(*c); } } + + return sz + sizeof(*p); } -static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) +static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) { struct sadb_prop *p; + int sz = 0; int i, k; p = skb_put(skb, sizeof(struct sadb_prop)); @@ -3023,8 +3028,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) c->sadb_comb_soft_addtime = 20*60*60; c->sadb_comb_hard_usetime = 8*60*60; c->sadb_comb_soft_usetime = 7*60*60; + sz += sizeof(*c); } } + + return sz + sizeof(*p); } static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c) @@ -3154,6 +3162,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct struct sadb_x_sec_ctx *sec_ctx; struct xfrm_sec_ctx *xfrm_ctx; int ctx_size = 0; + int alg_size = 0; sockaddr_size = pfkey_sockaddr_size(x->props.family); if (!sockaddr_size) @@ -3165,16 +3174,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct sizeof(struct sadb_x_policy); if (x->id.proto == IPPROTO_AH) - size += count_ah_combs(t); + alg_size = count_ah_combs(t); else if (x->id.proto == IPPROTO_ESP) - size += count_esp_combs(t); + alg_size = count_esp_combs(t); if ((xfrm_ctx = x->security)) { ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len); size += sizeof(struct sadb_x_sec_ctx) + ctx_size; } - skb = alloc_skb(size + 16, GFP_ATOMIC); + skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; @@ -3228,10 +3237,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct pol->sadb_x_policy_priority = xp->priority; /* Set sadb_comb's. 
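The af_key change splits sizing from emission because algorithm availability can change between the two passes (xfrm_probe_algs() may run concurrently), which previously let the dump write more sadb_comb entries than were counted and overflow the skb. After the fix, count_*_combs() only bounds the allocation, dump_*_combs() returns the bytes it actually wrote, and sadb_msg_len, which PF_KEY counts in 64-bit words, is advanced from the latter, hence the /8. The contract as a sketch; count_combs()/dump_combs() are stand-ins for the AH/ESP pairs:

        int bound = count_combs(t);             /* upper bound, in bytes */

        skb = alloc_skb(size + bound + 16, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;
        /* ... build the fixed headers ... */
        written = dump_combs(skb, t);           /* bytes actually emitted */
        hdr->sadb_msg_len += written / 8;       /* PF_KEY length unit: 8 bytes */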
*/ + alg_size = 0; if (x->id.proto == IPPROTO_AH) - dump_ah_combs(skb, t); + alg_size = dump_ah_combs(skb, t); else if (x->id.proto == IPPROTO_ESP) - dump_esp_combs(skb, t); + alg_size = dump_esp_combs(skb, t); + + hdr->sadb_msg_len += alg_size / 8; /* security context */ if (xfrm_ctx) { diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 561b6d67ab8b9..a4b793d1b7d76 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -104,9 +104,9 @@ static struct workqueue_struct *l2tp_wq; /* per-net private data for this module */ static unsigned int l2tp_net_id; struct l2tp_net { - struct list_head l2tp_tunnel_list; - /* Lock for write access to l2tp_tunnel_list */ - spinlock_t l2tp_tunnel_list_lock; + /* Lock for write access to l2tp_tunnel_idr */ + spinlock_t l2tp_tunnel_idr_lock; + struct idr l2tp_tunnel_idr; struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; /* Lock for write access to l2tp_session_hlist */ spinlock_t l2tp_session_hlist_lock; @@ -208,13 +208,10 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) struct l2tp_tunnel *tunnel; rcu_read_lock_bh(); - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - if (tunnel->tunnel_id == tunnel_id && - refcount_inc_not_zero(&tunnel->ref_count)) { - rcu_read_unlock_bh(); - - return tunnel; - } + tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id); + if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) { + rcu_read_unlock_bh(); + return tunnel; } rcu_read_unlock_bh(); @@ -224,13 +221,14 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_get); struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) { - const struct l2tp_net *pn = l2tp_pernet(net); + struct l2tp_net *pn = l2tp_pernet(net); + unsigned long tunnel_id, tmp; struct l2tp_tunnel *tunnel; int count = 0; rcu_read_lock_bh(); - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - if (++count > nth && + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { + if (tunnel && ++count > nth && refcount_inc_not_zero(&tunnel->ref_count)) { rcu_read_unlock_bh(); return tunnel; @@ -1043,7 +1041,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); nf_reset_ct(skb); - bh_lock_sock(sk); + bh_lock_sock_nested(sk); if (sock_owned_by_user(sk)) { kfree_skb(skb); ret = NET_XMIT_DROP; @@ -1150,8 +1148,10 @@ static void l2tp_tunnel_destruct(struct sock *sk) } /* Remove hooks into tunnel socket */ + write_lock_bh(&sk->sk_callback_lock); sk->sk_destruct = tunnel->old_sk_destruct; sk->sk_user_data = NULL; + write_unlock_bh(&sk->sk_callback_lock); /* Call the original destructor */ if (sk->sk_destruct) @@ -1227,6 +1227,15 @@ static void l2tp_udp_encap_destroy(struct sock *sk) l2tp_tunnel_delete(tunnel); } +static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel) +{ + struct l2tp_net *pn = l2tp_pernet(net); + + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); + idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id); + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); +} + /* Workqueue tunnel deletion function */ static void l2tp_tunnel_del_work(struct work_struct *work) { @@ -1234,7 +1243,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work) del_work); struct sock *sk = tunnel->sock; struct socket *sock = sk->sk_socket; - struct l2tp_net *pn; l2tp_tunnel_closeall(tunnel); @@ -1248,12 +1256,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) } } - /* Remove the tunnel struct from the 
tunnel list */ - pn = l2tp_pernet(tunnel->l2tp_net); - spin_lock_bh(&pn->l2tp_tunnel_list_lock); - list_del_rcu(&tunnel->list); - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - + l2tp_tunnel_remove(tunnel->l2tp_net, tunnel); /* drop initial ref */ l2tp_tunnel_dec_refcount(tunnel); @@ -1384,8 +1387,6 @@ static int l2tp_tunnel_sock_create(struct net *net, return err; } -static struct lock_class_key l2tp_socket_class; - int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) { @@ -1455,12 +1456,19 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net, int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, struct l2tp_tunnel_cfg *cfg) { - struct l2tp_tunnel *tunnel_walk; - struct l2tp_net *pn; + struct l2tp_net *pn = l2tp_pernet(net); + u32 tunnel_id = tunnel->tunnel_id; struct socket *sock; struct sock *sk; int ret; + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); + ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id, + GFP_ATOMIC); + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); + if (ret) + return ret == -ENOSPC ? -EEXIST : ret; + if (tunnel->fd < 0) { ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id, tunnel->peer_tunnel_id, cfg, @@ -1471,30 +1479,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, sock = sockfd_lookup(tunnel->fd, &ret); if (!sock) goto err; - - ret = l2tp_validate_socket(sock->sk, net, tunnel->encap); - if (ret < 0) - goto err_sock; } - tunnel->l2tp_net = net; - pn = l2tp_pernet(net); - - spin_lock_bh(&pn->l2tp_tunnel_list_lock); - list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) { - if (tunnel_walk->tunnel_id == tunnel->tunnel_id) { - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - - ret = -EEXIST; - goto err_sock; - } - } - list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - sk = sock->sk; - sock_hold(sk); - tunnel->sock = sk; + lock_sock(sk); + write_lock_bh(&sk->sk_callback_lock); + ret = l2tp_validate_socket(sk, net, tunnel->encap); + if (ret < 0) + goto err_inval_sock; + rcu_assign_sk_user_data(sk, tunnel); + write_unlock_bh(&sk->sk_callback_lock); if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { struct udp_tunnel_sock_cfg udp_cfg = { @@ -1505,15 +1499,20 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, }; setup_udp_tunnel_sock(net, sock, &udp_cfg); - } else { - sk->sk_user_data = tunnel; } tunnel->old_sk_destruct = sk->sk_destruct; sk->sk_destruct = &l2tp_tunnel_destruct; - lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, - "l2tp_sock"); sk->sk_allocation = GFP_ATOMIC; + release_sock(sk); + + sock_hold(sk); + tunnel->sock = sk; + tunnel->l2tp_net = net; + + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); + idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id); + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); trace_register_tunnel(tunnel); @@ -1522,12 +1521,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, return 0; -err_sock: +err_inval_sock: + write_unlock_bh(&sk->sk_callback_lock); + release_sock(sk); + if (tunnel->fd < 0) sock_release(sock); else sockfd_put(sock); err: + l2tp_tunnel_remove(net, tunnel); return ret; } EXPORT_SYMBOL_GPL(l2tp_tunnel_register); @@ -1641,8 +1644,8 @@ static __net_init int l2tp_init_net(struct net *net) struct l2tp_net *pn = net_generic(net, l2tp_net_id); int hash; - INIT_LIST_HEAD(&pn->l2tp_tunnel_list); - spin_lock_init(&pn->l2tp_tunnel_list_lock); + 
idr_init(&pn->l2tp_tunnel_idr); + spin_lock_init(&pn->l2tp_tunnel_idr_lock); for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); @@ -1656,11 +1659,13 @@ static __net_exit void l2tp_exit_net(struct net *net) { struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel = NULL; + unsigned long tunnel_id, tmp; int hash; rcu_read_lock_bh(); - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - l2tp_tunnel_delete(tunnel); + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { + if (tunnel) + l2tp_tunnel_delete(tunnel); } rcu_read_unlock_bh(); @@ -1670,6 +1675,7 @@ static __net_exit void l2tp_exit_net(struct net *net) for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash])); + idr_destroy(&pn->l2tp_tunnel_idr); } static struct pernet_operations l2tp_net_ops = { diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 4b4ab1961068f..92e5812daf892 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -491,7 +491,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) { struct tid_ampdu_tx *tid_tx; struct ieee80211_local *local = sta->local; - struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_sub_if_data *sdata; struct ieee80211_ampdu_params params = { .sta = &sta->sta, .action = IEEE80211_AMPDU_TX_START, @@ -521,6 +521,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) */ synchronize_net(); + sdata = sta->sdata; params.ssn = sta->tid_seq[tid] >> 4; ret = drv_ampdu_action(local, sdata, ¶ms); tid_tx->ssn = params.ssn; @@ -534,6 +535,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) */ set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state); } else if (ret) { + if (!sdata) + return; + ht_dbg(sdata, "BA request denied - HW unavailable for %pM tid %d\n", sta->sta.addr, tid); diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c index 26d2f8ba70297..758ef63669e7b 100644 --- a/net/mac80211/airtime.c +++ b/net/mac80211/airtime.c @@ -457,6 +457,9 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw, (status->encoding == RX_ENC_HE && streams > 8))) return 0; + if (idx >= MCS_GROUP_RATES) + return 0; + duration = airtime_mcs_groups[group].duration[idx]; duration <<= airtime_mcs_groups[group].shift; *overhead = 36 + (streams << 2); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 8010967a68741..c6a7f1c99abc5 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3357,9 +3357,6 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_MESH_POINT: { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - if (params->chandef.width != sdata->vif.bss_conf.chandef.width) - return -EINVAL; - /* changes into another band are not supported */ if (sdata->vif.bss_conf.chandef.chan->band != params->chandef.chan->band) diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index 48322e45e7ddb..120bd9cdf7dfa 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -331,6 +331,9 @@ int drv_ampdu_action(struct ieee80211_local *local, might_sleep(); + if (!sdata) + return -EIO; + sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return -EIO; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index bcc94cc1b6201..bd349ae9ee4b4 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -644,6 +644,26 @@ struct mesh_csa_settings { struct 
cfg80211_csa_settings settings; }; +/** + * struct mesh_table + * + * @known_gates: list of known mesh gates and their mpaths by the station. The + * gate's mpath may or may not be resolved and active. + * @gates_lock: protects updates to known_gates + * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr + * @walk_head: linked list containing all mesh_path objects + * @walk_lock: lock protecting walk_head + * @entries: number of entries in the table + */ +struct mesh_table { + struct hlist_head known_gates; + spinlock_t gates_lock; + struct rhashtable rhead; + struct hlist_head walk_head; + spinlock_t walk_lock; + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ +}; + struct ieee80211_if_mesh { struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; @@ -718,8 +738,8 @@ struct ieee80211_if_mesh { /* offset from skb->data while building IE */ int meshconf_offset; - struct mesh_table *mesh_paths; - struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ + struct mesh_table mesh_paths; + struct mesh_table mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; int mpp_paths_generation; }; @@ -1485,7 +1505,6 @@ struct ieee802_11_elems { const u8 *supp_rates; const u8 *ds_params; const struct ieee80211_tim_ie *tim; - const u8 *challenge; const u8 *rsn; const u8 *rsnx; const u8 *erp_info; @@ -1538,7 +1557,6 @@ struct ieee802_11_elems { u8 ssid_len; u8 supp_rates_len; u8 tim_len; - u8 challenge_len; u8 rsn_len; u8 rsnx_len; u8 ext_supp_rates_len; @@ -1553,6 +1571,8 @@ struct ieee802_11_elems { u8 country_elem_len; u8 bssid_index_len; + void *nontx_profile; + /* whether a parse error occurred while retrieving these elements */ bool parse_error; }; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 73893025922fd..ae90ac3be59aa 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1349,8 +1349,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) ieee80211_led_exit(local); destroy_workqueue(local->workqueue); fail_workqueue: - if (local->wiphy_ciphers_allocated) + if (local->wiphy_ciphers_allocated) { kfree(local->hw.wiphy->cipher_suites); + local->wiphy_ciphers_allocated = false; + } kfree(local->int_scan_req); return result; } @@ -1420,8 +1422,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw) mutex_destroy(&local->iflist_mtx); mutex_destroy(&local->mtx); - if (local->wiphy_ciphers_allocated) + if (local->wiphy_ciphers_allocated) { kfree(local->hw.wiphy->cipher_suites); + local->wiphy_ciphers_allocated = false; + } idr_for_each(&local->ack_status_frames, ieee80211_free_ack_frame, NULL); diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 40492d1bd8fda..b2b717a78114f 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -127,26 +127,6 @@ struct mesh_path { u32 path_change_count; }; -/** - * struct mesh_table - * - * @known_gates: list of known mesh gates and their mpaths by the station. The - * gate's mpath may or may not be resolved and active. 
- * @gates_lock: protects updates to known_gates - * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr - * @walk_head: linked list containging all mesh_path objects - * @walk_lock: lock protecting walk_head - * @entries: number of entries in the table - */ -struct mesh_table { - struct hlist_head known_gates; - spinlock_t gates_lock; - struct rhashtable rhead; - struct hlist_head walk_head; - spinlock_t walk_lock; - atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ -}; - /* Recent multicast cache */ /* RMC_BUCKETS must be a power of 2, maximum 256 */ #define RMC_BUCKETS 256 @@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); void mesh_path_flush_pending(struct mesh_path *mpath); void mesh_path_tx_pending(struct mesh_path *mpath); -int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); +void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); void mesh_path_timer(struct timer_list *t); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 870c8eafef929..d936ef0c17a37 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr) mesh_path_free_rcu(tbl, mpath); } -static struct mesh_table *mesh_table_alloc(void) +static void mesh_table_init(struct mesh_table *tbl) { - struct mesh_table *newtbl; + INIT_HLIST_HEAD(&tbl->known_gates); + INIT_HLIST_HEAD(&tbl->walk_head); + atomic_set(&tbl->entries, 0); + spin_lock_init(&tbl->gates_lock); + spin_lock_init(&tbl->walk_lock); - newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); - if (!newtbl) - return NULL; - - INIT_HLIST_HEAD(&newtbl->known_gates); - INIT_HLIST_HEAD(&newtbl->walk_head); - atomic_set(&newtbl->entries, 0); - spin_lock_init(&newtbl->gates_lock); - spin_lock_init(&newtbl->walk_lock); - if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) { - kfree(newtbl); - return NULL; - } - - return newtbl; + /* rhashtable_init() may fail only in case of wrong + * mesh_rht_params + */ + WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params)); } static void mesh_table_free(struct mesh_table *tbl) { rhashtable_free_and_destroy(&tbl->rhead, mesh_path_rht_free, tbl); - kfree(tbl); } /** @@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct mesh_path * mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata); + return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata); } struct mesh_path * mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata); + return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata); } static struct mesh_path * @@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx); + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx); } /** @@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - return 
__mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx); + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx); } /** @@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) int err; rcu_read_lock(); - tbl = mpath->sdata->u.mesh.mesh_paths; + tbl = &mpath->sdata->u.mesh.mesh_paths; spin_lock_bh(&mpath->state_lock); if (mpath->is_gate) { @@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, if (!new_mpath) return ERR_PTR(-ENOMEM); - tbl = sdata->u.mesh.mesh_paths; + tbl = &sdata->u.mesh.mesh_paths; spin_lock_bh(&tbl->walk_lock); mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead, &new_mpath->rhash, @@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, return -ENOMEM; memcpy(new_mpath->mpp, mpp, ETH_ALEN); - tbl = sdata->u.mesh.mpp_paths; + tbl = &sdata->u.mesh.mpp_paths; spin_lock_bh(&tbl->walk_lock); ret = rhashtable_lookup_insert_fast(&tbl->rhead, @@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, void mesh_plink_broken(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - struct mesh_table *tbl = sdata->u.mesh.mesh_paths; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; @@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) void mesh_path_flush_by_nexthop(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - struct mesh_table *tbl = sdata->u.mesh.mesh_paths; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; struct mesh_path *mpath; struct hlist_node *n; @@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, const u8 *proxy) { - struct mesh_table *tbl = sdata->u.mesh.mpp_paths; + struct mesh_table *tbl = &sdata->u.mesh.mpp_paths; struct mesh_path *mpath; struct hlist_node *n; @@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl) */ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) { - table_flush_by_iface(sdata->u.mesh.mesh_paths); - table_flush_by_iface(sdata->u.mesh.mpp_paths); + table_flush_by_iface(&sdata->u.mesh.mesh_paths); + table_flush_by_iface(&sdata->u.mesh.mpp_paths); } /** @@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) /* flush relevant mpp entries first */ mpp_flush_by_proxy(sdata, addr); - err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); + err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr); sdata->u.mesh.mesh_paths_generation++; return err; } @@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) struct mesh_path *gate; bool copy = false; - tbl = sdata->u.mesh.mesh_paths; + tbl = &sdata->u.mesh.mesh_paths; rcu_read_lock(); hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { @@ -718,7 +710,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - kfree_skb(skb); + ieee80211_free_txskb(&sdata->local->hw, skb); sdata->u.mesh.mshstats.dropped_frames_no_route++; } @@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) mesh_path_tx_pending(mpath); } -int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) +void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) { - struct mesh_table *tbl_path, *tbl_mpp; - int ret; - - tbl_path = 
mesh_table_alloc(); - if (!tbl_path) - return -ENOMEM; - - tbl_mpp = mesh_table_alloc(); - if (!tbl_mpp) { - ret = -ENOMEM; - goto free_path; - } - - sdata->u.mesh.mesh_paths = tbl_path; - sdata->u.mesh.mpp_paths = tbl_mpp; - - return 0; - -free_path: - mesh_table_free(tbl_path); - return ret; + mesh_table_init(&sdata->u.mesh.mesh_paths); + mesh_table_init(&sdata->u.mesh.mpp_paths); } static @@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { - mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths); - mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths); + mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths); + mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths); } void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) { - mesh_table_free(sdata->u.mesh.mesh_paths); - mesh_table_free(sdata->u.mesh.mpp_paths); + mesh_table_free(&sdata->u.mesh.mesh_paths); + mesh_table_free(&sdata->u.mesh.mpp_paths); } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3988403064ab6..c52b8eb7fb8a2 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2899,14 +2899,14 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; + const struct element *challenge; u8 *pos; - struct ieee802_11_elems elems; u32 tx_flags = 0; pos = mgmt->u.auth.variable; - ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems, - mgmt->bssid, auth_data->bss->bssid); - if (!elems.challenge) + challenge = cfg80211_find_elem(WLAN_EID_CHALLENGE, pos, + len - (pos - (u8 *)mgmt)); + if (!challenge) return; auth_data->expected_transaction = 4; drv_mgd_prepare_tx(sdata->local, sdata, 0); @@ -2914,7 +2914,8 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0, - elems.challenge - 2, elems.challenge_len + 2, + (void *)challenge, + challenge->datalen + sizeof(*challenge), auth_data->bss->bssid, auth_data->bss->bssid, auth_data->key, auth_data->key_len, auth_data->key_idx, tx_flags); @@ -3299,7 +3300,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, } capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, elems, - mgmt->bssid, assoc_data->bss->bssid); + mgmt->bssid, NULL); if (elems->aid_resp) aid = le16_to_cpu(elems->aid_resp->aid); @@ -3393,6 +3394,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, sdata_info(sdata, "AP bug: VHT operation missing from AssocResp\n"); } + kfree(bss_elems.nontx_profile); } /* @@ -3707,7 +3709,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, return; ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems, - mgmt->bssid, assoc_data->bss->bssid); + mgmt->bssid, NULL); if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && elems.timeout_int && @@ -4044,6 +4046,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; run_again(sdata, ifmgd->assoc_data->timeout); + kfree(elems.nontx_profile); return; } @@ -4221,7 +4224,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_report_disconnect(sdata, 
deauth_buf, sizeof(deauth_buf), true, WLAN_REASON_DEAUTH_LEAVING); - return; + goto free; } if (sta && elems.opmode_notif) @@ -4236,6 +4239,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, elems.cisco_dtpc_elem); ieee80211_bss_info_change_notify(sdata, changed); +free: + kfree(elems.nontx_profile); } void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index e991abb45f68f..97a63b940482d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1975,10 +1975,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + - NUM_DEFAULT_BEACON_KEYS) { - cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, - skb->data, - skb->len); + NUM_DEFAULT_BEACON_KEYS) { + if (rx->sdata->dev) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + skb->data, + skb->len); return RX_DROP_MONITOR; /* unexpected BIP keyidx */ } @@ -2126,7 +2127,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) /* either the frame has been decrypted or will be dropped */ status->flag |= RX_FLAG_DECRYPTED; - if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE)) + if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE && + rx->sdata->dev)) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, skb->data, skb->len); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index d6afaacaf7ef8..b241ff8c015a9 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -227,6 +227,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local, rx_status, beacon); } + kfree(elems.nontx_profile); + return bss; } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a1f129292ad88..7fa6efa8b83c1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1124,10 +1124,6 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, } else elem_parse_failed = true; break; - case WLAN_EID_CHALLENGE: - elems->challenge = pos; - elems->challenge_len = elen; - break; case WLAN_EID_VENDOR_SPECIFIC: if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && pos[2] == 0xf2) { @@ -1409,6 +1405,8 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) { if (elem->datalen < 2) continue; + if (elem->data[0] < 1 || elem->data[0] > 8) + continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 new_bssid[ETH_ALEN]; @@ -1485,6 +1483,11 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, nontransmitted_profile, nontransmitted_profile_len); + if (!nontransmitted_profile_len) { + nontransmitted_profile_len = 0; + kfree(nontransmitted_profile); + nontransmitted_profile = NULL; + } } crc = _ieee802_11_parse_elems_crc(start, len, action, elems, filter, @@ -1514,7 +1517,7 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, offsetofend(struct ieee80211_bssid_index, dtim_count)) elems->dtim_count = elems->bssid_index->dtim_count; - kfree(nontransmitted_profile); + elems->nontx_profile = nontransmitted_profile; return crc; } diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 1cf5ac09edcbc..a08240fe68a74 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -661,6 +661,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name, sdata->dev = ndev; sdata->wpan_dev.wpan_phy = local->hw.phy; 
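/* Note (editorial aside, an assumption about intent, not text from the
 * upstream change): the INIT_LIST_HEAD() added just below matters
 * because a list_head embedded in freshly allocated memory holds
 * garbage pointers, so any later list_del() or list walk on
 * wpan_dev.list would dereference them. Minimal sketch of the
 * invariant:
 *
 *	struct list_head head;
 *
 *	INIT_LIST_HEAD(&head);      head.next == head.prev == &head
 *	list_empty(&head);          true, and only defined after init
 *
 * Only from this point on are list_add()/list_del() on the head safe.
 */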
sdata->local = local; + INIT_LIST_HEAD(&sdata->wpan_dev.list); /* setup type-dependent data */ ret = ieee802154_setup_sdata(sdata, type); diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index c439125ef2b91..726b47a4611b5 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -132,7 +132,7 @@ static int ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr) { int hlen; - struct ieee802154_mac_cb *cb = mac_cb_init(skb); + struct ieee802154_mac_cb *cb = mac_cb(skb); skb_reset_mac_header(skb); @@ -294,8 +294,9 @@ void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi) { struct ieee802154_local *local = hw_to_local(hw); + struct ieee802154_mac_cb *cb = mac_cb_init(skb); - mac_cb(skb)->lqi = lqi; + cb->lqi = lqi; skb->pkt_type = IEEE802154_RX_MSG; skb_queue_tail(&local->skb_queue, skb); tasklet_schedule(&local->tasklet); diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 72398149e4d4f..1dcbdab9319bb 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1427,6 +1427,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev, free: kfree(table); out: + mdev->sysctl = NULL; return -ENOBUFS; } @@ -1436,6 +1437,9 @@ static void mpls_dev_sysctl_unregister(struct net_device *dev, struct net *net = dev_net(dev); struct ctl_table *table; + if (!mdev->sysctl) + return; + table = mdev->sysctl->ctl_table_arg; unregister_net_sysctl_table(mdev->sysctl); kfree(table); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 2e92384909241..3b154ad4945c4 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -40,7 +40,6 @@ static void subflow_req_destructor(struct request_sock *req) sock_put((struct sock *)subflow_req->msk); mptcp_token_destroy_request(req); - tcp_request_sock_ops.destructor(req); } static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, @@ -359,9 +358,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) mptcp_subflow_reset(sk); } -struct request_sock_ops mptcp_subflow_request_sock_ops; -EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops); -static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops; +static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init; +static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init; static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) { @@ -373,7 +371,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop; - return tcp_conn_request(&mptcp_subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops, &subflow_request_sock_ipv4_ops, sk, skb); drop: @@ -381,10 +379,17 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) return 0; } +static void subflow_v4_req_destructor(struct request_sock *req) +{ + subflow_req_destructor(req); + tcp_request_sock_ops.destructor(req); +} + #if IS_ENABLED(CONFIG_MPTCP_IPV6) -static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops; -static struct inet_connection_sock_af_ops subflow_v6_specific; -static struct inet_connection_sock_af_ops subflow_v6m_specific; +static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init; +static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init; +static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init; +static struct inet_connection_sock_af_ops subflow_v6m_specific 
__ro_after_init; static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) { @@ -403,15 +408,36 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) return 0; } - return tcp_conn_request(&mptcp_subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops, &subflow_request_sock_ipv6_ops, sk, skb); drop: tcp_listendrop(sk); return 0; /* don't send reset */ } + +static void subflow_v6_req_destructor(struct request_sock *req) +{ + subflow_req_destructor(req); + tcp6_request_sock_ops.destructor(req); +} +#endif + +struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener, + bool attach_listener) +{ + if (ops->family == AF_INET) + ops = &mptcp_subflow_v4_request_sock_ops; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + else if (ops->family == AF_INET6) + ops = &mptcp_subflow_v6_request_sock_ops; #endif + return inet_reqsk_alloc(ops, sk_listener, attach_listener); +} +EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc); + /* validate hmac received in third ACK */ static bool subflow_hmac_valid(const struct request_sock *req, const struct mptcp_options_received *mp_opt) @@ -636,7 +662,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, return child; } -static struct inet_connection_sock_af_ops subflow_specific; +static struct inet_connection_sock_af_ops subflow_specific __ro_after_init; enum mapping_status { MAPPING_OK, @@ -1017,7 +1043,7 @@ static void subflow_write_space(struct sock *sk) } } -static struct inet_connection_sock_af_ops * +static const struct inet_connection_sock_af_ops * subflow_default_af_ops(struct sock *sk) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) @@ -1032,7 +1058,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); struct inet_connection_sock *icsk = inet_csk(sk); - struct inet_connection_sock_af_ops *target; + const struct inet_connection_sock_af_ops *target; target = mapped ? 
&subflow_v6m_specific : subflow_default_af_ops(sk);
 
@@ -1377,7 +1403,6 @@ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
 {
 	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
-	subflow_ops->slab_name = "request_sock_subflow";
 
 	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
 					      subflow_ops->obj_size, 0,
@@ -1387,16 +1412,17 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops)
 	if (!subflow_ops->slab)
 		return -ENOMEM;
 
-	subflow_ops->destructor = subflow_req_destructor;
-
 	return 0;
 }
 
 void __init mptcp_subflow_init(void)
 {
-	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
-	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
-		panic("MPTCP: failed to init subflow request sock ops\n");
+	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
+	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
+	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
+
+	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
+		panic("MPTCP: failed to init subflow v4 request sock ops\n");
 
 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
 	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;
@@ -1407,6 +1433,20 @@ void __init mptcp_subflow_init(void)
 	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
+	 * structures for v4 and v6 have the same size. It should not change in
+	 * the future, but better to make sure we are warned if that is no
+	 * longer the case.
+	 */
+	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
+
+	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
+	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
+	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
+
+	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
+		panic("MPTCP: failed to init subflow v6 request sock ops\n");
+
 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
 	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index a8ce04a4bb72a..e4fa00abde6a2 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -308,8 +308,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 			return -IPSET_ERR_BITMAP_RANGE;
 
 		pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
-		hosts = 2 << (32 - netmask - 1);
-		elements = 2 << (netmask - mask_bits - 1);
+		hosts = 2U << (32 - netmask - 1);
+		elements = 2UL << (netmask - mask_bits - 1);
 	}
 	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
 		return -IPSET_ERR_BITMAP_RANGE_SIZE;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index c17a7dda0163f..1bf6ab83644b3 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1708,9 +1708,10 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
 		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
 		ip_set_unlock(set);
 		retried = true;
-	} while (ret == -EAGAIN &&
-		 set->variant->resize &&
-		 (ret = set->variant->resize(set, retried)) == 0);
+	} while (ret == -ERANGE ||
+		 (ret == -EAGAIN &&
+		  set->variant->resize &&
+		  (ret = set->variant->resize(set, retried)) == 0));
 
 	if (!ret || (ret == -IPSET_ERR_EXIST
&& eexist)) return 0; diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index 5d6d68eaf6a9c..8720dc3bb6891 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -97,11 +97,11 @@ static int hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ip4 *h = set->data; + struct hash_ip4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0, hosts; + u32 ip = 0, ip_to = 0, hosts, i = 0; int ret = 0; if (tb[IPSET_ATTR_LINENO]) @@ -131,8 +131,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; - if (ip > ip_to) + if (ip > ip_to) { + if (ip_to == 0) + return -IPSET_ERR_HASH_ELEM; swap(ip, ip_to); + } } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); @@ -143,18 +146,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); - if (retried) { + if (retried) ip = ntohl(h->next.ip); + for (; ip <= ip_to; i++) { e.ip = htonl(ip); - } - for (; ip <= ip_to;) { + if (i > IPSET_MAX_RANGE) { + hash_ip4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ip += hosts; - e.ip = htonl(ip); - if (e.ip == 0) + if (ip == 0) return 0; ret = 0; diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c index aba1df617d6e6..cbb05cb188f22 100644 --- a/net/netfilter/ipset/ip_set_hash_ipmark.c +++ b/net/netfilter/ipset/ip_set_hash_ipmark.c @@ -96,11 +96,11 @@ static int hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipmark4 *h = set->data; + struct hash_ipmark4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipmark4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip, ip_to = 0; + u32 ip, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -120,6 +120,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK])); e.mark &= h->markmask; + if (e.mark == 0 && e.ip == 0) + return -IPSET_ERR_HASH_ELEM; if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) { @@ -132,8 +134,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; - if (ip > ip_to) + if (ip > ip_to) { + if (e.mark == 0 && ip_to == 0) + return -IPSET_ERR_HASH_ELEM; swap(ip, ip_to); + } } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); @@ -144,8 +149,12 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], if (retried) ip = ntohl(h->next.ip); - for (; ip <= ip_to; ip++) { + for (; ip <= ip_to; ip++, i++) { e.ip = htonl(ip); + if (i > IPSET_MAX_RANGE) { + hash_ipmark4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 1ff228717e298..c560f7873ecaf 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -104,11 +104,11 @@ static int hash_ipport4_uadt(struct 
ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipport4 *h = set->data; + struct hash_ipport4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipport4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to = 0, p = 0, port, port_to, i = 0; bool with_ports = false; int ret; @@ -177,9 +177,13 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], for (; ip <= ip_to; ip++) { p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) : port; - for (; p <= port_to; p++) { + for (; p <= port_to; p++, i++) { e.ip = htonl(ip); e.port = htons(p); + if (i > IPSET_MAX_RANGE) { + hash_ipport4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index fa88afd812fa6..b7eb8d1e77d9c 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -107,11 +107,11 @@ static int hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipportip4 *h = set->data; + struct hash_ipportip4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to = 0, p = 0, port, port_to, i = 0; bool with_ports = false; int ret; @@ -184,9 +184,13 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], for (; ip <= ip_to; ip++) { p = retried && ip == ntohl(h->next.ip) ? 
ntohs(h->next.port) : port; - for (; p <= port_to; p++) { + for (; p <= port_to; p++, i++) { e.ip = htonl(ip); e.port = htons(p); + if (i > IPSET_MAX_RANGE) { + hash_ipportip4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index eef6ecfcb4099..16c5641ced53c 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -159,12 +159,12 @@ static int hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipportnet4 *h = set->data; + struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to = 0, ip2; + u32 ip2_from = 0, ip2_to = 0, ip2, i = 0; bool with_ports = false; u8 cidr; int ret; @@ -278,9 +278,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], for (; p <= port_to; p++) { e.port = htons(p); do { + i++; e.ip2 = htonl(ip2); ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr); e.cidr = cidr - 1; + if (i > IPSET_MAX_RANGE) { + hash_ipportnet4_data_next(&h->next, + &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 136cf0781d3a4..5ab5873d1d162 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -135,11 +135,11 @@ static int hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_net4 *h = set->data; + struct hash_net4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_net4_elem e = { .cidr = HOST_MASK }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0; + u32 ip = 0, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -187,10 +187,16 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } + if (retried) ip = ntohl(h->next.ip); do { + i++; e.ip = htonl(ip); + if (i > IPSET_MAX_RANGE) { + hash_net4_data_next(&h->next, &e); + return -ERANGE; + } ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index be5e95a0d8762..7ef240380a457 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -201,7 +201,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0; + u32 ip = 0, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -259,7 +259,12 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], if (retried) ip = ntohl(h->next.ip); do { + i++; e.ip = htonl(ip); + if (i > IPSET_MAX_RANGE) { + hash_netiface4_data_next(&h->next, &e); + return -ERANGE; + } ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr); ret = adtfn(set, &e, &ext, &ext, flags); diff --git 
a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index da4ef910b12d1..15f4b0292f0de 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c @@ -162,12 +162,12 @@ static int hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_netnet4 *h = set->data; + struct hash_netnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netnet4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0; - u32 ip2 = 0, ip2_from = 0, ip2_to = 0; + u32 ip2 = 0, ip2_from = 0, ip2_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -255,7 +255,12 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], e.ip[0] = htonl(ip); ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); do { + i++; e.ip[1] = htonl(ip2); + if (i > IPSET_MAX_RANGE) { + hash_netnet4_data_next(&h->next, &e); + return -ERANGE; + } ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 34448df80fb98..e73ba50afe96f 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -153,11 +153,11 @@ static int hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_netport4 *h = set->data; + struct hash_netport4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 port, port_to, p = 0, ip = 0, ip_to = 0; + u32 port, port_to, p = 0, ip = 0, ip_to = 0, i = 0; bool with_ports = false; u8 cidr; int ret; @@ -245,8 +245,12 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], e.ip = htonl(ip); ip = ip_set_range_to_cidr(ip, ip_to, &cidr); e.cidr = cidr - 1; - for (; p <= port_to; p++) { + for (; p <= port_to; p++, i++) { e.port = htons(p); + if (i > IPSET_MAX_RANGE) { + hash_netport4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index 934c1712cba85..144346faffc13 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -172,16 +172,26 @@ hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } +static u32 +hash_netportnet4_range_to_cidr(u32 from, u32 to, u8 *cidr) +{ + if (from == 0 && to == UINT_MAX) { + *cidr = 0; + return to; + } + return ip_set_range_to_cidr(from, to, cidr); +} + static int hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_netportnet4 *h = set->data; + struct hash_netportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netportnet4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to = 0, ip2; + u32 ip2_from = 0, ip2_to = 0, ip2, i = 0; bool with_ports = false; int ret; @@ -295,13 +305,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], do { e.ip[0] = htonl(ip); - ip 
= ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); + ip = hash_netportnet4_range_to_cidr(ip, ip_to, &e.cidr[0]); for (; p <= port_to; p++) { e.port = htons(p); do { + i++; e.ip[1] = htonl(ip2); - ip2 = ip_set_range_to_cidr(ip2, ip2_to, - &e.cidr[1]); + if (i > IPSET_MAX_RANGE) { + hash_netportnet4_data_next(&h->next, + &e); + return -ERANGE; + } + ip2 = hash_netportnet4_range_to_cidr(ip2, + ip2_to, &e.cidr[1]); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index f9b16f2b22191..fdacbc3c15bef 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c @@ -599,13 +599,19 @@ static const struct seq_operations ip_vs_app_seq_ops = { int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs) { INIT_LIST_HEAD(&ipvs->app_list); - proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_seq_ops, - sizeof(struct seq_net_private)); +#ifdef CONFIG_PROC_FS + if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, + &ip_vs_app_seq_ops, + sizeof(struct seq_net_private))) + return -ENOMEM; +#endif return 0; } void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs) { unregister_ip_vs_app(ipvs, NULL /* all */); +#ifdef CONFIG_PROC_FS remove_proc_entry("ip_vs_app", ipvs->net->proc_net); +#endif } diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index fb67f1ca2495b..cb6d68220c265 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1265,8 +1265,8 @@ static inline int todrop_entry(struct ip_vs_conn *cp) * The drop rate array needs tuning for real environments. * Called from timer bh only => no locking */ - static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - static char todrop_counter[9] = {0}; + static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + static signed char todrop_counter[9] = {0}; int i; /* if the conn entry hasn't lasted for 60 seconds, don't drop it. 
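A note on the signed char change above (an editorial illustration, not kernel code): the signedness of plain char is implementation-defined in C, and it is unsigned by default on several architectures Linux supports (arm64 and s390 among them), so storing negative values in a plain char and comparing against zero is not portable. A minimal standalone demo:

	#include <stdio.h>

	int main(void)
	{
		char c = -1;         /* 255 where plain char is unsigned */
		signed char sc = -1; /* -1 on every architecture */

		printf("plain char:  %s\n", c < 0 ? "negative" : "non-negative");
		printf("signed char: %s\n", sc < 0 ? "negative" : "non-negative");
		return 0;
	}

On x86-64 both lines print "negative"; on arm64 the first prints "non-negative", which is exactly the divergence the todrop arrays avoid by spelling signed char explicitly.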
@@ -1447,20 +1447,36 @@ int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs) { atomic_set(&ipvs->conn_count, 0); - proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net, - &ip_vs_conn_seq_ops, sizeof(struct ip_vs_iter_state)); - proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net, - &ip_vs_conn_sync_seq_ops, - sizeof(struct ip_vs_iter_state)); +#ifdef CONFIG_PROC_FS + if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net, + &ip_vs_conn_seq_ops, + sizeof(struct ip_vs_iter_state))) + goto err_conn; + + if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net, + &ip_vs_conn_sync_seq_ops, + sizeof(struct ip_vs_iter_state))) + goto err_conn_sync; +#endif + return 0; + +#ifdef CONFIG_PROC_FS +err_conn_sync: + remove_proc_entry("ip_vs_conn", ipvs->net->proc_net); +err_conn: + return -ENOMEM; +#endif } void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs) { /* flush all the connection entries first */ ip_vs_conn_flush(ipvs); +#ifdef CONFIG_PROC_FS remove_proc_entry("ip_vs_conn", ipvs->net->proc_net); remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net); +#endif } int __init ip_vs_conn_init(void) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 8369af0c50eab..193a18bfddc0a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1598,7 +1598,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, } #ifdef CONFIG_NF_CONNTRACK_MARK - ct->mark = exp->master->mark; + ct->mark = READ_ONCE(exp->master->mark); #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK ct->secmark = exp->master->secmark; diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index e40988a2f22fb..65b5b05fe38d3 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -148,15 +148,37 @@ static int help(struct sk_buff *skb, unsigned int protoff, data = ib_ptr; data_limit = ib_ptr + skb->len - dataoff; - /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 - * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ - while (data < data_limit - (19 + MINMATCHLEN)) { - if (memcmp(data, "\1DCC ", 5)) { + /* Skip any whitespace */ + while (data < data_limit - 10) { + if (*data == ' ' || *data == '\r' || *data == '\n') + data++; + else + break; + } + + /* strlen("PRIVMSG x ")=10 */ + if (data < data_limit - 10) { + if (strncasecmp("PRIVMSG ", data, 8)) + goto out; + data += 8; + } + + /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26 + * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26 + */ + while (data < data_limit - (21 + MINMATCHLEN)) { + /* Find first " :", the start of message */ + if (memcmp(data, " :", 2)) { data++; continue; } + data += 2; + + /* then check that place only for the DCC command */ + if (memcmp(data, "\1DCC ", 5)) + goto out; data += 5; - /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ + /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */ iph = ip_hdr(skb); pr_debug("DCC found in master %pI4:%u %pI4:%u\n", @@ -172,7 +194,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, pr_debug("DCC %s detected\n", dccprotos[i]); /* we have at least - * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid + * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid * data left (== 14/13 bytes) */ if (parse_dcc(data, data_limit, &dcc_ip, &dcc_port, &addr_beg_p, &addr_end_p)) { @@ -185,8 +207,9 @@ static int help(struct sk_buff *skb, unsigned int protoff, /* dcc_ip can be the internal OR external (NAT'ed) IP */ tuple = &ct->tuplehash[dir].tuple; - if 
(tuple->src.u3.ip != dcc_ip && - tuple->dst.u3.ip != dcc_ip) { + if ((tuple->src.u3.ip != dcc_ip && + ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) || + dcc_port == 0) { net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", &tuple->src.u3.ip, &dcc_ip, dcc_port); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 9e6898164199b..2efdc50f978b0 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -319,7 +319,12 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) #ifdef CONFIG_NF_CONNTRACK_MARK static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) { - if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark))) + u32 mark = READ_ONCE(ct->mark); + + if (!mark) + return 0; + + if (nla_put_be32(skb, CTA_MARK, htonl(mark))) goto nla_put_failure; return 0; @@ -811,8 +816,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) } #ifdef CONFIG_NF_CONNTRACK_MARK - if ((events & (1 << IPCT_MARK) || ct->mark) - && ctnetlink_dump_mark(skb, ct) < 0) + if (events & (1 << IPCT_MARK) && + ctnetlink_dump_mark(skb, ct) < 0) goto nla_put_failure; #endif nlmsg_end(skb, nlh); @@ -1099,7 +1104,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data) } #ifdef CONFIG_NF_CONNTRACK_MARK - if ((ct->mark & filter->mark.mask) != filter->mark.val) + if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val) goto ignore_entry; #endif @@ -1979,9 +1984,9 @@ static void ctnetlink_change_mark(struct nf_conn *ct, mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK])); mark = ntohl(nla_get_be32(cda[CTA_MARK])); - newmark = (ct->mark & mask) ^ mark; - if (newmark != ct->mark) - ct->mark = newmark; + newmark = (READ_ONCE(ct->mark) & mask) ^ mark; + if (newmark != READ_ONCE(ct->mark)) + WRITE_ONCE(ct->mark, newmark); } #endif @@ -2726,7 +2731,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_MARK - if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0) + if (ctnetlink_dump_mark(skb, ct) < 0) goto nla_put_failure; #endif if (ctnetlink_dump_labels(skb, ct) < 0) diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c index facd8c64ec4eb..f1a87de1c60ef 100644 --- a/net/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/netfilter/nf_conntrack_proto_icmpv6.c @@ -130,6 +130,56 @@ static void icmpv6_error_log(const struct sk_buff *skb, IPPROTO_ICMPV6, "%s", msg); } +static noinline_for_stack int +nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state) +{ + u8 hl = ipv6_hdr(skb)->hop_limit; + union nf_inet_addr outer_daddr; + union { + struct nd_opt_hdr nd_opt; + struct rd_msg rd_msg; + } tmp; + const struct nd_opt_hdr *nd_opt; + const struct rd_msg *rd_msg; + + rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg); + if (!rd_msg) { + icmpv6_error_log(skb, state, "short redirect"); + return -NF_ACCEPT; + } + + if (rd_msg->icmph.icmp6_code != 0) + return NF_ACCEPT; + + if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { + icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect"); + return -NF_ACCEPT; + } + + dataoff += sizeof(*rd_msg); + + /* warning: rd_msg no longer usable after this call */ + nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt); + if (!nd_opt || nd_opt->nd_opt_len == 0) { + 
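/* Editorial note, not part of the change itself: nd_opt_len counts
 * units of 8 octets and includes the type and length bytes (RFC 4861,
 * section 4.6), so a value of zero is invalid in every ND option and
 * such a packet can be rejected without any further parsing.
 */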
icmpv6_error_log(skb, state, "redirect without options"); + return -NF_ACCEPT; + } + + /* We could call ndisc_parse_options(), but it would need + * skb_linearize() and a bit more work. + */ + if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR) + return NF_ACCEPT; + + memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr, + sizeof(outer_daddr.ip6)); + dataoff += 8; + return nf_conntrack_inet_error(tmpl, skb, dataoff, state, + IPPROTO_ICMPV6, &outer_daddr); +} + int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, @@ -160,6 +210,9 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, return NF_ACCEPT; } + if (icmp6h->icmp6_type == NDISC_REDIRECT) + return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state); + /* is not error message ? */ if (icmp6h->icmp6_type >= 128) return NF_ACCEPT; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 7626f3e1c70a7..cec4b16170a0b 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -27,22 +27,16 @@ #include #include -/* FIXME: Examine ipfilter's timeouts and conntrack transitions more - closely. They're more complex. --RR - - And so for me for SCTP :D -Kiran */ - static const char *const sctp_conntrack_names[] = { - "NONE", - "CLOSED", - "COOKIE_WAIT", - "COOKIE_ECHOED", - "ESTABLISHED", - "SHUTDOWN_SENT", - "SHUTDOWN_RECD", - "SHUTDOWN_ACK_SENT", - "HEARTBEAT_SENT", - "HEARTBEAT_ACKED", + [SCTP_CONNTRACK_NONE] = "NONE", + [SCTP_CONNTRACK_CLOSED] = "CLOSED", + [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT", + [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED", + [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED", + [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT", + [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD", + [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT", + [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT", }; #define SECS * HZ @@ -54,12 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_CLOSED] = 10 SECS, [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, - [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS, + [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, - [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 @@ -73,7 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT #define sHS SCTP_CONNTRACK_HEARTBEAT_SENT -#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED #define sIV SCTP_CONNTRACK_MAX /* @@ -96,9 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of the SHUTDOWN chunk. Connection is closed. HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow. -HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to - that of the HEARTBEAT chunk. Secondary connection is - established. */ /* TODO @@ -115,33 +104,33 @@ cookie echoed to closed. 
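For orientation (an editorial sketch; the names below are invented): the transition table that follows is consulted roughly as next = sctp_conntracks[direction][chunk_type][current_state], and dropping HEARTBEAT_ACKED shrinks the state axis from ten entries to nine. A toy model of the lookup:

	enum dir { DIR_ORIGINAL = 0, DIR_REPLY = 1 };

	/* 2 directions x 11 chunk types x 9 states -> next state */
	unsigned char next_state(const unsigned char t[2][11][9],
				 enum dir d, int chunk, unsigned char cur)
	{
		return t[d][chunk][cur];
	}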
static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA}, -/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA}, -/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, -/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS}, -/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA}, -/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/ -/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */ -/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */ -/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA}, -/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}, -/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA} +/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ +/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, +/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, +/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, +/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA}, +/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/ +/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */ +/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */ +/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL}, +/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, +/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, }, { /* REPLY */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */ -/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */ -/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA}, -/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL}, -/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR}, -/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA}, -/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA}, -/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */ -/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA}, -/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA}, -/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}, -/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA} +/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ +/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */ +/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV}, +/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV}, +/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV}, +/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV}, +/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV}, +/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */ +/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV}, +/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV}, +/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, +/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES}, } }; @@ -412,22 +401,29 @@ int nf_conntrack_sctp_packet(struct 
nf_conn *ct, for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { /* Special cases of Verification tag check (Sec 8.5.1) */ if (sch->type == SCTP_CID_INIT) { - /* Sec 8.5.1 (A) */ + /* (A) vtag MUST be zero */ if (sh->vtag != 0) goto out_unlock; } else if (sch->type == SCTP_CID_ABORT) { - /* Sec 8.5.1 (B) */ - if (sh->vtag != ct->proto.sctp.vtag[dir] && - sh->vtag != ct->proto.sctp.vtag[!dir]) + /* (B) vtag MUST match own vtag if T flag is unset OR + * MUST match peer's vtag if T flag is set + */ + if ((!(sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[dir]) || + ((sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[!dir])) goto out_unlock; } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { - /* Sec 8.5.1 (C) */ - if (sh->vtag != ct->proto.sctp.vtag[dir] && - sh->vtag != ct->proto.sctp.vtag[!dir] && - sch->flags & SCTP_CHUNK_FLAG_T) + /* (C) vtag MUST match own vtag if T flag is unset OR + * MUST match peer's vtag if T flag is set + */ + if ((!(sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[dir]) || + ((sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[!dir])) goto out_unlock; } else if (sch->type == SCTP_CID_COOKIE_ECHO) { - /* Sec 8.5.1 (D) */ + /* (D) vtag must be same as init_vtag as found in INIT_ACK */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; } else if (sch->type == SCTP_CID_HEARTBEAT) { @@ -501,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, } ct->proto.sctp.state = new_state; - if (old_state != new_state) + if (old_state != new_state) { nf_conntrack_event_cache(IPCT_PROTOINFO, ct); + if (new_state == SCTP_CONNTRACK_ESTABLISHED && + !test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) + nf_conntrack_event_cache(IPCT_ASSURED, ct); + } } spin_unlock_bh(&ct->lock); @@ -516,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); - if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED && - dir == IP_CT_DIR_REPLY && - new_state == SCTP_CONNTRACK_ESTABLISHED) { - pr_debug("Setting assured bit\n"); - set_bit(IPS_ASSURED_BIT, &ct->status); - nf_conntrack_event_cache(IPCT_ASSURED, ct); - } - return NF_ACCEPT; out_unlock: diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 3f785bdfa942d..c1d02c0b4f005 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1158,6 +1158,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, nf_ct_kill_acct(ct, ctinfo, skb); return NF_ACCEPT; } + + if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) { + /* do not renew timeout on SYN retransmit. + * + * Else port reuse by client or NAT middlebox can keep + * entry alive indefinitely (including nat info). + */ + return NF_ACCEPT; + } + /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection * pickup with loose=1. Avoid large ESTABLISHED timeout. 
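 * (Editorial gloss, an inference about intent: with loose pickup the
 *  first packet of a rediscovered flow lands straight in ESTABLISHED,
 *  so without this cap a single stray packet could pin the entry for
 *  the full multi-day established timeout.)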
*/ diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index b83dc9bf0a5dd..78fd9122b70c7 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, return ret; if (ret == 0) break; - dataoff += *matchoff; + dataoff = *matchoff; } *in_header = 0; } @@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, break; if (ret == 0) return ret; - dataoff += *matchoff; + dataoff = *matchoff; } if (in_header) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 313d1c8ff066a..e12b52019a550 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -360,7 +360,7 @@ static int ct_seq_show(struct seq_file *s, void *v) goto release; #if defined(CONFIG_NF_CONNTRACK_MARK) - seq_printf(s, "mark=%u ", ct->mark); + seq_printf(s, "mark=%u ", READ_ONCE(ct->mark)); #endif ct_show_secctx(s, ct); @@ -583,7 +583,6 @@ enum nf_ct_sysctl_index { NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT, - NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED, #endif #ifdef CONFIG_NF_CT_PROTO_DCCP NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST, @@ -853,12 +852,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { - .procname = "nf_conntrack_sctp_timeout_heartbeat_acked", - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, #endif #ifdef CONFIG_NF_CT_PROTO_DCCP [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = { @@ -987,7 +980,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net, XASSIGN(SHUTDOWN_RECD, sn); XASSIGN(SHUTDOWN_ACK_SENT, sn); XASSIGN(HEARTBEAT_SENT, sn); - XASSIGN(HEARTBEAT_ACKED, sn); #undef XASSIGN #endif } diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index d1862782be450..746ca77d0aad6 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -305,12 +305,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, const __be32 *addr, const __be32 *mask) { struct flow_action_entry *entry; - int i, j; + int i; - for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) { + for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) { entry = flow_action_entry_next(flow_rule); flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6, - offset + i, &addr[j], mask); + offset + i * sizeof(u32), &addr[i], mask); } } @@ -910,6 +910,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable, struct flow_block_cb *block_cb, *next; int err = 0; + down_write(&flowtable->flow_block_lock); switch (cmd) { case FLOW_BLOCK_BIND: list_splice(&bo->cb_list, &flowtable->flow_block.cb_list); @@ -924,6 +925,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable, WARN_ON_ONCE(1); err = -EOPNOTSUPP; } + up_write(&flowtable->flow_block_lock); return err; } @@ -980,7 +982,9 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo, nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable, extack); + down_write(&flowtable->flow_block_lock); err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo); + up_write(&flowtable->flow_block_lock); if 
(err < 0) return err; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1b039476e4d6a..2143edafba772 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1971,8 +1971,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; if (chain->flags & NFT_CHAIN_HW_OFFLOAD && - !nft_chain_offload_support(basechain)) + !nft_chain_offload_support(basechain)) { + list_splice_init(&basechain->hook_list, &hook->list); return -EOPNOTSUPP; + } flow_block_init(&basechain->flow_block); @@ -1999,7 +2001,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, u8 policy, u32 flags) { const struct nlattr * const *nla = ctx->nla; - struct nft_stats __percpu *stats = NULL; struct nft_table *table = ctx->table; struct nft_base_chain *basechain; struct net *net = ctx->net; @@ -2013,6 +2014,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return -EOVERFLOW; if (nla[NFTA_CHAIN_HOOK]) { + struct nft_stats __percpu *stats = NULL; struct nft_chain_hook hook; if (flags & NFT_CHAIN_BINDING) @@ -2043,8 +2045,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (err < 0) { nft_chain_release_hook(&hook); kfree(basechain); + free_percpu(stats); return err; } + if (stats) + static_branch_inc(&nft_counters_enabled); } else { if (flags & NFT_CHAIN_BASE) return -EINVAL; @@ -2119,9 +2124,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, goto err_unregister_hook; } - if (stats) - static_branch_inc(&nft_counters_enabled); - table->use++; return 0; @@ -7525,9 +7527,6 @@ static void nft_commit_release(struct nft_trans *trans) nf_tables_chain_destroy(&trans->ctx); break; case NFT_MSG_DELRULE: - if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) - nft_flow_rule_destroy(nft_trans_flow_rule(trans)); - nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); break; case NFT_MSG_DELSET: @@ -7971,6 +7970,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans), NFT_TRANS_COMMIT); + + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_NEWSET: nft_clear(net, nft_trans_set(trans)); diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 79fbf37291f38..51e3953b414c0 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb, struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; struct tcphdr _tcph; + bool found = false; memset(&ctx, 0, sizeof(ctx)); @@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb, data->genre = f->genre; data->version = f->version; + found = true; break; } - return true; + return found; } EXPORT_SYMBOL_GPL(nf_osf_find); diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 781118465d466..14093d86e6823 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -97,7 +97,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, return; #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: - *dest = ct->mark; + *dest = READ_ONCE(ct->mark); return; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK @@ -294,8 +294,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr, switch (priv->key) { #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: - if (ct->mark != value) { - ct->mark = 
value; + if (READ_ONCE(ct->mark) != value) { + WRITE_ONCE(ct->mark, value); nf_conntrack_event_cache(IPCT_MARK, ct); } break; diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 551e0d6cf63d4..74c220eeec1a8 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -62,7 +62,7 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len) return false; if (offset + len > VLAN_ETH_HLEN + vlan_hlen) - ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen; + ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen; memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen); diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 949da87dbb063..30cf0673d6c19 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, struct nft_pipapo_match *m = priv->clone; u8 genmask = nft_genmask_next(net); struct nft_pipapo_field *f; + const u8 *start_p, *end_p; int i, bsize_max, err = 0; if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) @@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, } /* Validate */ + start_p = start; + end_p = end; nft_pipapo_for_each_field(f, i, m) { - const u8 *start_p = start, *end_p = end; - if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX) return -ENOSPC; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 94a5446c5eae8..4b9a499fe8f4d 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe) return !nft_rbtree_interval_end(rbe); } -static bool nft_rbtree_equal(const struct nft_set *set, const void *this, - const struct nft_rbtree_elem *interval) +static int nft_rbtree_cmp(const struct nft_set *set, + const struct nft_rbtree_elem *e1, + const struct nft_rbtree_elem *e2) { - return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0; + return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext), + set->klen); } static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set, @@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set const struct nft_rbtree_elem *rbe, *interval = NULL; u8 genmask = nft_genmask_cur(net); const struct rb_node *parent; - const void *this; int d; parent = rcu_dereference_raw(priv->root.rb_node); @@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set rbe = rb_entry(parent, struct nft_rbtree_elem, node); - this = nft_set_ext_key(&rbe->ext); - d = memcmp(this, key, set->klen); + d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen); if (d < 0) { parent = rcu_dereference_raw(parent->rb_left); if (interval && - nft_rbtree_equal(set, this, interval) && + !nft_rbtree_cmp(set, rbe, interval) && nft_rbtree_interval_end(rbe) && nft_rbtree_interval_start(interval)) continue; @@ -214,154 +214,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set, return rbe; } +static int nft_rbtree_gc_elem(const struct nft_set *__set, + struct nft_rbtree *priv, + struct nft_rbtree_elem *rbe) +{ + struct nft_set *set = (struct nft_set *)__set; + struct rb_node *prev = rb_prev(&rbe->node); + struct nft_rbtree_elem *rbe_prev; + struct nft_set_gc_batch *gcb; + + gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC); + if (!gcb) + return -ENOMEM; + + /* 
search for expired end interval coming before this element. */ + do { + rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); + if (nft_rbtree_interval_end(rbe_prev)) + break; + + prev = rb_prev(prev); + } while (prev != NULL); + + rb_erase(&rbe_prev->node, &priv->root); + rb_erase(&rbe->node, &priv->root); + atomic_sub(2, &set->nelems); + + nft_set_gc_batch_add(gcb, rbe); + nft_set_gc_batch_complete(gcb); + + return 0; +} + +static bool nft_rbtree_update_first(const struct nft_set *set, + struct nft_rbtree_elem *rbe, + struct rb_node *first) +{ + struct nft_rbtree_elem *first_elem; + + first_elem = rb_entry(first, struct nft_rbtree_elem, node); + /* this element is closest to where the new element is to be inserted: + * update the first element for the node list path. + */ + if (nft_rbtree_cmp(set, rbe, first_elem) < 0) + return true; + + return false; +} + static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, struct nft_rbtree_elem *new, struct nft_set_ext **ext) { - bool overlap = false, dup_end_left = false, dup_end_right = false; + struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL; + struct rb_node *node, *parent, **p, *first = NULL; struct nft_rbtree *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); - struct nft_rbtree_elem *rbe; - struct rb_node *parent, **p; - int d; + int d, err; - /* Detect overlaps as we descend the tree. Set the flag in these cases: - * - * a1. _ _ __>| ?_ _ __| (insert end before existing end) - * a2. _ _ ___| ?_ _ _>| (insert end after existing end) - * a3. _ _ ___? >|_ _ __| (insert start before existing end) - * - * and clear it later on, as we eventually reach the points indicated by - * '?' above, in the cases described below. We'll always meet these - * later, locally, due to tree ordering, and overlaps for the intervals - * that are the closest together are always evaluated last. - * - * b1. _ _ __>| !_ _ __| (insert end before existing start) - * b2. _ _ ___| !_ _ _>| (insert end after existing start) - * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf) - * '--' no nodes falling in this range - * b4. >|_ _ ! (insert start before existing start) - * - * Case a3. resolves to b3.: - * - if the inserted start element is the leftmost, because the '0' - * element in the tree serves as end element - * - otherwise, if an existing end is found immediately to the left. If - * there are existing nodes in between, we need to further descend the - * tree before we can conclude the new start isn't causing an overlap - * - * or to b4., which, preceded by a3., means we already traversed one or - * more existing intervals entirely, from the right. - * - * For a new, rightmost pair of elements, we'll hit cases b3. and b2., - * in that order. - * - * The flag is also cleared in two special cases: - * - * b5. |__ _ _!|<_ _ _ (insert start right before existing end) - * b6. |__ _ >|!__ _ _ (insert end right after existing start) - * - * which always happen as last step and imply that no further - * overlapping is possible. - * - * Another special case comes from the fact that start elements matching - * an already existing start element are allowed: insertion is not - * performed but we return -EEXIST in that case, and the error will be - * cleared by the caller if NLM_F_EXCL is not present in the request. - * This way, request for insertion of an exact overlap isn't reported as - * error to userspace if not desired. 
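
The garbage collector added above relies on an invariant of this set type: a range occupies two tree nodes, a start element and an end element, and because elements are kept in reversed order (highest key first) the end node of a range comes before its start node in an ordered walk. An expired element therefore cannot be erased alone; the collector walks backwards to the nearest end node and erases both, so no half-open interval is left behind. A minimal, self-contained userspace sketch of that idea (illustrative names only; a plain array stands in for the rbtree, and the kernel version erases under the set lock and queues the elements on a GC batch instead of freeing inline):

#include <stdbool.h>
#include <stdio.h>

struct elem {
	int key;
	bool is_end;	/* closes an interval */
	bool expired;
};

static void remove_at(struct elem *arr, int *n, int idx)
{
	for (int i = idx; i < *n - 1; i++)
		arr[i] = arr[i + 1];
	(*n)--;
}

/* Rough mirror of nft_rbtree_gc_elem(): walk backwards from the
 * expired element to the nearest end element, then erase both. */
static int gc_elem(struct elem *arr, int *n, int idx)
{
	int prev = idx - 1;

	while (prev >= 0 && !arr[prev].is_end)
		prev--;
	if (prev < 0)
		return -1;	/* no paired end node found */

	remove_at(arr, n, idx);		/* the expired element */
	remove_at(arr, n, prev);	/* its paired end node */
	return 0;
}

int main(void)
{
	/* highest key first, as in the kernel tree: [20,30] and [5,10] */
	struct elem set[] = {
		{ .key = 30, .is_end = true },
		{ .key = 20, .expired = true },
		{ .key = 10, .is_end = true },
		{ .key = 5 },
	};
	int n = 4;

	gc_elem(set, &n, 1);
	for (int i = 0; i < n; i++)
		printf("%d%s\n", set[i].key, set[i].is_end ? " (end)" : "");
	return 0;
}
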
- * - * However, if the existing start matches a pre-existing start, but the - * end element doesn't match the corresponding pre-existing end element, - * we need to report a partial overlap. This is a local condition that - * can be noticed without need for a tracking flag, by checking for a - * local duplicated end for a corresponding start, from left and right, - * separately. + /* Descend the tree to search for an existing element greater than the + * key value to insert that is greater than the new element. This is the + * first element to walk the ordered elements to find possible overlap. */ - parent = NULL; p = &priv->root.rb_node; while (*p != NULL) { parent = *p; rbe = rb_entry(parent, struct nft_rbtree_elem, node); - d = memcmp(nft_set_ext_key(&rbe->ext), - nft_set_ext_key(&new->ext), - set->klen); + d = nft_rbtree_cmp(set, rbe, new); + if (d < 0) { p = &parent->rb_left; - - if (nft_rbtree_interval_start(new)) { - if (nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext) && !*p) - overlap = false; - } else { - if (dup_end_left && !*p) - return -ENOTEMPTY; - - overlap = nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, - genmask) && - !nft_set_elem_expired(&rbe->ext); - - if (overlap) { - dup_end_right = true; - continue; - } - } } else if (d > 0) { - p = &parent->rb_right; + if (!first || + nft_rbtree_update_first(set, rbe, first)) + first = &rbe->node; - if (nft_rbtree_interval_end(new)) { - if (dup_end_right && !*p) - return -ENOTEMPTY; - - overlap = nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, - genmask) && - !nft_set_elem_expired(&rbe->ext); - - if (overlap) { - dup_end_left = true; - continue; - } - } else if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) { - overlap = nft_rbtree_interval_end(rbe); - } + p = &parent->rb_right; } else { - if (nft_rbtree_interval_end(rbe) && - nft_rbtree_interval_start(new)) { + if (nft_rbtree_interval_end(rbe)) p = &parent->rb_left; - - if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) - overlap = false; - } else if (nft_rbtree_interval_start(rbe) && - nft_rbtree_interval_end(new)) { + else p = &parent->rb_right; + } + } + + if (!first) + first = rb_first(&priv->root); + + /* Detect overlap by going through the list of valid tree nodes. + * Values stored in the tree are in reversed order, starting from + * highest to lowest value. + */ + for (node = first; node != NULL; node = rb_next(node)) { + rbe = rb_entry(node, struct nft_rbtree_elem, node); - if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) - overlap = false; - } else if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) { - *ext = &rbe->ext; - return -EEXIST; - } else { - overlap = false; - if (nft_rbtree_interval_end(rbe)) - p = &parent->rb_left; - else - p = &parent->rb_right; + if (!nft_set_elem_active(&rbe->ext, genmask)) + continue; + + /* perform garbage collection to avoid bogus overlap reports. */ + if (nft_set_elem_expired(&rbe->ext)) { + err = nft_rbtree_gc_elem(set, priv, rbe); + if (err < 0) + return err; + + continue; + } + + d = nft_rbtree_cmp(set, rbe, new); + if (d == 0) { + /* Matching end element: no need to look for an + * overlapping greater or equal element. + */ + if (nft_rbtree_interval_end(rbe)) { + rbe_le = rbe; + break; + } + + /* first element that is greater or equal to key value. 
*/ + if (!rbe_ge) { + rbe_ge = rbe; + continue; + } + + /* this is a closer more or equal element, update it. */ + if (nft_rbtree_cmp(set, rbe_ge, new) != 0) { + rbe_ge = rbe; + continue; + } + + /* element is equal to key value, make sure flags are + * the same, an existing more or equal start element + * must not be replaced by more or equal end element. + */ + if ((nft_rbtree_interval_start(new) && + nft_rbtree_interval_start(rbe_ge)) || + (nft_rbtree_interval_end(new) && + nft_rbtree_interval_end(rbe_ge))) { + rbe_ge = rbe; + continue; } + } else if (d > 0) { + /* annotate element greater than the new element. */ + rbe_ge = rbe; + continue; + } else if (d < 0) { + /* annotate element less than the new element. */ + rbe_le = rbe; + break; } + } - dup_end_left = dup_end_right = false; + /* - new start element matching existing start element: full overlap + * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given. + */ + if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) && + nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) { + *ext = &rbe_ge->ext; + return -EEXIST; } - if (overlap) + /* - new end element matching existing end element: full overlap + * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given. + */ + if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) && + nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) { + *ext = &rbe_le->ext; + return -EEXIST; + } + + /* - new start element with existing closest, less or equal key value + * being a start element: partial overlap, reported as -ENOTEMPTY. + * Anonymous sets allow for two consecutive start element since they + * are constant, skip them to avoid bogus overlap reports. + */ + if (!nft_set_is_anonymous(set) && rbe_le && + nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new)) + return -ENOTEMPTY; + + /* - new end element with existing closest, less or equal key value + * being a end element: partial overlap, reported as -ENOTEMPTY. 
+ */ + if (rbe_le && + nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new)) return -ENOTEMPTY; + /* - new end element with existing closest, greater or equal key value + * being an end element: partial overlap, reported as -ENOTEMPTY + */ + if (rbe_ge && + nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new)) + return -ENOTEMPTY; + + /* Accepted element: pick insertion point depending on key value */ + parent = NULL; + p = &priv->root.rb_node; + while (*p != NULL) { + parent = *p; + rbe = rb_entry(parent, struct nft_rbtree_elem, node); + d = nft_rbtree_cmp(set, rbe, new); + + if (d < 0) + p = &parent->rb_left; + else if (d > 0) + p = &parent->rb_right; + else if (nft_rbtree_interval_end(rbe)) + p = &parent->rb_left; + else + p = &parent->rb_right; + } + rb_link_node_rcu(&new->node, parent, p); rb_insert_color(&new->node, &priv->root); return 0; @@ -500,23 +562,37 @@ static void nft_rbtree_gc(struct work_struct *work) struct nft_rbtree *priv; struct rb_node *node; struct nft_set *set; + struct net *net; + u8 genmask; priv = container_of(work, struct nft_rbtree, gc_work.work); set = nft_set_container_of(priv); + net = read_pnet(&set->net); + genmask = nft_genmask_cur(net); write_lock_bh(&priv->lock); write_seqcount_begin(&priv->count); for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { rbe = rb_entry(node, struct nft_rbtree_elem, node); + if (!nft_set_elem_active(&rbe->ext, genmask)) + continue; + + /* elements are reversed in the rbtree for historical reasons, + * from highest to lowest value, that is why end element is + * always visited before the start element. + */ if (nft_rbtree_interval_end(rbe)) { rbe_end = rbe; continue; } if (!nft_set_elem_expired(&rbe->ext)) continue; - if (nft_set_elem_mark_busy(&rbe->ext)) + + if (nft_set_elem_mark_busy(&rbe->ext)) { + rbe_end = NULL; continue; + } if (rbe_prev) { rb_erase(&rbe_prev->node, &priv->root); diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index 37c728bdad41c..c49d318f8e6ed 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -289,6 +289,13 @@ static int nft_tproxy_dump(struct sk_buff *skb, return 0; } +static int nft_tproxy_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING); +} + static struct nft_expr_type nft_tproxy_type; static const struct nft_expr_ops nft_tproxy_ops = { .type = &nft_tproxy_type, @@ -296,6 +303,7 @@ static const struct nft_expr_ops nft_tproxy_ops = { .eval = nft_tproxy_eval, .init = nft_tproxy_init, .dump = nft_tproxy_dump, + .validate = nft_tproxy_validate, }; static struct nft_expr_type nft_tproxy_type __read_mostly = { diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index e5ebc0810675a..ad3c033db64e7 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -30,6 +30,7 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) u_int32_t new_targetmark; struct nf_conn *ct; u_int32_t newmark; + u_int32_t oldmark; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) @@ -37,14 +38,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) switch (info->mode) { case XT_CONNMARK_SET: - newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; + oldmark = READ_ONCE(ct->mark); + newmark = (oldmark & ~info->ctmask) ^ info->ctmark; if (info->shift_dir == D_SHIFT_RIGHT) newmark >>= info->shift_bits; else newmark <<= info->shift_bits; 
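
The conntrack-mark hunks in this series (nft_ct, xt_connmark here, and openvswitch below) all make the same change: ct->mark can be read and written from packet-processing paths that share no lock, so plain accesses are a data race. READ_ONCE()/WRITE_ONCE() turn each access into a single untorn load or store and stop the compiler from re-reading the field between the comparison and the conntrack event. A minimal sketch with simplified stand-in macros (the kernel's versions live in <linux/compiler.h> and do more):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins: force exactly one access via a volatile cast. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

struct conn { uint32_t mark; };

/* Mirror of the update pattern: snapshot the mark once, compute the
 * new value, store it only if it changed, so concurrent lockless
 * readers never observe a torn or half-updated value. */
static int set_mark(struct conn *ct, uint32_t ctmark, uint32_t ctmask)
{
	uint32_t oldmark = READ_ONCE(ct->mark);
	uint32_t newmark = (oldmark & ~ctmask) ^ ctmark;

	if (READ_ONCE(ct->mark) != newmark) {
		WRITE_ONCE(ct->mark, newmark);
		return 1;	/* kernel would fire nf_conntrack_event_cache() */
	}
	return 0;
}

int main(void)
{
	struct conn ct = { .mark = 0xffff0000 };

	if (set_mark(&ct, 0x2a, 0xff))
		printf("mark is now %#x\n", (unsigned int)ct.mark);
	return 0;
}

As in the patch, the value is re-read once more before the store, and the mark is only rewritten, and the event only generated, when it actually changes.
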
- if (ct->mark != newmark) { - ct->mark = newmark; + if (READ_ONCE(ct->mark) != newmark) { + WRITE_ONCE(ct->mark, newmark); nf_conntrack_event_cache(IPCT_MARK, ct); } break; @@ -55,15 +57,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) else new_targetmark <<= info->shift_bits; - newmark = (ct->mark & ~info->ctmask) ^ + newmark = (READ_ONCE(ct->mark) & ~info->ctmask) ^ new_targetmark; - if (ct->mark != newmark) { - ct->mark = newmark; + if (READ_ONCE(ct->mark) != newmark) { + WRITE_ONCE(ct->mark, newmark); nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_RESTORE: - new_targetmark = (ct->mark & info->ctmask); + new_targetmark = (READ_ONCE(ct->mark) & info->ctmask); if (info->shift_dir == D_SHIFT_RIGHT) new_targetmark >>= info->shift_bits; else @@ -126,7 +128,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) if (ct == NULL) return false; - return ((ct->mark & info->mask) == info->mark) ^ info->invert; + return ((READ_ONCE(ct->mark) & info->mask) == info->mark) ^ info->invert; } static int connmark_mt_check(const struct xt_mtchk_param *par) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d96a610929d9a..2104fbdd63d29 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -570,7 +570,9 @@ static int netlink_insert(struct sock *sk, u32 portid) if (nlk_sk(sk)->bound) goto err; - nlk_sk(sk)->portid = portid; + /* portid can be read locklessly from netlink_getname(). */ + WRITE_ONCE(nlk_sk(sk)->portid, portid); + sock_hold(sk); err = __netlink_insert(table, sk); @@ -1079,9 +1081,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, return -EINVAL; if (addr->sa_family == AF_UNSPEC) { - sk->sk_state = NETLINK_UNCONNECTED; - nlk->dst_portid = 0; - nlk->dst_group = 0; + /* paired with READ_ONCE() in netlink_getsockbyportid() */ + WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED); + /* dst_portid and dst_group can be read locklessly */ + WRITE_ONCE(nlk->dst_portid, 0); + WRITE_ONCE(nlk->dst_group, 0); return 0; } if (addr->sa_family != AF_NETLINK) @@ -1102,9 +1106,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, err = netlink_autobind(sock); if (err == 0) { - sk->sk_state = NETLINK_CONNECTED; - nlk->dst_portid = nladdr->nl_pid; - nlk->dst_group = ffs(nladdr->nl_groups); + /* paired with READ_ONCE() in netlink_getsockbyportid() */ + WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED); + /* dst_portid and dst_group can be read locklessly */ + WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid); + WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups)); } return err; @@ -1121,10 +1127,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr, nladdr->nl_pad = 0; if (peer) { - nladdr->nl_pid = nlk->dst_portid; - nladdr->nl_groups = netlink_group_mask(nlk->dst_group); + /* Paired with WRITE_ONCE() in netlink_connect() */ + nladdr->nl_pid = READ_ONCE(nlk->dst_portid); + nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group)); } else { - nladdr->nl_pid = nlk->portid; + /* Paired with WRITE_ONCE() in netlink_insert() */ + nladdr->nl_pid = READ_ONCE(nlk->portid); netlink_lock_table(); nladdr->nl_groups = nlk->groups ? 
nlk->groups[0] : 0; netlink_unlock_table(); @@ -1151,8 +1159,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid) /* Don't bother queuing skb if kernel socket has no input function */ nlk = nlk_sk(sock); - if (sock->sk_state == NETLINK_CONNECTED && - nlk->dst_portid != nlk_sk(ssk)->portid) { + /* dst_portid and sk_state can be changed in netlink_connect() */ + if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED && + READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) { sock_put(sock); return ERR_PTR(-ECONNREFUSED); } @@ -1888,8 +1897,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) goto out; netlink_skb_flags |= NETLINK_SKB_DST; } else { - dst_portid = nlk->dst_portid; - dst_group = nlk->dst_group; + /* Paired with WRITE_ONCE() in netlink_connect() */ + dst_portid = READ_ONCE(nlk->dst_portid); + dst_group = READ_ONCE(nlk->dst_group); } /* Paired with WRITE_ONCE() in netlink_insert() */ diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index e5c8a295e6406..5c04da4cfbad0 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -400,6 +400,11 @@ static int nr_listen(struct socket *sock, int backlog) struct sock *sk = sock->sk; lock_sock(sk); + if (sock->state != SS_UNCONNECTED) { + release_sock(sk); + return -EINVAL; + } + if (sk->sk_state != TCP_LISTEN) { memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index a8da88db7893f..4e7c968cde2dc 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t) is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { + sock_hold(sk); bh_unlock_sock(sk); nr_destroy_socket(sk); goto out; diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index cc997518f79d1..edadebb3efd2a 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -159,6 +159,7 @@ static void local_cleanup(struct nfc_llcp_local *local) cancel_work_sync(&local->rx_work); cancel_work_sync(&local->timeout_work); kfree_skb(local->rx_pending); + local->rx_pending = NULL; del_timer_sync(&local->sdreq_timer); cancel_work_sync(&local->sdreq_timeout_work); nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 2cfff70f70e06..ed9019d807c78 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -530,7 +530,7 @@ static int nci_open_device(struct nci_dev *ndev) skb_queue_purge(&ndev->tx_q); ndev->ops->close(ndev); - ndev->flags = 0; + ndev->flags &= BIT(NCI_UNREG); } done: diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index b002e18f38c81..b4548d8874899 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -279,8 +279,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_plen(skb->data)); conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data)); - if (!conn_info) + if (!conn_info) { + kfree_skb(skb); return; + } /* strip the nci data header */ skb_pull(skb, NCI_DATA_HDR_SIZE); diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 33e1170817f0f..f8b20cddd5c96 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -218,6 +218,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev, target->sens_res = nfca_poll->sens_res; target->sel_res = nfca_poll->sel_res; target->nfcid1_len = nfca_poll->nfcid1_len; + if (target->nfcid1_len > 
ARRAY_SIZE(target->nfcid1)) + return -EPROTO; if (target->nfcid1_len > 0) { memcpy(target->nfcid1, nfca_poll->nfcid1, target->nfcid1_len); @@ -226,6 +228,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev, nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params; target->sensb_res_len = nfcb_poll->sensb_res_len; + if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res)) + return -EPROTO; if (target->sensb_res_len > 0) { memcpy(target->sensb_res, nfcb_poll->sensb_res, target->sensb_res_len); @@ -234,6 +238,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev, nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params; target->sensf_res_len = nfcf_poll->sensf_res_len; + if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res)) + return -EPROTO; if (target->sensf_res_len > 0) { memcpy(target->sensf_res, nfcf_poll->sensf_res, target->sensf_res_len); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index b8939ebaa6d3e..610caea4feec8 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1497,6 +1497,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) u32 dev_idx, se_idx; u8 *apdu; size_t apdu_len; + int rc; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX] || @@ -1510,25 +1511,37 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) if (!dev) return -ENODEV; - if (!dev->ops || !dev->ops->se_io) - return -ENOTSUPP; + if (!dev->ops || !dev->ops->se_io) { + rc = -EOPNOTSUPP; + goto put_dev; + } apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]); - if (apdu_len == 0) - return -EINVAL; + if (apdu_len == 0) { + rc = -EINVAL; + goto put_dev; + } apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]); - if (!apdu) - return -EINVAL; + if (!apdu) { + rc = -EINVAL; + goto put_dev; + } ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + if (!ctx) { + rc = -ENOMEM; + goto put_dev; + } ctx->dev_idx = dev_idx; ctx->se_idx = se_idx; - return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); + rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); + +put_dev: + nfc_put_device(dev); + return rc; } static int nfc_genl_vendor_cmd(struct sk_buff *skb, @@ -1551,14 +1564,21 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb, subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]); dev = nfc_get_device(dev_idx); - if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds) + if (!dev) return -ENODEV; + if (!dev->vendor_cmds || !dev->n_vendor_cmds) { + err = -ENODEV; + goto put_dev; + } + if (info->attrs[NFC_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]); data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]); - if (data_len == 0) - return -EINVAL; + if (data_len == 0) { + err = -EINVAL; + goto put_dev; + } } else { data = NULL; data_len = 0; @@ -1573,10 +1593,14 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb, dev->cur_cmd_info = info; err = cmd->doit(dev, data, data_len); dev->cur_cmd_info = NULL; - return err; + goto put_dev; } - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + +put_dev: + nfc_put_device(dev); + return err; } /* message building helper */ diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 41f248895a871..0f0f380e81a40 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -150,7 +150,7 @@ static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo) static u32 ovs_ct_get_mark(const struct nf_conn *ct) { #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - return ct ? ct->mark : 0; + return ct ? 
READ_ONCE(ct->mark) : 0; #else return 0; #endif @@ -336,9 +336,9 @@ static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key, #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) u32 new_mark; - new_mark = ct_mark | (ct->mark & ~(mask)); - if (ct->mark != new_mark) { - ct->mark = new_mark; + new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask)); + if (READ_ONCE(ct->mark) != new_mark) { + WRITE_ONCE(ct->mark, new_mark); if (nf_ct_is_confirmed(ct)) nf_conntrack_event_cache(IPCT_MARK, ct); key->ct.mark = new_mark; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 9d6ef6cb9b263..b625ab5e9a430 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -241,10 +241,17 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) upcall.portid = ovs_vport_find_upcall_portid(p, skb); upcall.mru = OVS_CB(skb)->mru; error = ovs_dp_upcall(dp, skb, key, &upcall, 0); - if (unlikely(error)) - kfree_skb(skb); - else + switch (error) { + case 0: + case -EAGAIN: + case -ERESTARTSYS: + case -EINTR: consume_skb(skb); + break; + default: + kfree_skb(skb); + break; + } stats_counter = &stats->n_missed; goto out; } @@ -537,8 +544,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, out: if (err) skb_tx_error(skb); - kfree_skb(user_skb); - kfree_skb(nskb); + consume_skb(user_skb); + consume_skb(nskb); + return err; } @@ -925,6 +933,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) struct sw_flow_mask mask; struct sk_buff *reply; struct datapath *dp; + struct sw_flow_key *key; struct sw_flow_actions *acts; struct sw_flow_match match; u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); @@ -952,30 +961,32 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) } /* Extract key. */ - ovs_match_init(&match, &new_flow->key, false, &mask); + key = kzalloc(sizeof(*key), GFP_KERNEL); + if (!key) { + error = -ENOMEM; + goto err_kfree_flow; + } + + ovs_match_init(&match, key, false, &mask); error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK], log); if (error) - goto err_kfree_flow; + goto err_kfree_key; + + ovs_flow_mask_key(&new_flow->key, key, true, &mask); /* Extract flow identifier. */ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], - &new_flow->key, log); + key, log); if (error) - goto err_kfree_flow; - - /* unmasked key is needed to match when ufid is not used. */ - if (ovs_identifier_is_key(&new_flow->id)) - match.key = new_flow->id.unmasked_key; - - ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask); + goto err_kfree_key; /* Validate actions. 
*/ error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, &acts, log); if (error) { OVS_NLERR(log, "Flow actions may not be safe on all matching packets."); - goto err_kfree_flow; + goto err_kfree_key; } reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false, @@ -996,7 +1007,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) if (ovs_identifier_is_ufid(&new_flow->id)) flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id); if (!flow) - flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key); + flow = ovs_flow_tbl_lookup(&dp->table, key); if (likely(!flow)) { rcu_assign_pointer(new_flow->sf_acts, acts); @@ -1066,6 +1077,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) if (reply) ovs_notify(&dp_flow_genl_family, reply, info); + + kfree(key); return 0; err_unlock_ovs: @@ -1073,6 +1086,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) kfree_skb(reply); err_kfree_acts: ovs_nla_free_flow_actions(acts); +err_kfree_key: + kfree(key); err_kfree_flow: ovs_flow_free(new_flow, false); error: @@ -1584,7 +1599,8 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, if (IS_ERR(dp)) return; - WARN(dp->user_features, "Dropping previously announced user features\n"); + pr_warn("%s: Dropping previously announced user features\n", + ovs_dp_name(dp)); dp->user_features = 0; } diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index e594b4d6b58a9..0cf3dda5319fe 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -450,7 +450,7 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) err = attach_meter(meter_tbl, meter); if (err) - goto exit_unlock; + goto exit_free_old_meter; ovs_unlock(); @@ -473,6 +473,8 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) genlmsg_end(reply, ovs_reply_header); return genlmsg_reply(reply, info); +exit_free_old_meter: + ovs_meter_free(old_meter); exit_unlock: ovs_unlock(); nlmsg_free(reply); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index b70b06e312bd0..3716797c55643 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1885,12 +1885,22 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) { + int depth; + if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && sock->type == SOCK_RAW) { skb_reset_mac_header(skb); skb->protocol = dev_parse_header_protocol(skb); } + /* Move network header to the right position for VLAN tagged packets */ + if (likely(skb->dev->type == ARPHRD_ETHER) && + eth_type_vlan(skb->protocol) && + __vlan_get_protocol(skb, skb->protocol, &depth) != 0) { + if (pskb_may_pull(skb, depth)) + skb_set_network_header(skb, depth); + } + skb_probe_transport_header(skb); } @@ -2243,8 +2253,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && - (skb->ip_summed == CHECKSUM_COMPLETE || - skb_csum_unnecessary(skb))) + skb_csum_unnecessary(skb)) status |= TP_STATUS_CSUM_VALID; if (snaplen > res) @@ -3006,6 +3015,11 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->mark = sockc.mark; skb->tstamp = sockc.transmit_time; + if (unlikely(extra_len == 4)) + skb->no_fcs = 1; + + packet_parse_headers(skb, sock); + if (has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, 
vio_le()); if (err) @@ -3014,11 +3028,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) virtio_net_hdr_set_proto(skb, &vnet_hdr); } - packet_parse_headers(skb, sock); - - if (unlikely(extra_len == 4)) - skb->no_fcs = 1; - err = po->xmit(skb); if (unlikely(err != 0)) { if (err > 0) @@ -3480,8 +3489,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && - (skb->ip_summed == CHECKSUM_COMPLETE || - skb_csum_unnecessary(skb))) + skb_csum_unnecessary(skb)) aux.tp_status |= TP_STATUS_CSUM_VALID; aux.tp_len = origlen; diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c index e760d4a38fafd..fe81e03851686 100644 --- a/net/qrtr/ns.c +++ b/net/qrtr/ns.c @@ -83,7 +83,10 @@ static struct qrtr_node *node_get(unsigned int node_id) node->id = node_id; - radix_tree_insert(&nodes, node_id, node); + if (radix_tree_insert(&nodes, node_id, node)) { + kfree(node); + return NULL; + } return node; } diff --git a/net/rds/message.c b/net/rds/message.c index 799034e0f513d..b363ef13c75ef 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -104,9 +104,9 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs, spin_lock_irqsave(&q->lock, flags); head = &q->zcookie_head; if (!list_empty(head)) { - info = list_entry(head, struct rds_msg_zcopy_info, - rs_zcookie_next); - if (info && rds_zcookie_add(info, cookie)) { + info = list_first_entry(head, struct rds_msg_zcopy_info, + rs_zcookie_next); + if (rds_zcookie_add(info, cookie)) { spin_unlock_irqrestore(&q->lock, flags); kfree(rds_info_from_znotifier(znotif)); /* caller invokes rds_wake_sk_sleep() */ diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 5327d130c4b56..b560d06e6d96d 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -166,10 +166,10 @@ void rds_tcp_reset_callbacks(struct socket *sock, */ atomic_set(&cp->cp_state, RDS_CONN_RESETTING); wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags)); - lock_sock(osock->sk); /* reset receive side state for rds_tcp_data_recv() for osock */ cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); + lock_sock(osock->sk); if (tc->t_tinc) { rds_inc_put(&tc->t_tinc->ti_inc); tc->t_tinc = NULL; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 29a208ed8fb88..86c93cf1744b0 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -487,6 +487,12 @@ static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; + lock_sock(sk); + if (sock->state != SS_UNCONNECTED) { + release_sock(sk); + return -EINVAL; + } + if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); @@ -496,8 +502,10 @@ static int rose_listen(struct socket *sock, int backlog) memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; + release_sock(sk); return 0; } + release_sock(sk); return -EOPNOTSUPP; } diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index f6102e6f51617..730d2205f1976 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c @@ -236,6 +236,9 @@ void rose_transmit_clear_request(struct rose_neigh *neigh, unsigned int lci, uns unsigned char *dptr; int len; + if (!neigh->dev) + return; + len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3; if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 41671af6b33f9..0354f90dc93aa 100644 --- 
a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -351,7 +351,7 @@ static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall, */ void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call) { - _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); + _enter("%d{%d}", call->debug_id, refcount_read(&call->ref)); mutex_lock(&call->user_mutex); rxrpc_release_call(rxrpc_sk(sock->sk), call); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index ccb65412b6704..d86894a1c35d3 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -14,14 +14,6 @@ #include #include "protocol.h" -#if 0 -#define CHECK_SLAB_OKAY(X) \ - BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \ - (POISON_FREE << 8 | POISON_FREE)) -#else -#define CHECK_SLAB_OKAY(X) do {} while (0) -#endif - #define FCRYPT_BSIZE 8 struct rxrpc_crypt { union { @@ -86,7 +78,7 @@ struct rxrpc_net { struct work_struct client_conn_reaper; struct timer_list client_conn_reap_timer; - struct list_head local_endpoints; + struct hlist_head local_endpoints; struct mutex local_mutex; /* Lock for ->local_endpoints */ DECLARE_HASHTABLE (peer_hash, 10); @@ -264,9 +256,9 @@ struct rxrpc_security { struct rxrpc_local { struct rcu_head rcu; atomic_t active_users; /* Number of users of the local endpoint */ - atomic_t usage; /* Number of references to the structure */ + refcount_t ref; /* Number of references to the structure */ struct rxrpc_net *rxnet; /* The network ns in which this resides */ - struct list_head link; + struct hlist_node link; struct socket *socket; /* my UDP socket */ struct work_struct processor; struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */ @@ -289,7 +281,7 @@ struct rxrpc_local { */ struct rxrpc_peer { struct rcu_head rcu; /* This must be first */ - atomic_t usage; + refcount_t ref; unsigned long hash_key; struct hlist_node hash_link; struct rxrpc_local *local; @@ -391,7 +383,8 @@ enum rxrpc_conn_proto_state { */ struct rxrpc_bundle { struct rxrpc_conn_parameters params; - atomic_t usage; + refcount_t ref; + atomic_t active; /* Number of active users */ unsigned int debug_id; bool try_upgrade; /* True if the bundle is attempting upgrade */ bool alloc_conn; /* True if someone's getting a conn */ @@ -412,7 +405,7 @@ struct rxrpc_connection { struct rxrpc_conn_proto proto; struct rxrpc_conn_parameters params; - atomic_t usage; + refcount_t ref; struct rcu_head rcu; struct list_head cache_link; @@ -592,7 +585,7 @@ struct rxrpc_call { int error; /* Local error incurred */ enum rxrpc_call_state state; /* current state of call */ enum rxrpc_call_completion completion; /* Call completion condition */ - atomic_t usage; + refcount_t ref; u16 service_id; /* service ID */ u8 security_ix; /* Security type */ enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */ @@ -1001,6 +994,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *); extern const struct seq_operations rxrpc_call_seq_ops; extern const struct seq_operations rxrpc_connection_seq_ops; extern const struct seq_operations rxrpc_peer_seq_ops; +extern const struct seq_operations rxrpc_local_seq_ops; /* * recvmsg.c diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index a0b033954ceac..2a14d69b171f3 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -91,7 +91,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, (head + 1) & (size - 1)); trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service, - atomic_read(&conn->usage), 
here); + refcount_read(&conn->ref), here); } /* Now it gets complicated, because calls get registered with the @@ -104,7 +104,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, call->state = RXRPC_CALL_SERVER_PREALLOC; trace_rxrpc_call(call->debug_id, rxrpc_call_new_service, - atomic_read(&call->usage), + refcount_read(&call->ref), here, (const void *)user_call_ID); write_lock(&rx->call_lock); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index f8ecad2b730e8..2a93e7b5fbd05 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -166,7 +166,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); now = ktime_get_real(); - max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j)); + max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j)); spin_lock_bh(&call->lock); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 150cd7b2154c8..10dad2834d5b6 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -112,7 +112,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx, found_extant_call: rxrpc_get_call(call, rxrpc_call_got); read_unlock(&rx->call_lock); - _leave(" = %p [%d]", call, atomic_read(&call->usage)); + _leave(" = %p [%d]", call, refcount_read(&call->ref)); return call; } @@ -160,7 +160,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, spin_lock_init(&call->notify_lock); spin_lock_init(&call->input_lock); rwlock_init(&call->state_lock); - atomic_set(&call->usage, 1); + refcount_set(&call->ref, 1); call->debug_id = debug_id; call->tx_total_len = -1; call->next_rx_timo = 20 * HZ; @@ -301,7 +301,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, call->interruptibility = p->interruptibility; call->tx_total_len = p->tx_total_len; trace_rxrpc_call(call->debug_id, rxrpc_call_new_client, - atomic_read(&call->usage), + refcount_read(&call->ref), here, (const void *)p->user_call_ID); if (p->kernel) __set_bit(RXRPC_CALL_KERNEL, &call->flags); @@ -354,7 +354,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, goto error_attached_to_socket; trace_rxrpc_call(call->debug_id, rxrpc_call_connected, - atomic_read(&call->usage), here, NULL); + refcount_read(&call->ref), here, NULL); rxrpc_start_call_timer(call); @@ -374,7 +374,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); + refcount_read(&call->ref), here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); @@ -388,7 +388,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ error_attached_to_socket: trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(ret)); + refcount_read(&call->ref), here, ERR_PTR(ret)); set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, RX_CALL_DEAD, ret); @@ -444,8 +444,9 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx, bool rxrpc_queue_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); - int n = atomic_fetch_add_unless(&call->usage, 1, 0); - if (n == 0) + int n; + + if (!__refcount_inc_not_zero(&call->ref, &n)) return false; if (rxrpc_queue_work(&call->processor)) 
trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1, @@ -461,7 +462,7 @@ bool rxrpc_queue_call(struct rxrpc_call *call) bool __rxrpc_queue_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); - int n = atomic_read(&call->usage); + int n = refcount_read(&call->ref); ASSERTCMP(n, >=, 1); if (rxrpc_queue_work(&call->processor)) trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n, @@ -478,7 +479,7 @@ void rxrpc_see_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); if (call) { - int n = atomic_read(&call->usage); + int n = refcount_read(&call->ref); trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n, here, NULL); @@ -488,11 +489,11 @@ void rxrpc_see_call(struct rxrpc_call *call) bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_fetch_add_unless(&call->usage, 1, 0); + int n; - if (n == 0) + if (!__refcount_inc_not_zero(&call->ref, &n)) return false; - trace_rxrpc_call(call->debug_id, op, n, here, NULL); + trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL); return true; } @@ -502,9 +503,10 @@ bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(&call->usage); + int n; - trace_rxrpc_call(call->debug_id, op, n, here, NULL); + __refcount_inc(&call->ref, &n); + trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL); } /* @@ -529,10 +531,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) struct rxrpc_connection *conn = call->conn; bool put = false; - _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); + _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref)); trace_rxrpc_call(call->debug_id, rxrpc_call_release, - atomic_read(&call->usage), + refcount_read(&call->ref), here, (const void *)call->flags); ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); @@ -621,14 +623,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op) struct rxrpc_net *rxnet = call->rxnet; const void *here = __builtin_return_address(0); unsigned int debug_id = call->debug_id; + bool dead; int n; ASSERT(call != NULL); - n = atomic_dec_return(&call->usage); + dead = __refcount_dec_and_test(&call->ref, &n); trace_rxrpc_call(debug_id, op, n, here, NULL); - ASSERTCMP(n, >=, 0); - if (n == 0) { + if (dead) { _debug("call %d dead", call->debug_id); ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); @@ -718,7 +720,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet) list_del_init(&call->link); pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", - call, atomic_read(&call->usage), + call, refcount_read(&call->ref), rxrpc_call_states[call->state], call->flags, call->events); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index f5fb223aba82a..f5fa5f3083bdd 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -40,6 +40,8 @@ __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; DEFINE_IDR(rxrpc_client_conn_ids); static DEFINE_SPINLOCK(rxrpc_conn_id_lock); +static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); + /* * Get a connection ID and epoch for a client connection from the global pool. * The connection struct pointer is then recorded in the idr radix tree. 
The @@ -102,7 +104,7 @@ void rxrpc_destroy_client_conn_ids(void) if (!idr_is_empty(&rxrpc_client_conn_ids)) { idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) { pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", - conn, atomic_read(&conn->usage)); + conn, refcount_read(&conn->ref)); } BUG(); } @@ -122,7 +124,8 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp, if (bundle) { bundle->params = *cp; rxrpc_get_peer(bundle->params.peer); - atomic_set(&bundle->usage, 1); + refcount_set(&bundle->ref, 1); + atomic_set(&bundle->active, 1); spin_lock_init(&bundle->channel_lock); INIT_LIST_HEAD(&bundle->waiting_calls); } @@ -131,7 +134,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp, struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle) { - atomic_inc(&bundle->usage); + refcount_inc(&bundle->ref); return bundle; } @@ -144,10 +147,13 @@ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle) void rxrpc_put_bundle(struct rxrpc_bundle *bundle) { unsigned int d = bundle->debug_id; - unsigned int u = atomic_dec_return(&bundle->usage); + bool dead; + int r; + + dead = __refcount_dec_and_test(&bundle->ref, &r); - _debug("PUT B=%x %u", d, u); - if (u == 0) + _debug("PUT B=%x %d", d, r - 1); + if (dead) rxrpc_free_bundle(bundle); } @@ -169,7 +175,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) return ERR_PTR(-ENOMEM); } - atomic_set(&conn->usage, 1); + refcount_set(&conn->ref, 1); conn->bundle = bundle; conn->params = bundle->params; conn->out_clientflag = RXRPC_CLIENT_INITIATED; @@ -199,7 +205,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) key_get(conn->params.key); trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client, - atomic_read(&conn->usage), + refcount_read(&conn->ref), __builtin_return_address(0)); atomic_inc(&rxnet->nr_client_conns); @@ -341,6 +347,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c rxrpc_free_bundle(candidate); found_bundle: rxrpc_get_bundle(bundle); + atomic_inc(&bundle->active); spin_unlock(&local->client_bundles_lock); _leave(" = %u [found]", bundle->debug_id); return bundle; @@ -438,6 +445,7 @@ static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp) if (old) trace_rxrpc_client(old, -1, rxrpc_client_replace); candidate->bundle_shift = shift; + atomic_inc(&bundle->active); bundle->conns[i] = candidate; for (j = 0; j < RXRPC_MAXCALLS; j++) set_bit(shift + j, &bundle->avail_chans); @@ -728,6 +736,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, smp_rmb(); out_put_bundle: + rxrpc_deactivate_bundle(bundle); rxrpc_put_bundle(bundle); out: _leave(" = %d", ret); @@ -903,9 +912,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) { struct rxrpc_bundle *bundle = conn->bundle; - struct rxrpc_local *local = bundle->params.local; unsigned int bindex; - bool need_drop = false, need_put = false; + bool need_drop = false; int i; _enter("C=%x", conn->debug_id); @@ -924,15 +932,22 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) } spin_unlock(&bundle->channel_lock); - /* If there are no more connections, remove the bundle */ - if (!bundle->avail_chans) { - _debug("maybe unbundle"); - spin_lock(&local->client_bundles_lock); + if (need_drop) { + rxrpc_deactivate_bundle(bundle); + rxrpc_put_connection(conn); + } +} - for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) - if (bundle->conns[i]) - 
break; - if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) { +/* + * Drop the active count on a bundle. + */ +static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle) +{ + struct rxrpc_local *local = bundle->params.local; + bool need_put = false; + + if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) { + if (!bundle->params.exclusive) { _debug("erase bundle"); rb_erase(&bundle->local_node, &local->client_bundles); need_put = true; @@ -942,10 +957,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) if (need_put) rxrpc_put_bundle(bundle); } - - if (need_drop) - rxrpc_put_connection(conn); - _leave(""); } /* @@ -972,14 +983,13 @@ void rxrpc_put_client_conn(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); unsigned int debug_id = conn->debug_id; - int n; + bool dead; + int r; - n = atomic_dec_return(&conn->usage); - trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here); - if (n <= 0) { - ASSERTCMP(n, >=, 0); + dead = __refcount_dec_and_test(&conn->ref, &r); + trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here); + if (dead) rxrpc_kill_client_conn(conn); - } } /* diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 3ef05a0e90ad0..d829b97550cc2 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -105,7 +105,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local, goto not_found; *_peer = peer; conn = rxrpc_find_service_conn_rcu(peer, skb); - if (!conn || atomic_read(&conn->usage) == 0) + if (!conn || refcount_read(&conn->ref) == 0) goto not_found; _leave(" = %p", conn); return conn; @@ -115,7 +115,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local, */ conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT); - if (!conn || atomic_read(&conn->usage) == 0) { + if (!conn || refcount_read(&conn->ref) == 0) { _debug("no conn"); goto not_found; } @@ -264,11 +264,12 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn) bool rxrpc_queue_conn(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); - int n = atomic_fetch_add_unless(&conn->usage, 1, 0); - if (n == 0) + int r; + + if (!__refcount_inc_not_zero(&conn->ref, &r)) return false; if (rxrpc_queue_work(&conn->processor)) - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here); + trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, r + 1, here); else rxrpc_put_connection(conn); return true; @@ -281,7 +282,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); if (conn) { - int n = atomic_read(&conn->usage); + int n = refcount_read(&conn->ref); trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here); } @@ -293,9 +294,10 @@ void rxrpc_see_connection(struct rxrpc_connection *conn) struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(&conn->usage); + int r; - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here); + __refcount_inc(&conn->ref, &r); + trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r, here); return conn; } @@ -306,11 +308,11 @@ struct rxrpc_connection * rxrpc_get_connection_maybe(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); + int r; if (conn) { - int n = atomic_fetch_add_unless(&conn->usage, 1, 0); - if (n > 0) - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here); + if 
(__refcount_inc_not_zero(&conn->ref, &r))
+		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r + 1, here);
 	else
 		conn = NULL;
 }
@@ -334,12 +336,11 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = conn->debug_id;
-	int n;
+	int r;
 
-	n = atomic_dec_return(&conn->usage);
-	trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
-	ASSERTCMP(n, >=, 0);
-	if (n == 1)
+	__refcount_dec(&conn->ref, &r);
+	trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, r - 1, here);
+	if (r - 1 == 1)
 		rxrpc_set_service_reap_timer(conn->params.local->rxnet,
 					     jiffies + rxrpc_connection_expiry);
 }
@@ -352,9 +353,9 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 	struct rxrpc_connection *conn =
 		container_of(rcu, struct rxrpc_connection, rcu);
 
-	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));
+	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));
 
-	ASSERTCMP(atomic_read(&conn->usage), ==, 0);
+	ASSERTCMP(refcount_read(&conn->ref), ==, 0);
 
 	_net("DESTROY CONN %d", conn->debug_id);
@@ -394,8 +395,8 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 	write_lock(&rxnet->conn_lock);
 	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
-		ASSERTCMP(atomic_read(&conn->usage), >, 0);
-		if (likely(atomic_read(&conn->usage) > 1))
+		ASSERTCMP(refcount_read(&conn->ref), >, 0);
+		if (likely(refcount_read(&conn->ref) > 1))
 			continue;
 		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
 			continue;
@@ -407,7 +408,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 			expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
 
 		_debug("reap CONN %d { u=%d,t=%ld }",
-		       conn->debug_id, atomic_read(&conn->usage),
+		       conn->debug_id, refcount_read(&conn->ref),
 		       (long)expire_at - (long)now);
 
 		if (time_before(now, expire_at)) {
@@ -420,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 		/* The usage count sits at 1 whilst the object is unused on the
 		 * list; we reduce that to 0 to make the object unavailable.
 		 */
-		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
+		if (!refcount_dec_if_one(&conn->ref))
 			continue;
 		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
@@ -444,7 +445,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 				  link);
 		list_del_init(&conn->link);
 
-		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
+		ASSERTCMP(refcount_read(&conn->ref), ==, 0);
 		rxrpc_kill_connection(conn);
 	}
@@ -472,7 +473,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 	write_lock(&rxnet->conn_lock);
 	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
 		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
-		       conn, atomic_read(&conn->usage));
+		       conn, refcount_read(&conn->ref));
 		leak = true;
 	}
 	write_unlock(&rxnet->conn_lock);
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 6c847720494fa..68508166bbc0b 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -9,7 +9,7 @@
 #include "ar-internal.h"
 
 static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
-	.usage		= ATOMIC_INIT(1),
+	.ref		= REFCOUNT_INIT(1),
 	.debug_id	= UINT_MAX,
 	.channel_lock	= __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
 };
@@ -99,7 +99,7 @@ static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
 	return;
 
 found_extant_conn:
-	if (atomic_read(&cursor->usage) == 0)
+	if (refcount_read(&cursor->ref) == 0)
 		goto replace_old_connection;
 	write_sequnlock_bh(&peer->service_conn_lock);
 	/* We should not be able to get here.  rxrpc_incoming_connection() is
@@ -132,7 +132,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
 	 * the rxrpc_connections list.
 	 */
 	conn->state = RXRPC_CONN_SERVICE_PREALLOC;
-	atomic_set(&conn->usage, 2);
+	refcount_set(&conn->ref, 2);
 	conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);
 
 	atomic_inc(&rxnet->nr_conns);
@@ -142,7 +142,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
 	write_unlock(&rxnet->conn_lock);
 
 	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
-			 atomic_read(&conn->usage),
+			 refcount_read(&conn->ref),
 			 __builtin_return_address(0));
 }
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1145cb14d86f8..e9178115a7449 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1163,8 +1163,6 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
  */
 static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
-	CHECK_SLAB_OKAY(&local->usage);
-
 	if (rxrpc_get_local_maybe(local)) {
 		skb_queue_tail(&local->reject_queue, skb);
 		rxrpc_queue_local(local);
@@ -1422,7 +1420,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		}
 	}
 
-	if (!call || atomic_read(&call->usage) == 0) {
+	if (!call || refcount_read(&call->ref) == 0) {
 		if (rxrpc_to_client(sp) ||
 		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
 			goto bad_message;
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 8c2881054266d..2c66ee981f395 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -78,10 +78,10 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 
 	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
 	if (local) {
-		atomic_set(&local->usage, 1);
+		refcount_set(&local->ref, 1);
 		atomic_set(&local->active_users, 1);
 		local->rxnet = rxnet;
-		INIT_LIST_HEAD(&local->link);
+		INIT_HLIST_NODE(&local->link);
 		INIT_WORK(&local->processor, rxrpc_local_processor);
 		init_rwsem(&local->defrag_sem);
 		skb_queue_head_init(&local->reject_queue);
@@ -199,7 +199,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 {
 	struct rxrpc_local *local;
 	struct rxrpc_net *rxnet = rxrpc_net(net);
-	struct list_head *cursor;
+	struct hlist_node *cursor;
 	const char *age;
 	long diff;
 	int ret;
@@ -209,16 +209,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 
 	mutex_lock(&rxnet->local_mutex);
 
-	for (cursor = rxnet->local_endpoints.next;
-	     cursor != &rxnet->local_endpoints;
-	     cursor = cursor->next) {
-		local = list_entry(cursor, struct rxrpc_local, link);
+	hlist_for_each(cursor, &rxnet->local_endpoints) {
+		local = hlist_entry(cursor, struct rxrpc_local, link);
 
 		diff = rxrpc_local_cmp_key(local, srx);
-		if (diff < 0)
+		if (diff != 0)
 			continue;
-		if (diff > 0)
-			break;
 
 		/* Services aren't allowed to share transport sockets, so
 		 * reject that here.  It is possible that the object is dying -
@@ -230,9 +226,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 			goto addr_in_use;
 		}
 
-		/* Found a match. We replace a dying object.  Attempting to
-		 * bind the transport socket may still fail if we're attempting
-		 * to use a local address that the dying object is still using.
+		/* Found a match.  We want to replace a dying object.
+		 * Attempting to bind the transport socket may still fail if
+		 * we're attempting to use a local address that the dying
+		 * object is still using.
 		 */
 		if (!rxrpc_use_local(local))
 			break;
@@ -249,10 +246,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	if (ret < 0)
 		goto sock_error;
 
-	if (cursor != &rxnet->local_endpoints)
-		list_replace_init(cursor, &local->link);
-	else
-		list_add_tail(&local->link, cursor);
+	if (cursor) {
+		hlist_replace_rcu(cursor, &local->link);
+		cursor->pprev = NULL;
+	} else {
+		hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
+	}
 	age = "new";
 
 found:
@@ -285,10 +284,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
-	int n;
+	int r;
 
-	n = atomic_inc_return(&local->usage);
-	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
+	__refcount_inc(&local->ref, &r);
+	trace_rxrpc_local(local->debug_id, rxrpc_local_got, r + 1, here);
 	return local;
 }
@@ -298,12 +297,12 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
+	int r;
 
 	if (local) {
-		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
-		if (n > 0)
+		if (__refcount_inc_not_zero(&local->ref, &r))
 			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
-					  n + 1, here);
+					  r + 1, here);
 		else
 			local = NULL;
 	}
@@ -317,10 +316,10 @@ void rxrpc_queue_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = local->debug_id;
-	int n = atomic_read(&local->usage);
+	int r = refcount_read(&local->ref);
 
 	if (rxrpc_queue_work(&local->processor))
-		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+		trace_rxrpc_local(debug_id, rxrpc_local_queued, r + 1, here);
 	else
 		rxrpc_put_local(local);
 }
@@ -332,15 +331,16 @@ void rxrpc_put_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id;
-	int n;
+	bool dead;
+	int r;
 
 	if (local) {
 		debug_id = local->debug_id;
 
-		n = atomic_dec_return(&local->usage);
-		trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
+		dead = __refcount_dec_and_test(&local->ref, &r);
+		trace_rxrpc_local(debug_id, rxrpc_local_put, r, here);
 
-		if (n == 0)
+		if (dead)
 			call_rcu(&local->rcu, rxrpc_local_rcu);
 	}
 }
@@ -393,7 +393,7 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 	local->dead = true;
 
 	mutex_lock(&rxnet->local_mutex);
-	list_del_init(&local->link);
+	hlist_del_init_rcu(&local->link);
 	mutex_unlock(&rxnet->local_mutex);
 
 	rxrpc_clean_up_local_conns(local);
@@ -424,8 +424,11 @@ static void rxrpc_local_processor(struct work_struct *work)
 		container_of(work, struct rxrpc_local, processor);
 	bool again;
 
+	if (local->dead)
+		return;
+
 	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
-			  atomic_read(&local->usage), NULL);
+			  refcount_read(&local->ref), NULL);
 
 	do {
 		again = false;
@@ -477,11 +480,11 @@ void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
 
 	flush_workqueue(rxrpc_workqueue);
 
-	if (!list_empty(&rxnet->local_endpoints)) {
+	if (!hlist_empty(&rxnet->local_endpoints)) {
 		mutex_lock(&rxnet->local_mutex);
-		list_for_each_entry(local, &rxnet->local_endpoints, link) {
+		hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
 			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
-			       local, atomic_read(&local->usage));
+			       local, refcount_read(&local->ref));
 		}
 		mutex_unlock(&rxnet->local_mutex);
 		BUG();
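The rxrpc hunks above and below all apply the same atomic_t-to-refcount_t conversion recipe: the __refcount_*() variants report the counter value seen *before* the operation through an int pointer, so the trace lines compensate by hand (r + 1 after a get, r - 1 after a put) to keep reporting the post-operation value that atomic_inc_return()/atomic_dec_return() used to yield. A minimal sketch of the put side, with a hypothetical obj type standing in for the rxrpc structures (not part of the patch itself):

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct obj {
	refcount_t	ref;
	unsigned int	debug_id;
};

static void obj_put(struct obj *obj)
{
	int r;	/* receives the counter value before the decrement */
	bool dead = __refcount_dec_and_test(&obj->ref, &r);

	/* r - 1 is the post-decrement value the old atomic_dec_return()
	 * handed straight to the trace point. */
	pr_debug("obj %u put, ref now %d\n", obj->debug_id, r - 1);
	if (dead)
		kfree(obj);	/* rxrpc defers this via call_rcu() instead */
}

refcount_t also saturates and warns rather than silently wrapping, which is presumably why hand-rolled sanity checks such as CHECK_SLAB_OKAY() and some of the ASSERTCMP()s could be dropped alongside the conversion.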
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index cc7e30733feb0..34f389975a7d0 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -72,7 +72,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	timer_setup(&rxnet->client_conn_reap_timer,
 		    rxrpc_client_conn_reap_timeout, 0);
 
-	INIT_LIST_HEAD(&rxnet->local_endpoints);
+	INIT_HLIST_HEAD(&rxnet->local_endpoints);
 	mutex_init(&rxnet->local_mutex);
 
 	hash_init(rxnet->peer_hash);
@@ -98,6 +98,9 @@ static __net_init int rxrpc_init_net(struct net *net)
 	proc_create_net("peers", 0444, rxnet->proc_net,
 			&rxrpc_peer_seq_ops,
 			sizeof(struct seq_net_private));
+	proc_create_net("locals", 0444, rxnet->proc_net,
+			&rxrpc_local_seq_ops,
+			sizeof(struct seq_net_private));
 	return 0;
 
 err_proc:
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 9683617db7049..08c117bc083ec 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -93,7 +93,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 	*_hard_ack = hard_ack;
 	*_top = top;
 
-	pkt->ack.bufferSpace = htons(8);
+	pkt->ack.bufferSpace = htons(0);
 	pkt->ack.maxSkew = htons(0);
 	pkt->ack.firstPacket = htonl(hard_ack + 1);
 	pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 0298fe2ad6d32..26d2ae9baaf2c 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -121,7 +121,7 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
 	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
 		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
-		    atomic_read(&peer->usage) > 0)
+		    refcount_read(&peer->ref) > 0)
 			return peer;
 	}
@@ -140,7 +140,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
 	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
 	if (peer) {
 		_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
-		_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
+		_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
 	}
 	return peer;
 }
@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
 	if (peer) {
-		atomic_set(&peer->usage, 1);
+		refcount_set(&peer->ref, 1);
 		peer->local = rxrpc_get_local(local);
 		INIT_HLIST_HEAD(&peer->error_targets);
 		peer->service_conns = RB_ROOT;
@@ -378,7 +378,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 
 	_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
 
-	_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
+	_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
 	return peer;
 }
@@ -388,10 +388,10 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
-	int n;
+	int r;
 
-	n = atomic_inc_return(&peer->usage);
-	trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
+	__refcount_inc(&peer->ref, &r);
+	trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
 	return peer;
 }
@@ -401,11 +401,11 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
+	int r;
 
 	if (peer) {
-		int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
-		if (n > 0)
-			trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
+		if (__refcount_inc_not_zero(&peer->ref, &r))
+			trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
 		else
 			peer = NULL;
 	}
@@ -436,13 +436,14 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id;
-	int n;
+	bool dead;
+	int r;
 
 	if (peer) {
 		debug_id = peer->debug_id;
-		n = atomic_dec_return(&peer->usage);
-		trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
-		if (n == 0)
+		dead = __refcount_dec_and_test(&peer->ref, &r);
+		trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+		if (dead)
 			__rxrpc_put_peer(peer);
 	}
 }
@@ -455,11 +456,12 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = peer->debug_id;
-	int n;
+	bool dead;
+	int r;
 
-	n = atomic_dec_return(&peer->usage);
-	trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
-	if (n == 0) {
+	dead = __refcount_dec_and_test(&peer->ref, &r);
+	trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+	if (dead) {
 		hash_del_rcu(&peer->hash_link);
 		list_del_init(&peer->keepalive_link);
 		rxrpc_free_peer(peer);
@@ -481,7 +483,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
 		hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
 			pr_err("Leaked peer %u {%u} %pISp\n",
 			       peer->debug_id,
-			       atomic_read(&peer->usage),
+			       refcount_read(&peer->ref),
 			       &peer->srx.transport);
 		}
 	}
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index e2f990754f882..8967201fd8e54 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -107,7 +107,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
 		   call->cid,
 		   call->call_id,
 		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
-		   atomic_read(&call->usage),
+		   refcount_read(&call->ref),
 		   rxrpc_call_states[call->state],
 		   call->abort_code,
 		   call->debug_id,
@@ -189,7 +189,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
 		   conn->service_id,
 		   conn->proto.cid,
 		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
-		   atomic_read(&conn->usage),
+		   refcount_read(&conn->ref),
 		   rxrpc_conn_states[conn->state],
 		   key_serial(conn->params.key),
 		   atomic_read(&conn->serial),
@@ -239,7 +239,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
 			   " %3u %5u %6llus %8u %8u\n",
 			   lbuff,
 			   rbuff,
-			   atomic_read(&peer->usage),
+			   refcount_read(&peer->ref),
 			   peer->cong_cwnd,
 			   peer->mtu,
 			   now - peer->last_tx_at,
@@ -334,3 +334,72 @@ const struct seq_operations rxrpc_peer_seq_ops = {
 	.stop   = rxrpc_peer_seq_stop,
 	.show   = rxrpc_peer_seq_show,
 };
+
+/*
+ * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
+ */
+static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
+{
+	struct rxrpc_local *local;
+	char lbuff[50];
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq,
+			 "Proto Local "
+			 " Use Act\n");
+		return 0;
+	}
+
+	local = hlist_entry(v, struct rxrpc_local, link);
+
+	sprintf(lbuff, "%pISpc", &local->srx.transport);
+
+	seq_printf(seq,
+		   "UDP %-47.47s %3u %3u\n",
+		   lbuff,
+		   refcount_read(&local->ref),
+		   atomic_read(&local->active_users));
+
+	return 0;
+}
+
+static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
+	__acquires(rcu)
+{
+	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+	unsigned int n;
+
+	rcu_read_lock();
+
+	if (*_pos >= UINT_MAX)
+		return NULL;
+
+	n = *_pos;
+	if (n == 0)
+		return SEQ_START_TOKEN;
+
+	return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
+}
+
+static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
+{
+	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+	if (*_pos >= UINT_MAX)
+		return NULL;
+
+	return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
+}
+
+static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
+	__releases(rcu)
+{
+	rcu_read_unlock();
+}
+
+const struct seq_operations rxrpc_local_seq_ops = {
+	.start  = rxrpc_local_seq_start,
+	.next   = rxrpc_local_seq_next,
+	.stop   = rxrpc_local_seq_stop,
+	.show   = rxrpc_local_seq_show,
+};
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f114dc2af5cf3..5345e8eefd33c 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -451,7 +451,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
 	 * directly into the target buffer.
 	 */
 	sg = _sg;
-	nsg = skb_shinfo(skb)->nr_frags;
+	nsg = skb_shinfo(skb)->nr_frags + 1;
 	if (nsg <= 4) {
 		nsg = 4;
 	} else {
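The rxkad change just above is a buffer-sizing fix: skb_to_sgvec() emits one scatterlist entry for the skb's linear head area in addition to one per page fragment, so a list sized by nr_frags alone can be overrun whenever the head holds data. A sketch of the sizing rule (illustrative only, and assuming no frag_list, which would require further entries):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* Map a whole skb for crypto: the list needs nr_frags + 1 entries. */
static int map_whole_skb(struct sk_buff *skb, struct scatterlist *sg,
			 int nsg)
{
	if (nsg < skb_shinfo(skb)->nr_frags + 1)
		return -EMSGSIZE;

	sg_init_table(sg, nsg);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}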
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index eef3c14fd1c18..a670553159abe 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -733,7 +733,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 			if (call->tx_total_len != -1 ||
 			    call->tx_pending ||
 			    call->tx_top != 0)
-				goto error_put;
+				goto out_put_unlock;
 			call->tx_total_len = p.call.tx_total_len;
 		}
 	}
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 0348d2bf6f7d8..580a5acffee71 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -71,7 +71,6 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 	const void *here = __builtin_return_address(0);
 	if (skb) {
 		int n;
-		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(skb));
 		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
 				rxrpc_skb(skb)->rx_flags, here);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index d762e89ab74f7..bc4e5da76fa6f 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -976,7 +976,7 @@ config NET_ACT_TUNNEL_KEY
 
 config NET_ACT_CT
 	tristate "connection tracking tc action"
-	depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT && NF_FLOW_TABLE
+	depends on NET_CLS_ACT && NF_CONNTRACK && (!NF_NAT || NF_NAT) && NF_FLOW_TABLE
 	help
 	  Say Y here to allow sending the packets to conntrack module.
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index a4c7ba35a3438..78f1cd70c8d19 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -307,7 +307,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	ret = tcf_idr_check_alloc(tn, &index, act, bind);
 	if (!ret) {
 		ret = tcf_idr_create(tn, index, est, act,
-				     &act_bpf_ops, bind, true, 0);
+				     &act_bpf_ops, bind, true, flags);
 		if (ret < 0) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index e19885d7fe2cb..b6576a250e851 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -62,7 +62,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
 
 	c = nf_ct_get(skb, &ctinfo);
 	if (c) {
-		skb->mark = c->mark;
+		skb->mark = READ_ONCE(c->mark);
 		/* using overlimits stats to count how many packets marked */
 		ca->tcf_qstats.overlimits++;
 		goto out;
@@ -82,7 +82,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
 	c = nf_ct_tuplehash_to_ctrack(thash);
 	/* using overlimits stats to count how many packets marked */
 	ca->tcf_qstats.overlimits++;
-	skb->mark = c->mark;
+	skb->mark = READ_ONCE(c->mark);
 	nf_ct_put(c);
 
 out:
@@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 	ret = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!ret) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_connmark_ops, bind, false, 0);
+				     &act_connmark_ops, bind, false, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 825b3e9b55f7e..2d41d866de3e3 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -177,7 +177,7 @@ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
 	entry = tcf_ct_flow_table_flow_action_get_next(action);
 	entry->id = FLOW_ACTION_CT_METADATA;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
-	entry->ct_metadata.mark = ct->mark;
+	entry->ct_metadata.mark = READ_ONCE(ct->mark);
 #endif
 	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
 					     IP_CT_ESTABLISHED_REPLY;
@@ -843,9 +843,9 @@ static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
 	if (!mask)
 		return;
 
-	new_mark = mark | (ct->mark & ~(mask));
-	if (ct->mark != new_mark) {
-		ct->mark = new_mark;
+	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
+	if (READ_ONCE(ct->mark) != new_mark) {
+		WRITE_ONCE(ct->mark, new_mark);
 		if (nf_ct_is_confirmed(ct))
 			nf_conntrack_event_cache(IPCT_MARK, ct);
 	}
@@ -1293,7 +1293,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
 
 	err = tcf_ct_flow_table_get(params);
 	if (err)
-		goto cleanup;
+		goto cleanup_params;
 
 	spin_lock_bh(&c->tcf_lock);
 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -1308,6 +1308,9 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
 
 	return res;
 
+cleanup_params:
+	if (params->tmpl)
+		nf_ct_put(params->tmpl);
 cleanup:
 	if (goto_ch)
 		tcf_chain_put_by_act(goto_ch);
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index b20c8ce59905b..5aa005835c066 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -33,7 +33,7 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
 {
 	u8 dscp, newdscp;
 
-	newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
+	newdscp = (((READ_ONCE(ct->mark) & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
 		     ~INET_ECN_MASK;
 
 	switch (proto) {
@@ -73,7 +73,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
 				  struct sk_buff *skb)
 {
 	ca->stats_cpmark_set++;
-	skb->mark = ct->mark & cp->cpmarkmask;
+	skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
 }
 
 static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
@@ -92,7 +92,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
 	cp = rcu_dereference_bh(ca->params);
 
 	tcf_lastuse_update(&ca->tcf_tm);
-	bstats_update(&ca->tcf_bstats, skb);
+	tcf_action_update_bstats(&ca->common, skb);
 	action = READ_ONCE(ca->tcf_action);
 
 	wlen = skb_network_offset(skb);
@@ -131,7 +131,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
 	}
 
 	if (cp->mode & CTINFO_MODE_DSCP)
-		if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask))
+		if (!cp->dscpstatemask || (READ_ONCE(ct->mark) & cp->dscpstatemask))
 			tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);
 
 	if (cp->mode & CTINFO_MODE_CPMARK)
@@ -211,8 +211,8 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
 	index = actparm->index;
 	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
-		ret = tcf_idr_create(tn, index, est, a,
-				     &act_ctinfo_ops, bind, false, 0);
+		ret = tcf_idr_create_from_flags(tn, index, est, a,
+						&act_ctinfo_ops, bind, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index a78cb79657182..0e7568a06351b 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
 
 	if (!err) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_gate_ops, bind, false, 0);
+				     &act_gate_ops, bind, false, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index a2ddea04183af..99548b2a1bc83 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
-				     bind, true, 0);
+				     bind, true, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			kfree(p);
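The READ_ONCE()/WRITE_ONCE() churn on ct->mark in act_connmark, act_ct and act_ctinfo exists because the mark is written locklessly from several paths; annotating the accesses makes the race intentional and keeps every access a single, tear-free load or store. The update shape used by tcf_ct_act_set_mark(), reduced to a self-contained sketch (struct conn is a stand-in for struct nf_conn, not the real type):

#include <linux/compiler.h>
#include <linux/types.h>

struct conn {
	u32 mark;	/* written without a lock from several paths */
};

static void conn_set_mark(struct conn *ct, u32 mark, u32 mask)
{
	/* Concurrent writers may still race, but each reader sees one of
	 * the written values, never a torn mix of two. */
	u32 new_mark = mark | (READ_ONCE(ct->mark) & ~mask);

	if (READ_ONCE(ct->mark) != new_mark)
		WRITE_ONCE(ct->mark, new_mark);
}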
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8dc3bec0d3258..080f2952cd536 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -144,7 +144,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, est, a, ops, bind,
-				     false, 0);
+				     false, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index d1486ea496a2c..47b963ded4e43 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -133,6 +133,11 @@ static int valid_label(const struct nlattr *attr,
 {
 	const u32 *label = nla_data(attr);
 
+	if (nla_len(attr) != sizeof(*label)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
+		return -EINVAL;
+	}
+
 	if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
 		NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
 		return -EINVAL;
@@ -144,7 +149,8 @@ static int valid_label(const struct nlattr *attr,
 static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
 	[TCA_MPLS_PARMS]	= NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
 	[TCA_MPLS_PROTO]	= { .type = NLA_U16 },
-	[TCA_MPLS_LABEL]	= NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
+	[TCA_MPLS_LABEL]	= NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+							 valid_label),
 	[TCA_MPLS_TC]		= NLA_POLICY_RANGE(NLA_U8, 0, 7),
 	[TCA_MPLS_TTL]		= NLA_POLICY_MIN(NLA_U8, 1),
 	[TCA_MPLS_BOS]		= NLA_POLICY_RANGE(NLA_U8, 0, 1),
@@ -248,7 +254,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_mpls_ops, bind, true, 0);
+				     &act_mpls_ops, bind, true, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 1ebd2a86d980f..8466dc25fe397 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_nat_ops, bind, false, 0);
+				     &act_nat_ops, bind, false, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 0d5463ddfd62f..db0d3bff19eba 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_pedit_ops, bind, false, 0);
+				     &act_pedit_ops, bind, false, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			goto out_free;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 3807335889590..c30cd3ecb3911 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -87,7 +87,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, NULL, a,
-				     &act_police_ops, bind, true, 0);
+				     &act_police_ops, bind, true, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 3ebf9ede3cf10..2f0e98bcf4945 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_sample_ops, bind, true, 0);
+				     &act_sample_ops, bind, true, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
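The act_mpls hunk above is subtler than it looks: with NLA_U32 the netlink core guaranteed a 4-byte payload before valid_label() ran, but NLA_BINARY makes no length promise, so the validation callback must now check nla_len() itself before dereferencing. A generic sketch of that pattern, with hypothetical attribute and function names:

#include <net/netlink.h>

enum { EXAMPLE_ATTR_VALUE = 1, __EXAMPLE_ATTR_MAX };

/* With NLA_BINARY the core no longer enforces a fixed payload size,
 * so the validate-fn re-checks the length before touching the data. */
static int example_valid_value(const struct nlattr *attr,
			       struct netlink_ext_ack *extack)
{
	const u32 *val = nla_data(attr);

	if (nla_len(attr) != sizeof(*val)) {
		NL_SET_ERR_MSG(extack, "Invalid value length");
		return -EINVAL;
	}
	if (*val > 0xfffff) {		/* made-up range limit */
		NL_SET_ERR_MSG(extack, "Value out of range");
		return -EINVAL;
	}
	return 0;
}

static const struct nla_policy example_policy[__EXAMPLE_ATTR_MAX] = {
	[EXAMPLE_ATTR_VALUE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						      example_valid_value),
};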
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index a4f3d0f0daa96..b9bbc87a87c5b 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -128,7 +128,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_simp_ops, bind, false, 0);
+				     &act_simp_ops, bind, false, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e5f3fb8b00e32..a5661f2d93e99 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_skbedit_ops, bind, true, 0);
+				     &act_skbedit_ops, bind, true, act_flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 8d17a543cc9fe..aa98dcac94b95 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -147,7 +147,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, index, est, a,
-				     &act_skbmod_ops, bind, true, 0);
+				     &act_skbmod_ops, bind, true, flags);
 		if (ret) {
 			tcf_idr_cleanup(tn, index);
 			return ret;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b8ffb7e4f696c..c410a736301bc 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -2124,6 +2124,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	}
 
 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
+		tfilter_put(tp, fh);
 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
 		err = -EINVAL;
 		goto errout;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index e9a8a2c86bbdd..2c0c95204cb5a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/refcount.h>
+#include <linux/rcupdate.h>
 #include <net/act_api.h>
 #include <net/netlink.h>
 #include <net/pkt_cls.h>
@@ -332,12 +333,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		  struct tcindex_filter_result *r, struct nlattr **tb,
 		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
 {
-	struct tcindex_filter_result new_filter_result, *old_r = r;
+	struct tcindex_filter_result new_filter_result;
 	struct tcindex_data *cp = NULL, *oldp;
 	struct tcindex_filter *f = NULL; /* make gcc behave */
 	struct tcf_result cr = {};
 	int err, balloc = 0;
 	struct tcf_exts e;
+	bool update_h = false;
 
 	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 	if (err < 0)
@@ -401,7 +403,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	err = tcindex_filter_result_init(&new_filter_result, cp, net);
 	if (err < 0)
 		goto errout_alloc;
-	if (old_r)
+	if (r)
 		cr = r->res;
 
 	err = -EBUSY;
@@ -455,10 +457,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		}
 	}
 
-	if (cp->perfect)
+	if (cp->perfect) {
 		r = cp->perfect + handle;
-	else
-		r = tcindex_lookup(cp, handle) ? : &new_filter_result;
+	} else {
+		/* imperfect area is updated in-place using rcu */
+		update_h = !!tcindex_lookup(cp, handle);
+		r = &new_filter_result;
+	}
 
 	if (r == &new_filter_result) {
 		f = kzalloc(sizeof(*f), GFP_KERNEL);
@@ -478,21 +483,34 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		tcf_bind_filter(tp, &cr, base);
 	}
 
-	if (old_r && old_r != r) {
-		err = tcindex_filter_result_init(old_r, cp, net);
-		if (err < 0) {
-			kfree(f);
-			goto errout_alloc;
-		}
-	}
-
 	oldp = p;
 	r->res = cr;
 	tcf_exts_change(&r->exts, &e);
 
 	rcu_assign_pointer(tp->root, cp);
 
-	if (r == &new_filter_result) {
+	if (update_h) {
+		struct tcindex_filter __rcu **fp;
+		struct tcindex_filter *cf;
+
+		f->result.res = r->res;
+		tcf_exts_change(&f->result.exts, &r->exts);
+
+		/* imperfect area bucket */
+		fp = cp->h + (handle % cp->hash);
+
+		/* lookup the filter, guaranteed to exist */
+		for (cf = rcu_dereference_bh_rtnl(*fp); cf;
+		     fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
+			if (cf->key == (u16)handle)
+				break;
+
+		f->next = cf->next;
+
+		cf = rcu_replace_pointer(*fp, f, 1);
+		tcf_exts_get_net(&cf->result.exts);
+		tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
+	} else if (r == &new_filter_result) {
 		struct tcindex_filter *nfp;
 		struct tcindex_filter __rcu **fp;
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index f885bea5b4526..b7154103e9cd3 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
 			 * the value carried.
 			 */
 			if (em_hdr->flags & TCF_EM_SIMPLE) {
+				if (em->ops->datalen > 0)
+					goto errout;
 				if (data_len < sizeof(u32))
 					goto errout;
 				em->data = *(u32 *) data;
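The cls_tcindex change above updates an imperfect-hash entry in place under RCU: build a fully initialised replacement filter, splice it over the old one with rcu_replace_pointer(), and defer freeing the displaced filter past a grace period so concurrent readers never see a half-updated node. The same replace-and-defer shape in isolation, assuming a singly linked chain keyed by u16 (names are illustrative, not the tcindex code):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct node {
	struct node __rcu *next;
	u16 key;
	struct rcu_head rcu;
};

/* Replace the chain element matching new->key in one publish step;
 * readers walking under RCU see either the old or the new element.
 * Caller must hold the update-side lock (hence "1" as the lockdep
 * condition, as the tcindex hunk also does). */
static void chain_replace(struct node __rcu **head, struct node *new)
{
	struct node __rcu **fp = head;
	struct node *cur;

	for (cur = rcu_dereference_protected(*fp, 1); cur;
	     fp = &cur->next, cur = rcu_dereference_protected(*fp, 1))
		if (cur->key == new->key)
			break;
	if (!cur)
		return;

	new->next = cur->next;
	rcu_replace_pointer(*fp, new, 1);
	kfree_rcu(cur, rcu);	/* freed only after a grace period */
}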
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6e18aa4177828..54e2309315eb5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1081,12 +1081,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 skip:
 		if (!ingress) {
-			notify_and_destroy(net, skb, n, classid,
-					   rtnl_dereference(dev->qdisc), new);
+			old = rtnl_dereference(dev->qdisc);
 			if (new && !new->ops->attach)
 				qdisc_refcount_inc(new);
 			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
 
+			notify_and_destroy(net, skb, n, classid, old, new);
+
 			if (new && new->ops->attach)
 				new->ops->attach(new);
 		} else {
@@ -1113,6 +1114,11 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 			return -ENOENT;
 		}
 
+		if (new && new->ops == &noqueue_qdisc_ops) {
+			NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
+			return -EINVAL;
+		}
+
 		err = cops->graft(parent, cl, new, &old, extack);
 		if (err)
 			return err;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 1c281cc81f577..95967ce1f370a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -396,10 +396,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			result = tcf_classify(skb, fl, &res, true);
 			if (result < 0)
 				continue;
+			if (result == TC_ACT_SHOT)
+				goto done;
+
 			flow = (struct atm_flow_data *)res.class;
 			if (!flow)
 				flow = lookup_flow(sch, res.classid);
-			goto done;
+			goto drop;
 		}
 	}
 	flow = NULL;
@@ -575,7 +578,6 @@ static void atm_tc_reset(struct Qdisc *sch)
 	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
 	list_for_each_entry(flow, &p->flows, list)
 		qdisc_reset(flow->q);
-	sch->q.qlen = 0;
 }
 
 static void atm_tc_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c580139fcedec..5dc7a3c310c9d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2224,8 +2224,12 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 
 static void cake_reset(struct Qdisc *sch)
 {
+	struct cake_sched_data *q = qdisc_priv(sch);
 	u32 c;
 
+	if (!q->tins)
+		return;
+
 	for (c = 0; c < CAKE_MAX_TINS; c++)
 		cake_clear_tin(sch, c);
 }
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 4a78fcf5d4f98..3da5eb313c246 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -231,6 +231,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		result = tcf_classify(skb, fl, &res, true);
 		if (!fl || result < 0)
 			goto fallback;
+		if (result == TC_ACT_SHOT)
+			return NULL;
 
 		cl = (void *)res.class;
 		if (!cl) {
@@ -251,8 +253,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		case TC_ACT_TRAP:
 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 			fallthrough;
-		case TC_ACT_SHOT:
-			return NULL;
 		case TC_ACT_RECLASSIFY:
 			return cbq_reclassify(skb, cl);
 		}
@@ -1053,7 +1053,6 @@ cbq_reset(struct Qdisc *sch)
 			cl->cpriority = cl->priority;
 		}
 	}
-	sch->q.qlen = 0;
 }
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 2adbd945bf15a..25d2daaa81227 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -315,8 +315,6 @@ static void choke_reset(struct Qdisc *sch)
 		rtnl_qdisc_drop(skb, sch);
 	}
 
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	if (q->tab)
 		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
 	q->head = q->tail = 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index dde564670ad8c..08424aac6da82 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -443,8 +443,6 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void drr_destroy_qdisc(struct Qdisc *sch)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 76ed1a05ded27..a75bc7f80cd7e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -408,8 +408,6 @@ static void dsmark_reset(struct Qdisc *sch)
 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 	if (p->q)
 		qdisc_reset(p->q);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void dsmark_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index c48f91075b5c6..d96103b0e2bf5 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -445,9 +445,6 @@ static void etf_reset(struct Qdisc *sch)
 	timesortedlist_clear(sch);
 	__qdisc_reset_queue(&sch->q);
 
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
-
 	q->last = 0;
 }
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 9c224872ef035..05817c55692f0 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -722,8 +722,6 @@ static void ets_qdisc_reset(struct Qdisc *sch)
 	}
 	for (band = 0; band < q->nbands; band++)
 		qdisc_reset(q->classes[band].qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void ets_qdisc_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 99e8db2621984..01d6eea5b0ce9 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -347,8 +347,6 @@ static void fq_codel_reset(struct Qdisc *sch)
 		codel_vars_init(&flow->cvars);
 	}
 	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	q->memory_usage = 0;
 }
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index c70802785518f..cf04f70e96bf1 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -521,9 +521,6 @@ static void fq_pie_reset(struct Qdisc *sch)
 		INIT_LIST_HEAD(&flow->flowchain);
 		pie_vars_init(&flow->vars);
 	}
-
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 }
 
 static void fq_pie_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d1902fca98447..cdc43a06aa9bc 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1484,8 +1484,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 	}
 	q->eligible = RB_ROOT;
 	qdisc_watchdog_cancel(&q->watchdog);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index cd70dbcbd72fd..ff84ed531199a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -405,7 +405,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 		m = mask;
 		while (m) {
-			int prio = ffz(~m);
+			unsigned int prio = ffz(~m);
+
+			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
+				break;
 			m &= ~(1 << prio);
 
 			if (p->inner.clprio[prio].feed.rb_node)
@@ -966,8 +969,6 @@ static void htb_reset(struct Qdisc *sch)
 	}
 	qdisc_watchdog_cancel(&q->watchdog);
 	__qdisc_reset_queue(&q->direct_queue);
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	memset(q->hlevel, 0, sizeof(q->hlevel));
 	memset(q->row_mask, 0, sizeof(q->row_mask));
 }
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 5c27b4270b908..1c6dbcfa89b87 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -152,7 +152,6 @@ multiq_reset(struct Qdisc *sch)
 
 	for (band = 0; band < q->bands; band++)
 		qdisc_reset(q->queues[band]);
-	sch->q.qlen = 0;
 	q->curband = 0;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 3eabb871a1d52..1c805fe05b82a 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -135,8 +135,6 @@ prio_reset(struct Qdisc *sch)
 
 	for (prio = 0; prio < q->bands; prio++)
 		qdisc_reset(q->queues[prio]);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index af8c63a9ec18c..1d1d81aeb389f 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1458,8 +1458,6 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
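All of the *_reset() hunks in this stretch delete the same two lines because qdisc_reset() in the core now zeroes sch->q.qlen and sch->qstats.backlog for every qdisc; per-qdisc reset ops only need to undo their own private state. Roughly what a reset op is left doing after the cleanup (the example_* names are hypothetical):

#include <net/sch_generic.h>

struct example_sched_data {
	struct Qdisc *child;
	u32 cursor;
};

/* Reset the child and private state only; the core clears the
 * qlen/backlog counters once, for every qdisc. */
static void example_reset(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	if (q->child)
		qdisc_reset(q->child);
	q->cursor = 0;
}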
 
 static void qfq_destroy_qdisc(struct Qdisc *sch)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 40adf1f07a82d..935d90874b1b7 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -72,6 +72,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
+	unsigned int len;
 	int ret;
 
 	q->vars.qavg = red_calc_qavg(&q->parms,
@@ -126,9 +127,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		break;
 	}
 
+	len = qdisc_pkt_len(skb);
 	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		qdisc_qstats_backlog_inc(sch, skb);
+		sch->qstats.backlog += len;
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -176,8 +178,6 @@ static void red_reset(struct Qdisc *sch)
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 	red_restart(&q->vars);
 }
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index da047a37a3bf3..9ded56228ea10 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
 	}
 }
 
-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
 {
 	u32 sfbhash;
 
-	sfbhash = sfb_hash(skb, 0);
+	sfbhash = cb->hashes[0];
 	if (sfbhash)
 		increment_one_qlen(sfbhash, 0, q);
 
-	sfbhash = sfb_hash(skb, 1);
+	sfbhash = cb->hashes[1];
 	if (sfbhash)
 		increment_one_qlen(sfbhash, 1, q);
 }
@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
+	unsigned int len = qdisc_pkt_len(skb);
 	struct Qdisc *child = q->qdisc;
 	struct tcf_proto *fl;
+	struct sfb_skb_cb cb;
 	int i;
 	u32 p_min = ~0;
 	u32 minqlen = ~0;
@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
enqueue:
+	memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
 	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		qdisc_qstats_backlog_inc(sch, skb);
+		sch->qstats.backlog += len;
 		sch->q.qlen++;
-		increment_qlen(skb, q);
+		increment_qlen(&cb, q);
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.childdrop++;
 		qdisc_qstats_drop(sch);
@@ -452,9 +455,8 @@ static void sfb_reset(struct Qdisc *sch)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
+	if (likely(q->qdisc))
+		qdisc_reset(q->qdisc);
 	q->slot = 0;
 	q->double_buffering = false;
 	sfb_zero_all_buckets(q);
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 7a5e4c4547156..df72fb83d9c7d 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -213,9 +213,6 @@ static void skbprio_reset(struct Qdisc *sch)
 	struct skbprio_sched_data *q = qdisc_priv(sch);
 	int prio;
 
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
-
 	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
 		__skb_queue_purge(&q->qdiscs[prio]);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index eca525791013e..2d842f31ec5a8 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -65,6 +65,7 @@ struct taprio_sched {
 	u32 flags;
 	enum tk_offsets tk_offset;
 	int clockid;
+	bool offloaded;
 	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
 				    * speeds it's sub-nanoseconds per byte
 				    */
@@ -1267,6 +1268,8 @@ static int taprio_enable_offload(struct net_device *dev,
 		goto done;
 	}
 
+	q->offloaded = true;
+
 done:
 	taprio_offload_free(offload);
@@ -1281,12 +1284,9 @@ static int taprio_disable_offload(struct net_device *dev,
 	struct tc_taprio_qopt_offload *offload;
 	int err;
 
-	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
+	if (!q->offloaded)
 		return 0;
 
-	if (!ops->ndo_setup_tc)
-		return -EOPNOTSUPP;
-
 	offload = taprio_offload_alloc(0);
 	if (!offload) {
 		NL_SET_ERR_MSG(extack,
@@ -1302,6 +1302,8 @@ static int taprio_disable_offload(struct net_device *dev,
 		goto out;
 	}
 
+	q->offloaded = false;
+
 out:
 	taprio_offload_free(offload);
@@ -1619,13 +1621,12 @@ static void taprio_reset(struct Qdisc *sch)
 	int i;
 
 	hrtimer_cancel(&q->advance_timer);
+
 	if (q->qdiscs) {
 		for (i = 0; i < dev->num_tx_queues; i++)
 			if (q->qdiscs[i])
 				qdisc_reset(q->qdiscs[i]);
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void taprio_destroy(struct Qdisc *sch)
@@ -1642,6 +1643,7 @@ static void taprio_destroy(struct Qdisc *sch)
 	 * happens in qdisc_create(), after taprio_init() has been called.
 	 */
 	hrtimer_cancel(&q->advance_timer);
+	qdisc_synchronize(sch);
 
 	taprio_disable_offload(dev, q, NULL);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 6eb17004a9e44..7461e5c67d50a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -316,8 +316,6 @@ static void tbf_reset(struct Qdisc *sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 	q->t_c = ktime_get_ns();
 	q->tokens = q->buffer;
 	q->ptokens = q->mtu;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 6af6b95bdb672..79aaab51cbf5c 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -124,7 +124,6 @@ teql_reset(struct Qdisc *sch)
 	struct teql_sched_data *dat = qdisc_priv(sch);
 
 	skb_queue_purge(&dat->q);
-	sch->q.qlen = 0;
 }
 
 static void
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index db6b7373d16c3..34964145514e6 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -863,12 +863,17 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
 	}
 
 	list_del_init(&shkey->key_list);
-	sctp_auth_shkey_release(shkey);
 	list_add(&cur_key->key_list, sh_keys);
 
-	if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
-		sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+	if (asoc && asoc->active_key_id == auth_key->sca_keynumber &&
+	    sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+		list_del_init(&cur_key->key_list);
+		sctp_auth_shkey_release(cur_key);
+		list_add(&shkey->key_list, sh_keys);
+		return -ENOMEM;
+	}
 
+	sctp_auth_shkey_release(shkey);
 	return 0;
 }
@@ -902,8 +907,13 @@ int sctp_auth_set_active_key(struct sctp_endpoint *ep,
 		return -EINVAL;
 
 	if (asoc) {
+		__u16 active_key_id = asoc->active_key_id;
+
 		asoc->active_key_id = key_id;
-		sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+		if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+			asoc->active_key_id = active_key_id;
+			return -ENOMEM;
+		}
 	} else
 		ep->active_key_id = key_id;
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 59e653b528b1f..6b95d3ba8fe1c 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
 		}
 	}
 
+	/* If somehow no addresses were found that can be used with this
+	 * scope, it's an error.
+	 */
+	if (list_empty(&dest->address_list))
+		error = -ENETUNREACH;
+
 out:
 	if (error)
 		sctp_bind_addr_clean(dest);
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 68ff82ff49a3d..07d0ada23bfd2 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -349,11 +349,9 @@ static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
 	const struct inet_diag_req_v2 *r = commp->r;
-	struct sctp_association *assoc =
-		list_entry(ep->asocs.next, struct sctp_association, asocs);
 
 	/* find the ep only once through the transports by this condition */
-	if (tsp->asoc != assoc)
+	if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
 		return 0;
 
 	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 3fd06a27105dd..83a89dcf75ed0 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -384,6 +384,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 {
 	struct sctp_outq *q = &asoc->outqueue;
 	struct sctp_chunk *chk, *temp;
+	struct sctp_stream_out *sout;
 
 	q->sched->unsched_all(&asoc->stream);
@@ -398,12 +399,14 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 		sctp_sched_dequeue_common(q, chk);
 		asoc->sent_cnt_removable--;
 		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
-		if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
-			struct sctp_stream_out *streamout =
-				SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
-			streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
-		}
 
+		sout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
+		sout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
+
+		/* clear out_curr if all frag chunks are pruned */
+		if (asoc->stream.out_curr == sout &&
+		    list_is_last(&chk->frag_list, &chk->msg->chunks))
+			asoc->stream.out_curr = NULL;
 		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
 		sctp_chunk_free(chk);
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index ef9fceadef8d5..ee6514af830f7 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -52,6 +52,19 @@ static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
 	}
 }
 
+static void sctp_stream_free_ext(struct sctp_stream *stream, __u16 sid)
+{
+	struct sctp_sched_ops *sched;
+
+	if (!SCTP_SO(stream, sid)->ext)
+		return;
+
+	sched = sctp_sched_ops_from_stream(stream);
+	sched->free_sid(stream, sid);
+	kfree(SCTP_SO(stream, sid)->ext);
+	SCTP_SO(stream, sid)->ext = NULL;
+}
+
 /* Migrates chunks from stream queues to new stream queues if needed,
  * but not across associations. Also, removes those chunks to streams
  * higher than the new max.
@@ -70,16 +83,14 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
 		 * sctp_stream_update will swap ->out pointers.
 		 */
 		for (i = 0; i < outcnt; i++) {
-			kfree(SCTP_SO(new, i)->ext);
+			sctp_stream_free_ext(new, i);
 			SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext;
 			SCTP_SO(stream, i)->ext = NULL;
 		}
 	}
 
-	for (i = outcnt; i < stream->outcnt; i++) {
-		kfree(SCTP_SO(stream, i)->ext);
-		SCTP_SO(stream, i)->ext = NULL;
-	}
+	for (i = outcnt; i < stream->outcnt; i++)
+		sctp_stream_free_ext(stream, i);
 }
 
 static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
@@ -174,9 +185,9 @@ void sctp_stream_free(struct sctp_stream *stream)
 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
 	int i;
 
-	sched->free(stream);
+	sched->unsched_all(stream);
 	for (i = 0; i < stream->outcnt; i++)
-		kfree(SCTP_SO(stream, i)->ext);
+		sctp_stream_free_ext(stream, i);
 	genradix_free(&stream->out);
 	genradix_free(&stream->in);
 }
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index a2e1d34f52c5b..33c2630c2496b 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -46,6 +46,10 @@ static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
 	return 0;
 }
 
+static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
 static void sctp_sched_fcfs_free(struct sctp_stream *stream)
 {
 }
@@ -96,6 +100,7 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
 	.get = sctp_sched_fcfs_get,
 	.init = sctp_sched_fcfs_init,
 	.init_sid = sctp_sched_fcfs_init_sid,
+	.free_sid = sctp_sched_fcfs_free_sid,
 	.free = sctp_sched_fcfs_free,
 	.enqueue = sctp_sched_fcfs_enqueue,
 	.dequeue = sctp_sched_fcfs_dequeue,
diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
index 80b5a2c4cbc7b..4fc9f2923ed11 100644
--- a/net/sctp/stream_sched_prio.c
+++ b/net/sctp/stream_sched_prio.c
@@ -204,6 +204,24 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
 	return sctp_sched_prio_set(stream, sid, 0, gfp);
 }
 
+static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+	struct sctp_stream_priorities *prio = SCTP_SO(stream, sid)->ext->prio_head;
+	int i;
+
+	if (!prio)
+		return;
+
+	SCTP_SO(stream, sid)->ext->prio_head = NULL;
+	for (i = 0; i < stream->outcnt; i++) {
+		if (SCTP_SO(stream, i)->ext &&
+		    SCTP_SO(stream, i)->ext->prio_head == prio)
+			return;
+	}
+
+	kfree(prio);
+}
+
 static void sctp_sched_prio_free(struct sctp_stream *stream)
 {
 	struct sctp_stream_priorities *prio, *n;
@@ -323,6 +341,7 @@ static struct sctp_sched_ops sctp_sched_prio = {
 	.get = sctp_sched_prio_get,
 	.init = sctp_sched_prio_init,
 	.init_sid = sctp_sched_prio_init_sid,
+	.free_sid = sctp_sched_prio_free_sid,
 	.free = sctp_sched_prio_free,
 	.enqueue = sctp_sched_prio_enqueue,
 	.dequeue = sctp_sched_prio_dequeue,
diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c
index ff425aed62c7f..cc444fe0d67c2 100644
--- a/net/sctp/stream_sched_rr.c
+++ b/net/sctp/stream_sched_rr.c
@@ -90,6 +90,10 @@ static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid,
 	return 0;
 }
 
+static void sctp_sched_rr_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
 static void sctp_sched_rr_free(struct sctp_stream *stream)
 {
 	sctp_sched_rr_unsched_all(stream);
@@ -177,6 +181,7 @@ static struct sctp_sched_ops sctp_sched_rr = {
 	.get = sctp_sched_rr_get,
 	.init = sctp_sched_rr_init,
 	.init_sid = sctp_sched_rr_init_sid,
+	.free_sid = sctp_sched_rr_free_sid,
 	.free = sctp_sched_rr_free,
 	.enqueue = sctp_sched_rr_enqueue,
 	.dequeue = sctp_sched_rr_dequeue,
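The sctp scheduler hunks add a free_sid callback next to init_sid so that one stream's scheduler state can be torn down without freeing the whole scheduler; this matters for the prio scheduler, whose prio_head may be shared by several streams and must only be freed by its last user, while fcfs and rr legitimately provide no-ops. A reduced mirror of how such an ops table is wired (types and names here are illustrative, not the real sctp_sched_ops):

#include <linux/types.h>

struct stream;	/* opaque for the purposes of the sketch */

struct sched_ops {
	int  (*init_sid)(struct stream *s, __u16 sid);
	void (*free_sid)(struct stream *s, __u16 sid);
	void (*free)(struct stream *s);
};

static int  fcfs_init_sid(struct stream *s, __u16 sid) { return 0; }
static void fcfs_free_sid(struct stream *s, __u16 sid) { }
static void fcfs_free(struct stream *s) { }

static const struct sched_ops example_fcfs_ops = {
	.init_sid = fcfs_init_sid,
	.free_sid = fcfs_free_sid,	/* new per-stream teardown hook */
	.free	  = fcfs_free,
};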
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index c16c80963e555..e4af050aec1be 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -79,17 +79,18 @@ static struct ctl_table sctp_table[] = {
 	{ /* sentinel */ }
 };
 
+/* The following index defines are used in sctp_sysctl_net_register().
+ * If you add new items to the sctp_net_table, please ensure that
+ * the index values of these defines hold the same meaning indicated by
+ * their macro names when they appear in sctp_net_table.
+ */
+#define SCTP_RTO_MIN_IDX       0
+#define SCTP_RTO_MAX_IDX       1
+#define SCTP_PF_RETRANS_IDX    2
+#define SCTP_PS_RETRANS_IDX    3
+
 static struct ctl_table sctp_net_table[] = {
-	{
-		.procname	= "rto_initial",
-		.data		= &init_net.sctp.rto_initial,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1         = SYSCTL_ONE,
-		.extra2         = &timer_max
-	},
-	{
+	[SCTP_RTO_MIN_IDX] = {
 		.procname	= "rto_min",
 		.data		= &init_net.sctp.rto_min,
 		.maxlen		= sizeof(unsigned int),
@@ -98,7 +99,7 @@ static struct ctl_table sctp_net_table[] = {
 		.extra1         = SYSCTL_ONE,
 		.extra2         = &init_net.sctp.rto_max
 	},
-	{
+	[SCTP_RTO_MAX_IDX] = {
 		.procname	= "rto_max",
 		.data		= &init_net.sctp.rto_max,
 		.maxlen		= sizeof(unsigned int),
@@ -107,6 +108,33 @@ static struct ctl_table sctp_net_table[] = {
 		.extra1         = &init_net.sctp.rto_min,
 		.extra2         = &timer_max
 	},
+	[SCTP_PF_RETRANS_IDX] = {
+		.procname	= "pf_retrans",
+		.data		= &init_net.sctp.pf_retrans,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &init_net.sctp.ps_retrans,
+	},
+	[SCTP_PS_RETRANS_IDX] = {
+		.procname	= "ps_retrans",
+		.data		= &init_net.sctp.ps_retrans,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &init_net.sctp.pf_retrans,
+		.extra2		= &ps_retrans_max,
+	},
+	{
+		.procname	= "rto_initial",
+		.data		= &init_net.sctp.rto_initial,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1         = SYSCTL_ONE,
+		.extra2         = &timer_max
+	},
 	{
 		.procname	= "rto_alpha_exp_divisor",
 		.data		= &init_net.sctp.rto_alpha,
@@ -202,24 +230,6 @@ static struct ctl_table sctp_net_table[] = {
 		.extra1		= SYSCTL_ONE,
 		.extra2		= SYSCTL_INT_MAX,
 	},
-	{
-		.procname	= "pf_retrans",
-		.data		= &init_net.sctp.pf_retrans,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= SYSCTL_ZERO,
-		.extra2		= &init_net.sctp.ps_retrans,
-	},
-	{
-		.procname	= "ps_retrans",
-		.data		= &init_net.sctp.ps_retrans,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &init_net.sctp.pf_retrans,
-		.extra2		= &ps_retrans_max,
-	},
 	{
 		.procname	= "sndbuf_policy",
 		.data		= &init_net.sctp.sndbuf_policy,
@@ -489,6 +499,11 @@ int sctp_sysctl_net_register(struct net *net)
 	for (i = 0; table[i].data; i++)
 		table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
 
+	table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max;
+	table[SCTP_RTO_MAX_IDX].extra1 = &net->sctp.rto_min;
+	table[SCTP_PF_RETRANS_IDX].extra2 = &net->sctp.ps_retrans;
+	table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans;
+
 	net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
 	if (net->sctp.sysctl_header == NULL) {
 		kfree(table);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index ef2fd28999baf..bf485a2017a4e 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1584,7 +1584,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
 static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
 				     struct smc_buf_desc *buf_desc, bool is_rmb)
 {
-	int i, rc = 0;
+	int i, rc = 0, cnt = 0;
 
 	/* protect against parallel link reconfiguration */
 	mutex_lock(&lgr->llc_conf_mutex);
@@ -1597,9 +1597,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
 			rc = -ENOMEM;
 			goto out;
 		}
+		cnt++;
 	}
 out:
 	mutex_unlock(&lgr->llc_conf_mutex);
+	if (!rc && !cnt)
+		rc = -EINVAL;
 	return rc;
 }
diff --git a/net/socket.c b/net/socket.c
index bcf68b150fe29..8657112a687a4 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1688,30 +1688,22 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 	return __sys_listen(fd, backlog);
 }
 
-int __sys_accept4_file(struct file *file, unsigned file_flags,
+struct file *do_accept(struct file *file, unsigned file_flags,
 		       struct sockaddr __user *upeer_sockaddr,
-		       int __user *upeer_addrlen, int flags,
-		       unsigned long nofile)
+		       int __user *upeer_addrlen, int flags)
 {
 	struct socket *sock, *newsock;
 	struct file *newfile;
-	int err, len, newfd;
+	int err, len;
 	struct sockaddr_storage address;
 
-	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
-		return -EINVAL;
-
-	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
-		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
-
 	sock = sock_from_file(file, &err);
 	if (!sock)
-		goto out;
+		return ERR_PTR(err);
 
-	err = -ENFILE;
 	newsock = sock_alloc();
 	if (!newsock)
-		goto out;
+		return ERR_PTR(-ENFILE);
 
 	newsock->type = sock->type;
 	newsock->ops = sock->ops;
@@ -1722,18 +1714,9 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
 	 */
 	__module_get(newsock->ops->owner);
 
-	newfd = __get_unused_fd_flags(flags, nofile);
-	if (unlikely(newfd < 0)) {
-		err = newfd;
-		sock_release(newsock);
-		goto out;
-	}
 	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
-	if (IS_ERR(newfile)) {
-		err = PTR_ERR(newfile);
-		put_unused_fd(newfd);
-		goto out;
-	}
+	if (IS_ERR(newfile))
+		return newfile;
 
 	err = security_socket_accept(sock, newsock);
 	if (err)
@@ -1758,16 +1741,38 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
 	}
 
 	/* File flags are not inherited via accept() unlike another OSes. */
-
-	fd_install(newfd, newfile);
-	err = newfd;
-out:
-	return err;
+	return newfile;
 out_fd:
 	fput(newfile);
-	put_unused_fd(newfd);
-	goto out;
+	return ERR_PTR(err);
+}
+
+int __sys_accept4_file(struct file *file, unsigned file_flags,
+		       struct sockaddr __user *upeer_sockaddr,
+		       int __user *upeer_addrlen, int flags,
+		       unsigned long nofile)
+{
+	struct file *newfile;
+	int newfd;
+
+	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+		return -EINVAL;
+
+	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+	newfd = __get_unused_fd_flags(flags, nofile);
+	if (unlikely(newfd < 0))
+		return newfd;
+
+	newfile = do_accept(file, file_flags, upeer_sockaddr, upeer_addrlen,
+			    flags);
+	if (IS_ERR(newfile)) {
+		put_unused_fd(newfd);
+		return PTR_ERR(newfile);
+	}
+	fd_install(newfd, newfile);
+	return newfd;
 }
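The socket.c refactor above splits accept in two: an fd-free core, do_accept(), which allocates the new socket, runs the security hook and returns a struct file, and a thin __sys_accept4_file() that owns descriptor reservation and installation. That lets in-kernel callers (io_uring is the usual motivation for this kind of split) consume the file without a descriptor. A sketch of the descriptor-owning caller pattern; accept_to_fd() is a made-up name, and do_accept()'s declaration is assumed visible to the caller:

#include <linux/file.h>
#include <linux/err.h>
#include <linux/socket.h>

static int accept_to_fd(struct file *file, struct sockaddr __user *addr,
			int __user *addrlen, int flags,
			unsigned long nofile)
{
	struct file *newfile;
	int newfd = __get_unused_fd_flags(flags, nofile);

	if (newfd < 0)
		return newfd;

	newfile = do_accept(file, 0, addr, addrlen, flags);
	if (IS_ERR(newfile)) {
		put_unused_fd(newfd);	/* nothing was published yet */
		return PTR_ERR(newfile);
	}

	fd_install(newfd, newfile);	/* the fd becomes visible here */
	return newfd;
}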
 
 /*
@@ -2181,6 +2186,17 @@ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
  *	Shutdown a socket.
  */
+int __sys_shutdown_sock(struct socket *sock, int how)
+{
+	int err;
+
+	err = security_socket_shutdown(sock, how);
+	if (!err)
+		err = sock->ops->shutdown(sock, how);
+
+	return err;
+}
+
 int __sys_shutdown(int fd, int how)
 {
 	int err, fput_needed;
@@ -2188,9 +2204,7 @@ int __sys_shutdown(int fd, int how)
 
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (sock != NULL) {
-		err = security_socket_shutdown(sock, how);
-		if (!err)
-			err = sock->ops->shutdown(sock, how);
+		err = __sys_shutdown_sock(sock, how);
 		fput_light(sock->file, fput_needed);
 	}
 	return err;
@@ -2405,10 +2419,6 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
 long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg,
 			unsigned int flags)
 {
-	/* disallow ancillary data requests from this path */
-	if (msg->msg_control || msg->msg_controllen)
-		return -EINVAL;
-
 	return ____sys_sendmsg(sock, msg, flags, NULL, 0);
 }
@@ -2617,12 +2627,6 @@ long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
 			struct user_msghdr __user *umsg,
 			struct sockaddr __user *uaddr, unsigned int flags)
 {
-	if (msg->msg_control || msg->msg_controllen) {
-		/* disallow ancillary data reqs unless cmsg is plain data */
-		if (!(sock->ops->flags & PROTO_CMSG_DATA_ONLY))
-			return -EINVAL;
-	}
-
 	return ____sys_recvmsg(sock, msg, umsg, uaddr, flags, 0);
 }
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 5f42aa5fc6128..2ff66a6a7e54c 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -301,7 +301,7 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
 	list_for_each_entry(pos, &pipe->in_downcall, list) {
 		if (!uid_eq(pos->uid, uid))
 			continue;
-		if (auth && pos->auth->service != auth->service)
+		if (pos->auth->service != auth->service)
 			continue;
 		refcount_inc(&pos->count);
 		return pos;
@@ -685,6 +685,21 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
 	return err;
 }
 
+static struct gss_upcall_msg *
+gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid)
+{
+	struct gss_upcall_msg *pos;
+	list_for_each_entry(pos, &pipe->in_downcall, list) {
+		if (!uid_eq(pos->uid, uid))
+			continue;
+		if (!rpc_msg_is_inflight(&pos->msg))
+			continue;
+		refcount_inc(&pos->count);
+		return pos;
+	}
+	return NULL;
+}
+
 #define MSG_BUF_MAXSIZE 1024
 
 static ssize_t
@@ -731,7 +746,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 	err = -ENOENT;
 	/* Find a matching upcall */
 	spin_lock(&pipe->lock);
-	gss_msg = __gss_find_upcall(pipe, uid, NULL);
+	gss_msg = gss_find_downcall(pipe, uid);
 	if (gss_msg == NULL) {
 		spin_unlock(&pipe->lock);
 		goto err_put_ctx;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index f5111d62972d3..406ff7f8b156e 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1156,18 +1156,23 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
 		return res;
 
 	inlen = svc_getnl(argv);
-	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
+	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {
+		kfree(in_handle->data);
 		return SVC_DENIED;
+	}
 
 	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
 	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
-	if (!in_token->pages)
+	if (!in_token->pages) {
+		kfree(in_handle->data);
 		return SVC_DENIED;
+	}
 	in_token->page_base = 0;
 	in_token->page_len = inlen;
 	for (i = 0; i < pages; i++) {
 		in_token->pages[i] = alloc_page(GFP_KERNEL);
 		if (!in_token->pages[i]) {
+			kfree(in_handle->data);
 			gss_free_in_token_pages(in_token);
 			return SVC_DENIED;
 		}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 78c6648af7827..c478108ca6a65 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1361,7 +1361,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
 		break;
 	default:
 		err = -EAFNOSUPPORT;
-		goto out;
+		goto out_release;
 	}
 	if (err < 0) {
 		dprintk("RPC: can't bind UDP socket (%d)\n", err);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index dcc1992b14d76..338b06de86d16 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -866,7 +866,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 	return req;
 
 out3:
-	kfree(req->rl_sendbuf);
+	rpcrdma_regbuf_free(req->rl_sendbuf);
 out2:
 	kfree(req);
 out1:
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 6f91b9a306dc3..de63d6d41645c 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1975,6 +1975,9 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
 	/* Ok, everything's fine, try to synch own keys according to peers' */
 	tipc_crypto_key_synch(rx, *skb);
 
+	/* Re-fetch skb cb as skb might be changed in tipc_msg_validate */
+	skb_cb = TIPC_SKB_CB(*skb);
+
 	/* Mark skb decrypted */
 	skb_cb->decrypted = 1;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 14bc20604051d..2730310249e3c 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -147,8 +147,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
 {
 	struct net *net = d->net;
 	struct tipc_net *tn = tipc_net(net);
-	bool trial = time_before(jiffies, tn->addr_trial_end);
 	u32 self = tipc_own_addr(net);
+	bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
 
 	if (mtyp == DSC_TRIAL_FAIL_MSG) {
 		if (!trial)
@@ -210,7 +210,10 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
 	u32 self;
 	int err;
 
-	skb_linearize(skb);
+	if (skb_linearize(skb)) {
+		kfree_skb(skb);
+		return;
+	}
 	hdr = buf_msg(skb);
 
 	if (caps & TIPC_NODE_ID128)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 064fdb8e50e19..c1e56d1f21b38 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2188,7 +2188,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
 		l->net_plane = msg_net_plane(hdr);
 
-	skb_linearize(skb);
+	if (skb_linearize(skb))
+		goto exit;
+
 	hdr = buf_msg(skb);
 	data = msg_data(hdr);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index a37190da5a504..1d90f39129ca0 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -130,7 +130,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
 
 static int map_get(u64 up_map, int i)
 {
-	return (up_map & (1 << i)) >> i;
+	return (up_map & (1ULL << i)) >> i;
 }
 
 static struct tipc_peer *peer_prev(struct tipc_peer *peer)
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 49e8933136526..2d62932b59878 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -877,7 +877,7 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
 	};
 
 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
-	if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+	if (TLV_GET_DATA_LEN(msg->req) < (int)sizeof(struct tipc_name_table_query))
 		return -EINVAL;
 
 	depth = ntohl(ntq->depth);
+1152,9 @@ void tipc_node_check_dest(struct net *net, u32 addr, bool addr_match = false; bool sign_match = false; bool link_up = false; + bool link_is_reset = false; bool accept_addr = false; - bool reset = true; + bool reset = false; char *if_name; unsigned long intv; u16 session; @@ -1173,14 +1174,14 @@ void tipc_node_check_dest(struct net *net, u32 addr, /* Prepare to validate requesting node's signature and media address */ l = le->link; link_up = l && tipc_link_is_up(l); + link_is_reset = l && tipc_link_is_reset(l); addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr)); sign_match = (signature == n->signature); /* These three flags give us eight permutations: */ if (sign_match && addr_match && link_up) { - /* All is fine. Do nothing. */ - reset = false; + /* All is fine. Ignore requests. */ /* Peer node is not a container/local namespace */ if (!n->peer_hash_mix) n->peer_hash_mix = hash_mixes; @@ -1205,6 +1206,7 @@ void tipc_node_check_dest(struct net *net, u32 addr, */ accept_addr = true; *respond = true; + reset = true; } else if (!sign_match && addr_match && link_up) { /* Peer node rebooted. Two possibilities: * - Delayed re-discovery; this link endpoint has already @@ -1236,6 +1238,7 @@ void tipc_node_check_dest(struct net *net, u32 addr, n->signature = signature; accept_addr = true; *respond = true; + reset = true; } if (!accept_addr) @@ -1264,6 +1267,7 @@ void tipc_node_check_dest(struct net *net, u32 addr, tipc_link_fsm_evt(l, LINK_RESET_EVT); if (n->state == NODE_FAILINGOVER) tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); + link_is_reset = tipc_link_is_reset(l); le->link = l; n->link_cnt++; tipc_node_calculate_timer(n, l); @@ -1276,7 +1280,7 @@ void tipc_node_check_dest(struct net *net, u32 addr, memcpy(&le->maddr, maddr, sizeof(*maddr)); exit: tipc_node_write_unlock(n); - if (reset && l && !tipc_link_is_reset(l)) + if (reset && !link_is_reset) tipc_node_link_down(n, b->identity, false); tipc_node_put(n); } @@ -1660,6 +1664,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list, struct tipc_node *n; struct sk_buff_head xmitq; bool node_up = false; + struct net *peer_net; int bearer_id; int rc; @@ -1676,18 +1681,23 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list, return -EHOSTUNREACH; } + rcu_read_lock(); tipc_node_read_lock(n); node_up = node_is_up(n); - if (node_up && n->peer_net && check_net(n->peer_net)) { + peer_net = n->peer_net; + tipc_node_read_unlock(n); + if (node_up && peer_net && check_net(peer_net)) { /* xmit inner linux container */ - tipc_lxc_xmit(n->peer_net, list); + tipc_lxc_xmit(peer_net, list); if (likely(skb_queue_empty(list))) { - tipc_node_read_unlock(n); + rcu_read_unlock(); tipc_node_put(n); return 0; } } + rcu_read_unlock(); + tipc_node_read_lock(n); bearer_id = n->active_links[selector & 1]; if (unlikely(bearer_id == INVALID_BEARER_ID)) { tipc_node_read_unlock(n); diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 13f3143609f9e..89d8a2bd30cd0 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -176,7 +176,7 @@ static void tipc_conn_close(struct tipc_conn *con) conn_put(con); } -static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s) +static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock) { struct tipc_conn *con; int ret; @@ -202,10 +202,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s) } con->conid = ret; s->idr_in_use++; - spin_unlock_bh(&s->idr_lock); set_bit(CF_CONNECTED, &con->flags); con->server = s; + con->sock = sock; + conn_get(con); + 
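/* take an extra reference so the connection stays alive until the
 * caller has finished setting it up; the caller drops it with
 * conn_put() when done */ +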
spin_unlock_bh(&s->idr_lock); return con; } @@ -450,17 +452,24 @@ static void tipc_conn_data_ready(struct sock *sk) static void tipc_topsrv_accept(struct work_struct *work) { struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork); - struct socket *lsock = srv->listener; - struct socket *newsock; + struct socket *newsock, *lsock; struct tipc_conn *con; struct sock *newsk; int ret; + spin_lock_bh(&srv->idr_lock); + if (!srv->listener) { + spin_unlock_bh(&srv->idr_lock); + return; + } + lsock = srv->listener; + spin_unlock_bh(&srv->idr_lock); + while (1) { ret = kernel_accept(lsock, &newsock, O_NONBLOCK); if (ret < 0) return; - con = tipc_conn_alloc(srv); + con = tipc_conn_alloc(srv, newsock); if (IS_ERR(con)) { ret = PTR_ERR(con); sock_release(newsock); @@ -472,11 +481,11 @@ static void tipc_topsrv_accept(struct work_struct *work) newsk->sk_data_ready = tipc_conn_data_ready; newsk->sk_write_space = tipc_conn_write_space; newsk->sk_user_data = con; - con->sock = newsock; write_unlock_bh(&newsk->sk_callback_lock); /* Wake up receive process in case of 'SYN+' message */ newsk->sk_data_ready(newsk); + conn_put(con); } } @@ -489,7 +498,7 @@ static void tipc_topsrv_listener_data_ready(struct sock *sk) read_lock_bh(&sk->sk_callback_lock); srv = sk->sk_user_data; - if (srv->listener) + if (srv) queue_work(srv->rcv_wq, &srv->awork); read_unlock_bh(&sk->sk_callback_lock); } @@ -568,19 +577,19 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower, sub.seq.upper = upper; sub.timeout = TIPC_WAIT_FOREVER; sub.filter = filter; - *(u32 *)&sub.usr_handle = port; + *(u64 *)&sub.usr_handle = (u64)port; - con = tipc_conn_alloc(tipc_topsrv(net)); + con = tipc_conn_alloc(tipc_topsrv(net), NULL); if (IS_ERR(con)) return false; *conid = con->conid; - con->sock = NULL; rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub); - if (rc >= 0) - return true; + if (rc) + conn_put(con); + conn_put(con); - return false; + return !rc; } void tipc_topsrv_kern_unsubscr(struct net *net, int conid) @@ -699,8 +708,9 @@ static void tipc_topsrv_stop(struct net *net) __module_get(lsock->sk->sk_prot_creator->owner); srv->listener = NULL; spin_unlock_bh(&srv->idr_lock); - sock_release(lsock); + tipc_topsrv_work_stop(srv); + sock_release(lsock); idr_destroy(&srv->conn_idr); kfree(srv); } diff --git a/net/unix/diag.c b/net/unix/diag.c index 9ff64f9df1f3b..951b33fa8f5cf 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -113,14 +113,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql); } -static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb) +static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb, + struct user_namespace *user_ns) { - uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk)); + uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk)); return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid); } static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, - u32 portid, u32 seq, u32 flags, int sk_ino) + struct user_namespace *user_ns, + u32 portid, u32 seq, u32 flags, int sk_ino) { struct nlmsghdr *nlh; struct unix_diag_msg *rep; @@ -166,7 +168,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r goto out_nlmsg_trim; if ((req->udiag_show & UDIAG_SHOW_UID) && - sk_diag_dump_uid(sk, skb)) + sk_diag_dump_uid(sk, skb, user_ns)) goto out_nlmsg_trim; nlmsg_end(skb, nlh); @@ -178,7 +180,8 @@ static int 
sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r } static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, - u32 portid, u32 seq, u32 flags) + struct user_namespace *user_ns, + u32 portid, u32 seq, u32 flags) { int sk_ino; @@ -189,7 +192,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r if (!sk_ino) return 0; - return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino); + return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino); } static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) @@ -217,7 +220,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) goto next; if (!(req->udiag_states & (1 << sk->sk_state))) goto next; - if (sk_diag_dump(sk, skb, req, + if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) @@ -285,7 +288,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb, if (!rep) goto out; - err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid, + err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk), + NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0, req->udiag_ino); if (err < 0) { nlmsg_free(rep); diff --git a/net/unix/garbage.c b/net/unix/garbage.c index d45d5366115a7..dc27635403932 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -204,6 +204,7 @@ void wait_for_unix_gc(void) /* The external entry point: unix_gc() */ void unix_gc(void) { + struct sk_buff *next_skb, *skb; struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; @@ -297,11 +298,30 @@ void unix_gc(void) spin_unlock(&unix_gc_lock); + /* We need io_uring to clean its registered files, ignore all io_uring + * originated skbs. It's fine as io_uring doesn't keep references to + * other io_uring instances and so killing all other files in the cycle + * will put all io_uring references forcing it to go through normal + * release path eventually putting registered files. + */ + skb_queue_walk_safe(&hitlist, skb, next_skb) { + if (skb->scm_io_uring) { + __skb_unlink(skb, &hitlist); + skb_queue_tail(&skb->sk->sk_receive_queue, skb); + } + } + /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); + /* There could be io_uring registered files, just push them back to + * the inflight list + */ + list_for_each_entry_safe(u, next, &gc_candidates, link) + list_move_tail(&u->link, &gc_inflight_list); + /* All candidates should have been detached by now.
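 * (any candidates kept alive only by io_uring registered files were
 * moved back to gc_inflight_list above, so the list checked below
 * must be empty)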
*/ BUG_ON(!list_empty(&gc_candidates)); diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index d6d3a05c008a4..c9ee9259af48a 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1196,7 +1196,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt); void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt) { - kfree(pkt->buf); + kvfree(pkt->buf); kfree(pkt); } EXPORT_SYMBOL_GPL(virtio_transport_free_pkt); diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a9ca95a0fcdda..8c2856cbfeccf 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1713,7 +1713,11 @@ static int vmci_transport_dgram_enqueue( if (!dg) return -ENOMEM; - memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); + err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); + if (err) { + kfree(dg); + return err; + } dg->dst = vmci_make_handle(remote_addr->svm_cid, remote_addr->svm_port); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index fd848609e656a..90297264d8aea 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1064,6 +1064,8 @@ MODULE_FIRMWARE("regulatory.db"); static int query_regdb_file(const char *alpha2) { + int err; + ASSERT_RTNL(); if (regdb) @@ -1073,9 +1075,13 @@ static int query_regdb_file(const char *alpha2) if (!alpha2) return -ENOMEM; - return request_firmware_nowait(THIS_MODULE, true, "regulatory.db", - ®_pdev->dev, GFP_KERNEL, - (void *)alpha2, regdb_fw_cb); + err = request_firmware_nowait(THIS_MODULE, true, "regulatory.db", + ®_pdev->dev, GFP_KERNEL, + (void *)alpha2, regdb_fw_cb); + if (err) + kfree(alpha2); + + return err; } int reg_reload_regdb(void) @@ -4179,8 +4185,10 @@ static int __init regulatory_init_db(void) return -EINVAL; err = load_builtin_regdb_keys(); - if (err) + if (err) { + platform_device_unregister(reg_pdev); return err; + } /* We always try to get an update for the static regdomain */ err = regulatory_hint_core(cfg80211_world_regdom->alpha2); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 6dc9b7e22b71d..d09dabae56271 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -143,18 +143,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev, lockdep_assert_held(&rdev->bss_lock); bss->refcount++; - if (bss->pub.hidden_beacon_bss) { - bss = container_of(bss->pub.hidden_beacon_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } - if (bss->pub.transmitted_bss) { - bss = container_of(bss->pub.transmitted_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } + + if (bss->pub.hidden_beacon_bss) + bss_from_pub(bss->pub.hidden_beacon_bss)->refcount++; + + if (bss->pub.transmitted_bss) + bss_from_pub(bss->pub.transmitted_bss)->refcount++; } static inline void bss_ref_put(struct cfg80211_registered_device *rdev, @@ -304,7 +298,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie; - while (tmp_old + tmp_old[1] + 2 - ie <= ielen) { + while (tmp_old + 2 - ie <= ielen && + tmp_old + tmp_old[1] + 2 - ie <= ielen) { if (tmp_old[0] == 0) { tmp_old++; continue; @@ -335,7 +330,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, * determine if they are the same ie. 
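 * (the added length checks guarantee both elements carry at least
 * the five bytes compared below, i.e. the OUI plus vendor type)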
*/ if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) { - if (!memcmp(tmp_old + 2, tmp + 2, 5)) { + if (tmp_old[1] >= 5 && tmp[1] >= 5 && + !memcmp(tmp_old + 2, tmp + 2, 5)) { /* same vendor ie, copy from * subelement */ @@ -364,7 +360,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, * copied to new ie, skip ssid, capability, bssid-index ie */ tmp_new = sub_copy; - while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { + while (tmp_new + 2 - sub_copy <= subie_len && + tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP || tmp_new[0] == WLAN_EID_SSID)) { memcpy(pos, tmp_new, tmp_new[1] + 2); @@ -429,6 +426,15 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss, rcu_read_unlock(); + /* + * This is a bit weird - it's not on the list, but already on another + * one! The only way that could happen is if there's some BSSID/SSID + * shared by multiple APs in their multi-BSSID profiles, potentially + * with hidden SSID mixed in ... ignore it. + */ + if (!list_empty(&nontrans_bss->nontrans_list)) + return -EINVAL; + /* add to the list */ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list); return 0; @@ -1597,6 +1603,23 @@ struct cfg80211_non_tx_bss { u8 bssid_index; }; +static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known, + const struct cfg80211_bss_ies *new_ies, + const struct cfg80211_bss_ies *old_ies) +{ + struct cfg80211_internal_bss *bss; + + /* Assign beacon IEs to all sub entries */ + list_for_each_entry(bss, &known->hidden_list, hidden_list) { + const struct cfg80211_bss_ies *ies; + + ies = rcu_access_pointer(bss->pub.beacon_ies); + WARN_ON(ies != old_ies); + + rcu_assign_pointer(bss->pub.beacon_ies, new_ies); + } +} + static bool cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *known, @@ -1620,7 +1643,6 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } else if (rcu_access_pointer(new->pub.beacon_ies)) { const struct cfg80211_bss_ies *old; - struct cfg80211_internal_bss *bss; if (known->pub.hidden_beacon_bss && !list_empty(&known->hidden_list)) { @@ -1648,16 +1670,9 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, if (old == rcu_access_pointer(known->pub.ies)) rcu_assign_pointer(known->pub.ies, new->pub.beacon_ies); - /* Assign beacon IEs to all sub entries */ - list_for_each_entry(bss, &known->hidden_list, hidden_list) { - const struct cfg80211_bss_ies *ies; - - ies = rcu_access_pointer(bss->pub.beacon_ies); - WARN_ON(ies != old); - - rcu_assign_pointer(bss->pub.beacon_ies, - new->pub.beacon_ies); - } + cfg80211_update_hidden_bsses(known, + rcu_access_pointer(new->pub.beacon_ies), + old); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); @@ -1734,6 +1749,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, new->refcount = 1; INIT_LIST_HEAD(&new->hidden_list); INIT_LIST_HEAD(&new->pub.nontrans_list); + /* we'll set this later if it was non-NULL */ + new->pub.transmitted_bss = NULL; if (rcu_access_pointer(tmp->pub.proberesp_ies)) { hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN); @@ -1971,10 +1988,15 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, spin_lock_bh(&rdev->bss_lock); if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, &res->pub)) { - if (__cfg80211_unlink_bss(rdev, res)) + if (__cfg80211_unlink_bss(rdev, res)) { rdev->bss_generation++; + res = NULL; + } } spin_unlock_bh(&rdev->bss_lock); + + 
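/* if the entry was unlinked above, its reference is gone; don't
 * hand it back to the caller */ +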
if (!res) + return NULL; } trace_cfg80211_return_bss(&res->pub); @@ -2093,6 +2115,8 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) { if (elem->datalen < 4) continue; + if (elem->data[0] < 1 || (int)elem->data[0] > 8) + continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 profile_len; @@ -2228,7 +2252,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, size_t new_ie_len; struct cfg80211_bss_ies *new_ies; const struct cfg80211_bss_ies *old; - u8 cpy_len; + size_t cpy_len; lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock); @@ -2295,6 +2319,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, } else { old = rcu_access_pointer(nontrans_bss->beacon_ies); rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies); + cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss), + new_ies, old); rcu_assign_pointer(nontrans_bss->ies, new_ies); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); @@ -2441,10 +2467,15 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, const struct cfg80211_bss_ies *ies1, *ies2; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - struct cfg80211_non_tx_bss non_tx_data; + struct cfg80211_non_tx_bss non_tx_data = {}; res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, len, gfp); + + /* don't do any further MBSSID handling for S1G */ + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) + return res; + if (!res || !wiphy->support_mbssid || !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return res; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d231d4620c38f..161dc194e6342 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -492,6 +492,12 @@ static int x25_listen(struct socket *sock, int backlog) int rc = -EOPNOTSUPP; lock_sock(sk); + if (sock->state != SS_UNCONNECTED) { + rc = -EINVAL; + release_sock(sk); + return rc; + } + if (sk->sk_state != TCP_LISTEN) { memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 25bf72ee6cad0..226397add422a 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -117,7 +117,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, if (!pskb_may_pull(skb, 1)) { x25_neigh_put(nb); - return 0; + goto drop; } switch (skb->data[0]) { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index ca4716b92774b..691841dc6d334 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -742,8 +742,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) goto out_unlock; } - err = xp_assign_dev_shared(xs->pool, umem_xs->umem, - dev, qid); + err = xp_assign_dev_shared(xs->pool, umem_xs, dev, + qid); if (err) { xp_destroy(xs->pool); xs->pool = NULL; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index e63a285a98565..c347e52f58df8 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -198,17 +198,18 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, return __xp_assign_dev(pool, dev, queue_id, flags); } -int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, +int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs, struct net_device *dev, u16 queue_id) { u16 flags; + struct xdp_umem *umem = umem_xs->umem; /* One fill and completion ring required for each queue id. */ if (!pool->fq || !pool->cq) return -EINVAL; flags = umem->zc ? 
XDP_ZEROCOPY : XDP_COPY; - if (pool->uses_need_wakeup) + if (umem_xs->pool->uses_need_wakeup) flags |= XDP_USE_NEED_WAKEUP; return __xp_assign_dev(pool, dev, queue_id, flags); diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index a0f62fa02e06e..8cbf45a8bcdc2 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -5,6 +5,7 @@ * Based on code and translator idea by: Florian Westphal */ #include <linux/compat.h> +#include <linux/nospec.h> #include <linux/xfrm.h> @@ -302,7 +303,7 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src) nla_for_each_attr(nla, attrs, len, remaining) { int err; - switch (type) { + switch (nlh_src->nlmsg_type) { case XFRM_MSG_NEWSPDINFO: err = xfrm_nla_cpy(dst, nla, nla_len(nla)); break; @@ -437,6 +438,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla, NL_SET_ERR_MSG(extack, "Bad attribute"); return -EOPNOTSUPP; } + type = array_index_nospec(type, XFRMA_MAX + 1); if (nla_len(nla) < compat_policy[type].len) { NL_SET_ERR_MSG(extack, "Attribute bad length"); return -EOPNOTSUPP; } diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index c255aac6b816b..8b8e957a69c36 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -97,6 +97,18 @@ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb) } } +static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb) +{ + struct xfrm_offload *xo = xfrm_offload(skb); + __u32 seq = xo->seq.low; + + seq += skb_shinfo(skb)->gso_segs; + if (unlikely(seq < xo->seq.low)) + return true; + + return false; +} + struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again) { int err; @@ -134,7 +146,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } - if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) { + if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) || + unlikely(xmit_xfrm_check_overflow(skb)))) { struct sk_buff *segs; /* Packet got rerouted, fixup features and segment it.
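 * (we also get here when xmit_xfrm_check_overflow() predicts the low
 * 32 bits of the sequence number would wrap inside this GSO batch)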
*/ diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 77e82033ad700..fef99a1c5df10 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -277,8 +277,7 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) goto out; if (x->props.flags & XFRM_STATE_DECAP_DSCP) - ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)), - ipipv6_hdr(skb)); + ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb)); if (!(x->props.flags & XFRM_STATE_NOECN)) ipip6_ecn_decapsulate(skb); diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 0814320472f18..24ac6805275e9 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -212,6 +212,7 @@ static void ipcomp_free_scratches(void) vfree(*per_cpu_ptr(scratches, i)); free_percpu(scratches); + ipcomp_scratches = NULL; } static void * __percpu *ipcomp_alloc_scratches(void) diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index c6a4338a0d08e..65d009e3b6bbe 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -657,7 +657,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff oseq += skb_shinfo(skb)->gso_segs; } - if (unlikely(oseq < replay_esn->oseq)) { + if (unlikely(xo->seq.low < replay_esn->oseq)) { XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi; xo->seq.hi = oseq_hi; replay_esn->oseq_hi = oseq_hi; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index d9841f44487f2..c6bf3898d1bf0 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1920,6 +1920,7 @@ static int xfrm_notify_userpolicy(struct net *net) int len = NLMSG_ALIGN(sizeof(*up)); struct nlmsghdr *nlh; struct sk_buff *skb; + int err; skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) @@ -1938,7 +1939,11 @@ static int xfrm_notify_userpolicy(struct net *net) nlmsg_end(skb, nlh); - return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); + rcu_read_lock(); + err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); + rcu_read_unlock(); + + return err; } static bool xfrm_userpolicy_is_valid(__u8 policy) diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c index 9ec93d90e8a5a..4eb7aa11cfbb2 100644 --- a/samples/vfio-mdev/mdpy-fb.c +++ b/samples/vfio-mdev/mdpy-fb.c @@ -109,7 +109,7 @@ static int mdpy_fb_probe(struct pci_dev *pdev, ret = pci_request_regions(pdev, "mdpy-fb"); if (ret < 0) - return ret; + goto err_disable_dev; pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format); pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width); @@ -191,6 +191,9 @@ static int mdpy_fb_probe(struct pci_dev *pdev, err_release_regions: pci_release_regions(pdev); +err_disable_dev: + pci_disable_device(pdev); + return ret; } @@ -199,7 +202,10 @@ static void mdpy_fb_remove(struct pci_dev *pdev) struct fb_info *info = pci_get_drvdata(pdev); unregister_framebuffer(info); + iounmap(info->screen_base); framebuffer_release(info); + pci_release_regions(pdev); + pci_disable_device(pdev); } static struct pci_device_id mdpy_fb_pci_table[] = { diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 0d6e118207913..25696de8114a3 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -179,8 +179,29 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\ quiet_redirect := silent_redirect := exec >/dev/null; +# Delete the target on interruption +# +# GNU Make automatically deletes the target if it has already been changed by +# the interrupted recipe. So, you can safely stop the build by Ctrl-C (Make +# will delete incomplete targets), and resume it later. 
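+# For example, if Ctrl-C arrives while an object file is being
+# written, the truncated file is removed and the next invocation
+# simply re-runs that recipe.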
+# +# However, this does not work when the stderr is piped to another program, like +# $ make >&2 | tee log +# Make dies with SIGPIPE before cleaning the targets. +# +# To address it, we clean the target in signal traps. +# +# Make deletes the target when it catches SIGHUP, SIGINT, SIGQUIT, SIGTERM. +# So, we cover them, and also SIGPIPE just in case. +# +# Of course, this is unneeded for phony targets. +delete-on-interrupt = \ + $(if $(filter-out $(PHONY), $@), \ + $(foreach sig, HUP INT QUIT TERM PIPE, \ + trap 'rm -f $@; trap - $(sig); kill -s $(sig) $$$$' $(sig);)) + # printing commands -cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(cmd_$(1)) +cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(delete-on-interrupt) $(cmd_$(1)) ### # if_changed - execute command if any prerequisite is newer than diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 23d3967786b9f..fe327a4532ddb 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -52,6 +52,7 @@ KBUILD_CFLAGS += -Wno-format-zero-length KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) +KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict) endif endif diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 3bc48c726c41c..79ecbbfe37cd7 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c @@ -23,6 +23,13 @@ #include #include +/* + * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. + * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #define PKEY_ID_PKCS7 2 static __attribute__((noreturn)) diff --git a/scripts/faddr2line b/scripts/faddr2line index 57099687e5e1d..9e730b805e87c 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line @@ -73,7 +73,8 @@ command -v ${ADDR2LINE} >/dev/null 2>&1 || die "${ADDR2LINE} isn't installed" find_dir_prefix() { local objfile=$1 - local start_kernel_addr=$(${READELF} --symbols --wide $objfile | ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}') + local start_kernel_addr=$(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | + ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}') [[ -z $start_kernel_addr ]] && return local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr) @@ -177,7 +178,7 @@ __faddr2line() { found=2 break fi - done < <(${READELF} --symbols --wide $objfile | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2) + done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2) if [[ $found = 0 ]]; then warn "can't find symbol: sym_name: $sym_name sym_sec: $sym_sec sym_addr: $sym_addr sym_elf_size: $sym_elf_size" @@ -258,7 +259,7 @@ __faddr2line() { DONE=1 - done < <(${READELF} --symbols --wide $objfile | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn') + done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn') } [[ $# -lt 2 ]] && usage diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 6325bec3f66f8..19af6dd160e6b 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1215,6 +1215,13 @@ sub dump_struct($$) { $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos; $members =~ s/\s*____cacheline_aligned_in_smp/ /gos; $members =~ s/\s*____cacheline_aligned/ /gos; + # unwrap struct_group(): + # - first eat non-declaration parameters and rewrite for final match + # - then remove macro, 
outer parens, and trailing semicolon + $members =~ s/\bstruct_group\s*\(([^,]*,)/STRUCT_GROUP(/gos; + $members =~ s/\bstruct_group_(attr|tagged)\s*\(([^,]*,){2}/STRUCT_GROUP(/gos; + $members =~ s/\b__struct_group\s*\(([^,]*,){3}/STRUCT_GROUP(/gos; + $members =~ s/\bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;/$2/gos; # replace DECLARE_BITMAP $members =~ s/__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, __ETHTOOL_LINK_MODE_MASK_NBITS)/gos; diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index d0b44bee9286e..acd07a70a2f4e 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -161,7 +161,7 @@ gen_btf() vmlinux_link ${1} info "BTF" ${2} - LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} + LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1} # Create ${2} which contains just .BTF section but no symbols. Add # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all diff --git a/scripts/mksysmap b/scripts/mksysmap index 9aa23d15862a0..ad8bbc52267d0 100755 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -41,4 +41,4 @@ # so we just ignore them to let readprofile continue to work. # (At least sparc64 has __crc_ in the middle). -$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)\|\( L0\)' > $2 diff --git a/scripts/package/mkspec b/scripts/package/mkspec index 7c477ca7dc982..951cc60e5a903 100755 --- a/scripts/package/mkspec +++ b/scripts/package/mkspec @@ -85,10 +85,10 @@ $S mkdir -p %{buildroot}/boot %ifarch ia64 mkdir -p %{buildroot}/boot/efi - cp \$($MAKE image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE + cp \$($MAKE -s image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/ %else - cp \$($MAKE image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE + cp \$($MAKE -s image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE %endif $M $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh new file mode 100755 index 0000000000000..8c82173e42e52 --- /dev/null +++ b/scripts/pahole-flags.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +extra_paholeopt= + +if ! 
[ -x "$(command -v ${PAHOLE})" ]; then + exit 0 +fi + +pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/') + +if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then + # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars + extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars" +fi + +if [ "${pahole_ver}" -ge "124" ]; then + extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_enum64" +fi + +echo ${extra_paholeopt} diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh index 2dccf141241d7..20af56ce245c5 100755 --- a/scripts/selinux/install_policy.sh +++ b/scripts/selinux/install_policy.sh @@ -78,7 +78,7 @@ cd /etc/selinux/dummy/contexts/files $SF -F file_contexts / mounts=`cat /proc/$$/mounts | \ - egrep "ext[234]|jfs|xfs|reiserfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \ + grep -E "ext[234]|jfs|xfs|reiserfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \ awk '{ print $2 '}` $SF -F file_contexts $mounts diff --git a/scripts/sign-file.c b/scripts/sign-file.c index fbd34b8e8f578..7434e9ea926e2 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -29,6 +29,13 @@ #include #include +/* + * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. + * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + /* * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to * assume that it's not available and its header file is missing and that we diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh index 926701162bc83..bb4f59262bbe9 100755 --- a/scripts/tracing/ftrace-bisect.sh +++ b/scripts/tracing/ftrace-bisect.sh @@ -12,7 +12,7 @@ # (note, if this is a problem with function_graph tracing, then simply # replace "function" with "function_graph" in the following steps). # -# # cd /sys/kernel/debug/tracing +# # cd /sys/kernel/tracing # # echo schedule > set_ftrace_filter # # echo function > current_tracer # @@ -20,22 +20,40 @@ # # # echo nop > current_tracer # -# # cat available_filter_functions > ~/full-file +# Starting with v5.1 this can be done with numbers, making it much faster: +# +# The old (slow) way, for kernels before v5.1. +# +# [old-way] # cat available_filter_functions > ~/full-file +# +# [old-way] *** Note *** this process will take several minutes to update the +# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we +# [old-way] are dealing with thousands of functions. So go have coffee, talk +# [old-way] with your coworkers, read facebook. And eventually, this operation +# [old-way] will end. +# +# The new way (using numbers) is an O(n) operation, and usually takes less than a second. +# +# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file +# +# This will create a sequence of numbers that match the functions in +# available_filter_functions, and when echoing in a number into the +# set_ftrace_filter file, it will enable the corresponding function in +# O(1) time. Making enabling all functions O(n) where n is the number of +# functions to enable. +# +# For either the new or old way, the rest of the operations remain the same. +# # # ftrace-bisect ~/full-file ~/test-file ~/non-test-file # # cat ~/test-file > set_ftrace_filter # -# *** Note *** this will take several minutes. Setting multiple functions is -# an O(n^2) operation, and we are dealing with thousands of functions. So go -# have coffee, talk with your coworkers, read facebook. And eventually, this -# operation will end. 
-# # # echo function > current_tracer # # If it crashes, we know that ~/test-file has a bad function. # # Reboot back to test kernel. # -# # cd /sys/kernel/debug/tracing +# # cd /sys/kernel/tracing # # mv ~/test-file ~/full-file # # If it didn't crash. diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index 269967c4fc1b6..b54eb7177a31f 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -22,13 +22,23 @@ menu "Memory initialization" config CC_HAS_AUTO_VAR_INIT_PATTERN def_bool $(cc-option,-ftrivial-auto-var-init=pattern) -config CC_HAS_AUTO_VAR_INIT_ZERO +config CC_HAS_AUTO_VAR_INIT_ZERO_BARE + def_bool $(cc-option,-ftrivial-auto-var-init=zero) + +config CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER + # Clang 16 and later warn about using the -enable flag, but it + # is required before then. def_bool $(cc-option,-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang) + depends on !CC_HAS_AUTO_VAR_INIT_ZERO_BARE + +config CC_HAS_AUTO_VAR_INIT_ZERO + def_bool CC_HAS_AUTO_VAR_INIT_ZERO_BARE || CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER choice prompt "Initialize kernel stack variables at function entry" default GCC_PLUGIN_STRUCTLEAK_BYREF_ALL if COMPILE_TEST && GCC_PLUGINS default INIT_STACK_ALL_PATTERN if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT_PATTERN + default INIT_STACK_ALL_ZERO if CC_HAS_AUTO_VAR_INIT_ZERO default INIT_STACK_NONE help This option enables initialization of stack variables at @@ -39,11 +49,11 @@ choice syscalls. This chooses the level of coverage over classes of potentially - uninitialized variables. The selected class will be + uninitialized variables. The selected class of variable will be initialized before use in a function. config INIT_STACK_NONE - bool "no automatic initialization (weakest)" + bool "no automatic stack variable initialization (weakest)" help Disable automatic stack variable initialization. This leaves the kernel vulnerable to the standard @@ -80,7 +90,7 @@ choice and is disallowed. config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL - bool "zero-init anything passed by reference (very strong)" + bool "zero-init everything passed by reference (very strong)" depends on GCC_PLUGINS depends on !(KASAN && KASAN_STACK=1) select GCC_PLUGIN_STRUCTLEAK @@ -91,33 +101,44 @@ choice of uninitialized stack variable exploits and information exposures. + As a side-effect, this keeps a lot of variables on the + stack that can otherwise be optimized out, so combining + this with CONFIG_KASAN_STACK can lead to a stack overflow + and is disallowed. + config INIT_STACK_ALL_PATTERN - bool "0xAA-init everything on the stack (strongest)" + bool "pattern-init everything (strongest)" depends on CC_HAS_AUTO_VAR_INIT_PATTERN help - Initializes everything on the stack with a 0xAA - pattern. This is intended to eliminate all classes - of uninitialized stack variable exploits and information - exposures, even variables that were warned to have been - left uninitialized. + Initializes everything on the stack (including padding) + with a specific debug value. This is intended to eliminate + all classes of uninitialized stack variable exploits and + information exposures, even variables that were warned about + having been left uninitialized. Pattern initialization is known to provoke many existing bugs related to uninitialized locals, e.g. pointers receive - non-NULL values, buffer sizes and indices are very big. + non-NULL values, buffer sizes and indices are very big. 
The + pattern is situation-specific; Clang on 64-bit uses 0xAA + repeating for all types and padding except float and double + which use 0xFF repeating (-NaN). Clang on 32-bit uses 0xFF + repeating for all types and padding. config INIT_STACK_ALL_ZERO - bool "zero-init everything on the stack (strongest and safest)" + bool "zero-init everything (strongest and safest)" depends on CC_HAS_AUTO_VAR_INIT_ZERO help - Initializes everything on the stack with a zero - value. This is intended to eliminate all classes - of uninitialized stack variable exploits and information - exposures, even variables that were warned to have been - left uninitialized. - - Zero initialization provides safe defaults for strings, - pointers, indices and sizes, and is therefore - more suitable as a security mitigation measure. + Initializes everything on the stack (including padding) + with a zero value. This is intended to eliminate all + classes of uninitialized stack variable exploits and + information exposures, even variables that were warned + about having been left uninitialized. + + Zero initialization provides safe defaults for strings + (immediately NUL-terminated), pointers (NULL), indices + (index 0), and sizes (0 length), so it is therefore more + suitable as a production security mitigation than pattern + initialization. endchoice diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index c173f6fd7aeed..49d97b331abca 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -867,8 +867,10 @@ static struct multi_transaction *multi_transaction_new(struct file *file, if (!t) return ERR_PTR(-ENOMEM); kref_init(&t->count); - if (copy_from_user(t->data, buf, size)) + if (copy_from_user(t->data, buf, size)) { + put_multi_transaction(t); return ERR_PTR(-EFAULT); + } return t; } diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index ffeaee5ed9683..585edcc6814d2 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -1161,10 +1161,10 @@ static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb, #endif /* - * The cred blob is a pointer to, not an instance of, an aa_task_ctx. + * The cred blob is a pointer to, not an instance of, an aa_label. 
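+ * (so lbs_cred below only needs to reserve room for that pointer)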
*/ struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = { - .lbs_cred = sizeof(struct aa_task_ctx *), + .lbs_cred = sizeof(struct aa_label *), .lbs_file = sizeof(struct aa_file_ctx), .lbs_task = sizeof(struct aa_task_ctx), }; diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 4c010c9a6af1d..fcf22577f606c 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -1125,7 +1125,7 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ - mutex_lock_nested(&ns->parent->lock, ns->level); + mutex_lock_nested(&ns->parent->lock, ns->parent->level); __aa_bump_ns_revision(ns); __aa_remove_ns(ns); mutex_unlock(&ns->parent->lock); diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c index 70921d95fb406..53d24cf638936 100644 --- a/security/apparmor/policy_ns.c +++ b/security/apparmor/policy_ns.c @@ -121,7 +121,7 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name) return ns; fail_unconfined: - kfree_sensitive(ns->base.hname); + aa_policy_destroy(&ns->base); fail_ns: kfree_sensitive(ns); return NULL; diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 556ef65ab6ee6..519656e685822 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -964,7 +964,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns) * if not specified use previous version * Mask off everything that is not kernel abi version */ - if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) { + if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) { audit_iface(NULL, NULL, NULL, "unsupported interface version", e, error); return error; diff --git a/security/commoncap.c b/security/commoncap.c index 28d582ed80c9d..b44b69796c0ba 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -391,8 +391,10 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, &tmpbuf, size, GFP_NOFS); dput(dentry); - if (ret < 0 || !tmpbuf) - return ret; + if (ret < 0 || !tmpbuf) { + size = ret; + goto out_free; + } fs_ns = inode->i_sb->s_user_ns; cap = (struct vfs_cap_data *) tmpbuf; diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 04375df52fc9a..fe5cb7696993d 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -81,6 +81,17 @@ static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig) return -ENOMEM; } +static void dev_exceptions_move(struct list_head *dest, struct list_head *orig) +{ + struct dev_exception_item *ex, *tmp; + + lockdep_assert_held(&devcgroup_mutex); + + list_for_each_entry_safe(ex, tmp, orig, list) { + list_move_tail(&ex->list, dest); + } +} + /* * called under devcgroup_mutex */ @@ -603,11 +614,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, int count, rc = 0; struct dev_exception_item ex; struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent); + struct dev_cgroup tmp_devcgrp; if (!capable(CAP_SYS_ADMIN)) return -EPERM; memset(&ex, 0, sizeof(ex)); + memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp)); b = buffer; switch (*b) { @@ -619,15 +632,27 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, if (!may_allow_all(parent)) return -EPERM; - dev_exception_clean(devcgroup); - devcgroup->behavior = DEVCG_DEFAULT_ALLOW; - if (!parent) + if (!parent) { + devcgroup->behavior = DEVCG_DEFAULT_ALLOW; + 
dev_exception_clean(devcgroup); break; } + INIT_LIST_HEAD(&tmp_devcgrp.exceptions); + rc = dev_exceptions_copy(&tmp_devcgrp.exceptions, + &devcgroup->exceptions); + if (rc) + return rc; + dev_exception_clean(devcgroup); rc = dev_exceptions_copy(&devcgroup->exceptions, &parent->exceptions); - if (rc) + if (rc) { + dev_exceptions_move(&devcgroup->exceptions, + &tmp_devcgrp.exceptions); return rc; + } + devcgroup->behavior = DEVCG_DEFAULT_ALLOW; + dev_exception_clean(&tmp_devcgrp); break; case DEVCG_DENY: if (css_has_online_children(&devcgroup->css)) diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c index 0f518dcfde052..de442af7b3364 100644 --- a/security/integrity/digsig.c +++ b/security/integrity/digsig.c @@ -120,6 +120,7 @@ int __init integrity_init_keyring(const unsigned int id) { struct key_restriction *restriction; key_perm_t perm; + int ret; perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH; @@ -140,7 +141,10 @@ int __init integrity_init_keyring(const unsigned int id) perm |= KEY_USR_WRITE; out: - return __integrity_init_keyring(id, perm, restriction); + ret = __integrity_init_keyring(id, perm, restriction); + if (ret) + kfree(restriction); + return ret; } int __init integrity_add_key(const unsigned int id, const void *data, diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 2d1af8899cabd..600b97677085f 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -743,6 +743,7 @@ int ima_load_data(enum kernel_load_data_id id, bool contents) pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } + break; default: break; } diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 18569adcb4fe7..96ecb7d254037 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -370,12 +370,6 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) nentry->lsm[i].type = entry->lsm[i].type; nentry->lsm[i].args_p = entry->lsm[i].args_p; - /* - * Remove the reference from entry so that the associated - * memory will not be freed during a later call to - * ima_lsm_free_rule(entry). - */ - entry->lsm[i].args_p = NULL; ima_filter_rule_init(nentry->lsm[i].type, Audit_equal, nentry->lsm[i].args_p, @@ -389,6 +383,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) static int ima_lsm_update_rule(struct ima_rule_entry *entry) { + int i; struct ima_rule_entry *nentry; nentry = ima_lsm_copy_rule(entry); @@ -403,7 +398,8 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry) * references and the entry itself. All other memory references will now * be owned by nentry.
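 * (in particular, the args_p strings are shared with nentry, which is
 * why only the LSM rules themselves are freed below)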
*/ - ima_lsm_free_rule(entry); + for (i = 0; i < MAX_LSM_RULES; i++) + ima_filter_rule_free(entry->lsm[i].rule); kfree(entry); return 0; @@ -503,6 +499,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode, const char *keyring) { int i; + bool result = false; + struct ima_rule_entry *lsm_rule = rule; + bool rule_reinitialized = false; if (func == KEY_CHECK) { return (rule->flags & IMA_FUNC) && (rule->func == func) && @@ -545,34 +544,55 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode, int rc = 0; u32 osid; - if (!rule->lsm[i].rule) { - if (!rule->lsm[i].args_p) + if (!lsm_rule->lsm[i].rule) { + if (!lsm_rule->lsm[i].args_p) continue; else return false; } + +retry: switch (i) { case LSM_OBJ_USER: case LSM_OBJ_ROLE: case LSM_OBJ_TYPE: security_inode_getsecid(inode, &osid); - rc = ima_filter_rule_match(osid, rule->lsm[i].type, + rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type, Audit_equal, - rule->lsm[i].rule); + lsm_rule->lsm[i].rule); break; case LSM_SUBJ_USER: case LSM_SUBJ_ROLE: case LSM_SUBJ_TYPE: - rc = ima_filter_rule_match(secid, rule->lsm[i].type, + rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type, Audit_equal, - rule->lsm[i].rule); + lsm_rule->lsm[i].rule); + break; default: break; } - if (!rc) - return false; + + if (rc == -ESTALE && !rule_reinitialized) { + lsm_rule = ima_lsm_copy_rule(rule); + if (lsm_rule) { + rule_reinitialized = true; + goto retry; + } + } + if (!rc) { + result = false; + goto out; + } } - return true; + result = true; + +out: + if (rule_reinitialized) { + for (i = 0; i < MAX_LSM_RULES; i++) + ima_filter_rule_free(lsm_rule->lsm[i].rule); + kfree(lsm_rule); + } + return result; } /* @@ -802,6 +822,7 @@ void __init ima_init_policy(void) add_rules(default_measurement_rules, ARRAY_SIZE(default_measurement_rules), IMA_DEFAULT_POLICY); + break; default: break; } diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c index f64c01d53e96a..e5d941f485b19 100644 --- a/security/integrity/ima/ima_template.c +++ b/security/integrity/ima/ima_template.c @@ -220,11 +220,11 @@ int template_desc_init_fields(const char *template_fmt, } if (fields && num_fields) { - *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL); + *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL); if (*fields == NULL) return -ENOMEM; - memcpy(*fields, found_fields, i * sizeof(*fields)); + memcpy(*fields, found_fields, i * sizeof(**fields)); *num_fields = i; } @@ -290,8 +290,11 @@ static struct ima_template_desc *restore_template_fmt(char *template_name) template_desc->name = ""; template_desc->fmt = kstrdup(template_name, GFP_KERNEL); - if (!template_desc->fmt) + if (!template_desc->fmt) { + kfree(template_desc); + template_desc = NULL; goto out; + } spin_lock(&template_list); list_add_tail_rcu(&template_desc->list, &defined_templates); diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c index 555d2dfc0ff79..d2f2c3936277a 100644 --- a/security/integrity/platform_certs/load_uefi.c +++ b/security/integrity/platform_certs/load_uefi.c @@ -30,10 +30,11 @@ static const struct dmi_system_id uefi_skip_cert[] = { { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,1") }, { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,2") }, { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir9,1") }, - { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacMini8,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "Macmini8,1") }, { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") }, { 
UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") }, { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMacPro1,1") }, { } }; diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c index b12f7d986b1e3..5fce105a372d3 100644 --- a/security/loadpin/loadpin.c +++ b/security/loadpin/loadpin.c @@ -118,21 +118,11 @@ static void loadpin_sb_free_security(struct super_block *mnt_sb) } } -static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, - bool contents) +static int loadpin_check(struct file *file, enum kernel_read_file_id id) { struct super_block *load_root; const char *origin = kernel_read_file_id_str(id); - /* - * If we will not know that we'll be seeing the full contents - * then we cannot trust a load will be complete and unchanged - * off disk. Treat all contents=false hooks as if there were - * no associated file struct. - */ - if (!contents) - file = NULL; - /* If the file id is excluded, ignore the pinning. */ if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) && ignore_read_file_id[id]) { @@ -187,9 +177,25 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, return 0; } +static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, + bool contents) +{ + /* + * LoadPin only cares about the _origin_ of a file, not its + * contents, so we can ignore the "are full contents available" + * argument here. + */ + return loadpin_check(file, id); +} + static int loadpin_load_data(enum kernel_load_data_id id, bool contents) { - return loadpin_read_file(NULL, (enum kernel_read_file_id) id, contents); + /* + * LoadPin only cares about the _origin_ of a file, not its + * contents, so a NULL file is passed, and we can ignore the + * state of "contents". + */ + return loadpin_check(NULL, (enum kernel_read_file_id) id); } static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = { diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 31d631fa846ef..3db8bd2158d9b 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2011,7 +2011,8 @@ static inline int convert_context_handle_invalid_context( * in `newc'. Verify that the context is valid * under the new policy. 
*/ -static int convert_context(struct context *oldc, struct context *newc, void *p) +static int convert_context(struct context *oldc, struct context *newc, void *p, + gfp_t gfp_flags) { struct convert_context_args *args; struct ocontext *oc; @@ -2025,7 +2026,7 @@ static int convert_context(struct context *oldc, struct context *newc, void *p) args = p; if (oldc->str) { - s = kstrdup(oldc->str, GFP_KERNEL); + s = kstrdup(oldc->str, gfp_flags); if (!s) return -ENOMEM; diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index 656d50b09f762..1981c5af13e0a 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -325,7 +325,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context, } rc = convert->func(context, &dst_convert->context, - convert->args); + convert->args, GFP_ATOMIC); if (rc) { context_destroy(&dst->context); goto out_unlock; @@ -404,7 +404,7 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst, while (i < SIDTAB_LEAF_ENTRIES && *pos < count) { rc = convert->func(&esrc->ptr_leaf->entries[i].context, &edst->ptr_leaf->entries[i].context, - convert->args); + convert->args, GFP_KERNEL); if (rc) return rc; (*pos)++; diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h index 4eff0e49dcb22..9fce0d553fe2c 100644 --- a/security/selinux/ss/sidtab.h +++ b/security/selinux/ss/sidtab.h @@ -65,7 +65,7 @@ struct sidtab_isid_entry { }; struct sidtab_convert_params { - int (*func)(struct context *oldc, struct context *newc, void *args); + int (*func)(struct context *oldc, struct context *newc, void *args, gfp_t gfp_flags); void *args; struct sidtab *target; }; diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile index cca5a3012fee2..221eaadffb09c 100644 --- a/security/tomoyo/Makefile +++ b/security/tomoyo/Makefile @@ -10,7 +10,7 @@ endef quiet_cmd_policy = POLICY $@ cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@ -$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE +$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE $(call if_changed,policy) $(obj)/common.o: $(obj)/builtin-policy.h diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c index faf6b03131ee4..51ed2f34b276d 100644 --- a/sound/aoa/soundbus/i2sbus/core.c +++ b/sound/aoa/soundbus/i2sbus/core.c @@ -147,6 +147,7 @@ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index, return rc; } +/* Returns 1 if added, 0 otherwise; don't return a negative value! */ /* FIXME: look at device node refcounting */ static int i2sbus_add_dev(struct macio_dev *macio, struct i2sbus_control *control, @@ -213,7 +214,7 @@ static int i2sbus_add_dev(struct macio_dev *macio, * either as the second one in that case is just a modem.
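 * Returning 0 rather than an error below keeps the caller's count of
 * successfully added devices correct.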
*/ if (!ok) { kfree(dev); - return -ENODEV; + return 0; } mutex_init(&dev->lock); @@ -302,6 +303,10 @@ static int i2sbus_add_dev(struct macio_dev *macio, if (soundbus_add_one(&dev->sound)) { printk(KERN_DEBUG "i2sbus: device registration error!\n"); + if (dev->sound.ofdev.dev.kobj.state_initialized) { + soundbus_dev_put(&dev->sound); + return 0; + } goto err; } diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index 97467f6a32a13..980ab3580f1b7 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -304,7 +304,9 @@ static int ctl_elem_read_user(struct snd_card *card, err = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (err < 0) goto error; + down_read(&card->controls_rwsem); err = snd_ctl_elem_read(card, data); + up_read(&card->controls_rwsem); if (err < 0) goto error; err = copy_ctl_value_to_user(userdata, valuep, data, type, count); @@ -332,7 +334,9 @@ static int ctl_elem_write_user(struct snd_ctl_file *file, err = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (err < 0) goto error; + down_write(&card->controls_rwsem); err = snd_ctl_elem_write(card, file, data); + up_write(&card->controls_rwsem); if (err < 0) goto error; err = copy_ctl_value_to_user(userdata, valuep, data, type, count); diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index f88de74da1eb3..de6f94bee50b9 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1662,13 +1662,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) goto __direct; - if ((err = snd_pcm_oss_make_ready(substream)) < 0) - return err; atomic_inc(&runtime->oss.rw_ref); if (mutex_lock_interruptible(&runtime->oss.params_lock)) { atomic_dec(&runtime->oss.rw_ref); return -ERESTARTSYS; } + err = snd_pcm_oss_make_ready_locked(substream); + if (err < 0) + goto unlock; format = snd_pcm_oss_format_from(runtime->oss.format); width = snd_pcm_format_physical_width(format); if (runtime->oss.buffer_used > 0) { diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c index 4d0e8fe535a1e..be58505889a36 100644 --- a/sound/core/pcm_dmaengine.c +++ b/sound/core/pcm_dmaengine.c @@ -130,12 +130,14 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); static void dmaengine_pcm_dma_complete(void *arg) { + unsigned int new_pos; struct snd_pcm_substream *substream = arg; struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); - prtd->pos += snd_pcm_lib_period_bytes(substream); - if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) - prtd->pos = 0; + new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream); + if (new_pos >= snd_pcm_lib_buffer_bytes(substream)) + new_pos = 0; + prtd->pos = new_pos; snd_pcm_period_elapsed(substream); } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 6cc7c2a9fe732..9425fcd30c4c7 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1413,8 +1413,10 @@ static int snd_pcm_do_start(struct snd_pcm_substream *substream, static void snd_pcm_undo_start(struct snd_pcm_substream *substream, snd_pcm_state_t state) { - if (substream->runtime->trigger_master == substream) + if (substream->runtime->trigger_master == substream) { substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); + substream->runtime->stop_operating = true; + } } static void snd_pcm_post_start(struct snd_pcm_substream *substream, diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 257ad5206240f..0d91143eb4647 100644 --- 
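
The pcm_dmaengine change above is a lockless-publication fix rather than new logic: the DMA completion callback used to bump prtd->pos and only afterwards wrap it, so the pointer callback running concurrently could briefly observe a position at or beyond the buffer size. Computing the wrapped value in a local and storing it once closes that window. The shape of the fix in plain, runnable C (the kernel itself relies on a single aligned store rather than C11 atomics):

#include <stdatomic.h>

static _Atomic unsigned int hw_pos; /* read concurrently by a pointer callback */

/* Racy: a reader between the two statements sees hw_pos >= buffer_bytes. */
static void advance_racy(unsigned int period_bytes, unsigned int buffer_bytes)
{
	hw_pos += period_bytes;
	if (hw_pos >= buffer_bytes)
		hw_pos = 0;
}

/* Fixed: wrap in a local, then publish the final value with one store. */
static void advance_fixed(unsigned int period_bytes, unsigned int buffer_bytes)
{
	unsigned int new_pos = hw_pos + period_bytes;

	if (new_pos >= buffer_bytes)
		new_pos = 0;
	hw_pos = new_pos;
}
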
a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -1736,10 +1736,8 @@ static int snd_rawmidi_free(struct snd_rawmidi *rmidi) snd_info_free_entry(rmidi->proc_entry); rmidi->proc_entry = NULL; - mutex_lock(&register_mutex); if (rmidi->ops && rmidi->ops->dev_unregister) rmidi->ops->dev_unregister(rmidi); - mutex_unlock(&register_mutex); snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]); snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]); diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index 65db1a7c77b76..bb76a2dd0a2ff 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -112,15 +112,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event); * expand the variable length event to linear buffer space. */ -static int seq_copy_in_kernel(char **bufptr, const void *src, int size) +static int seq_copy_in_kernel(void *ptr, void *src, int size) { + char **bufptr = ptr; + memcpy(*bufptr, src, size); *bufptr += size; return 0; } -static int seq_copy_in_user(char __user **bufptr, const void *src, int size) +static int seq_copy_in_user(void *ptr, void *src, int size) { + char __user **bufptr = ptr; + if (copy_to_user(*bufptr, src, size)) return -EFAULT; *bufptr += size; @@ -149,8 +153,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char return newlen; } err = snd_seq_dump_var_event(event, - in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel : - (snd_seq_dump_func_t)seq_copy_in_user, + in_kernel ? seq_copy_in_kernel : seq_copy_in_user, &buf); return err < 0 ? err : newlen; } diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c index 610f317bea9d1..99874e80b6829 100644 --- a/sound/core/sound_oss.c +++ b/sound/core/sound_oss.c @@ -162,7 +162,6 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev) mutex_unlock(&sound_oss_mutex); return -ENOENT; } - unregister_sound_special(minor); switch (SNDRV_MINOR_OSS_DEVICE(minor)) { case SNDRV_MINOR_OSS_PCM: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_AUDIO); @@ -174,12 +173,18 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev) track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1); break; } - if (track2 >= 0) { - unregister_sound_special(track2); + if (track2 >= 0) snd_oss_minors[track2] = NULL; - } snd_oss_minors[minor] = NULL; mutex_unlock(&sound_oss_mutex); + + /* call unregister_sound_special() outside sound_oss_mutex; + * otherwise may deadlock, as it can trigger the release of a card + */ + unregister_sound_special(minor); + if (track2 >= 0) + unregister_sound_special(track2); + kfree(mptr); return 0; } diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 2c5f7e905ab8f..fb45a32d99cd9 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -606,17 +606,18 @@ static unsigned int loopback_jiffies_timer_pos_update cable->streams[SNDRV_PCM_STREAM_PLAYBACK]; struct loopback_pcm *dpcm_capt = cable->streams[SNDRV_PCM_STREAM_CAPTURE]; - unsigned long delta_play = 0, delta_capt = 0; + unsigned long delta_play = 0, delta_capt = 0, cur_jiffies; unsigned int running, count1, count2; + cur_jiffies = jiffies; running = cable->running ^ cable->pause; if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { - delta_play = jiffies - dpcm_play->last_jiffies; + delta_play = cur_jiffies - dpcm_play->last_jiffies; dpcm_play->last_jiffies += delta_play; } if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) { - delta_capt = jiffies - dpcm_capt->last_jiffies; + delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
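
The sound_oss.c rework above is a standard self-deadlock cure: unregister_sound_special() can trigger release of the card, which may re-enter code that takes sound_oss_mutex, so the minor-table entries are unpublished while the mutex is held and the re-entrant calls are made only after it is dropped. A compact user-space model of the pattern, with hypothetical names:

#include <pthread.h>
#include <stddef.h>

#define MAX_MINORS 256

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *minors[MAX_MINORS];

/* In the real code this can re-enter and try to take table_lock again. */
static void release_minor(int minor)
{
	(void)minor;
}

void unregister_minor(int minor)
{
	pthread_mutex_lock(&table_lock);
	minors[minor] = NULL;           /* unpublish while the lock is held... */
	pthread_mutex_unlock(&table_lock);

	release_minor(minor);           /* ...call back out only after unlock */
}
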
dpcm_capt->last_jiffies += delta_capt; } diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c index 9c708b693cb33..257314920e4d4 100644 --- a/sound/drivers/mts64.c +++ b/sound/drivers/mts64.c @@ -816,6 +816,9 @@ static void snd_mts64_interrupt(void *private) u8 status, data; struct snd_rawmidi_substream *substream; + if (!mts) + return; + spin_lock(&mts->lock); ret = mts64_read(mts->pardev->port); data = ret & 0x00ff; diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c index 1e6e4cf428cda..4276dae2e00af 100644 --- a/sound/hda/ext/hdac_ext_stream.c +++ b/sound/hda/ext/hdac_ext_stream.c @@ -475,23 +475,6 @@ int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus, } EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo); - -/** - * snd_hdac_ext_stop_streams - stop all stream if running - * @bus: HD-audio core bus - */ -void snd_hdac_ext_stop_streams(struct hdac_bus *bus) -{ - struct hdac_stream *stream; - - if (bus->chip_init) { - list_for_each_entry(stream, &bus->stream_list, list) - snd_hdac_stream_stop(stream); - snd_hdac_bus_stop_chip(bus); - } -} -EXPORT_SYMBOL_GPL(snd_hdac_ext_stop_streams); - /** * snd_hdac_ext_stream_drsm_enable - enable DMA resume for a stream * @bus: HD-audio core bus diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index ce77a53201639..1e0f61affd979 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -142,6 +142,33 @@ void snd_hdac_stream_stop(struct hdac_stream *azx_dev) } EXPORT_SYMBOL_GPL(snd_hdac_stream_stop); +/** + * snd_hdac_stop_streams - stop all streams + * @bus: HD-audio core bus + */ +void snd_hdac_stop_streams(struct hdac_bus *bus) +{ + struct hdac_stream *stream; + + list_for_each_entry(stream, &bus->stream_list, list) + snd_hdac_stream_stop(stream); +} +EXPORT_SYMBOL_GPL(snd_hdac_stop_streams); + +/** + * snd_hdac_stop_streams_and_chip - stop all streams and chip if running + * @bus: HD-audio core bus + */ +void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus) +{ + + if (bus->chip_init) { + snd_hdac_stop_streams(bus); + snd_hdac_bus_stop_chip(bus); + } +} +EXPORT_SYMBOL_GPL(snd_hdac_stop_streams_and_chip); + /** * snd_hdac_stream_reset - reset a stream * @azx_dev: HD-audio core stream to reset diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c index e56e833259031..bcf302f5115ac 100644 --- a/sound/hda/hdac_sysfs.c +++ b/sound/hda/hdac_sysfs.c @@ -346,8 +346,10 @@ static int add_widget_node(struct kobject *parent, hda_nid_t nid, return -ENOMEM; kobject_init(kobj, &widget_ktype); err = kobject_add(kobj, parent, "%02x", nid); - if (err < 0) + if (err < 0) { + kobject_put(kobj); return err; + } err = sysfs_create_group(kobj, group); if (err < 0) { kobject_put(kobj); diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 963731cf0d8c8..cd66632bf1c37 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -1946,6 +1946,7 @@ static int snd_ac97_dev_register(struct snd_device *device) snd_ac97_get_short_name(ac97)); if ((err = device_register(&ac97->dev)) < 0) { ac97_err(ac97, "Can't register ac97 bus\n"); + put_device(&ac97->dev); ac97->dev.bus = NULL; return err; } diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index bb31b7fe867d6..477a5b4b50bcb 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -361,7 +361,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, pci_dev->device, pci_dev->subsystem_vendor, pci_dev->subsystem_device, pci_dev->devfn); - if (pci_enable_device(pci_dev) < 0) { 
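
The hdac_sysfs fix above applies the documented kobject rule: once kobject_init() has run, the object is owned by its reference count, so a failed kobject_add() must be answered with kobject_put(), which releases the name and frees the object through its ktype's release(), never with a bare return (a leak) or a direct kfree(). A kernel-style sketch of the corrected error path:

#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/slab.h>

static void demo_release(struct kobject *kobj)
{
	kfree(kobj); /* the ktype owns freeing once kobject_init() has run */
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
};

static int demo_add_node(struct kobject *parent, const char *name)
{
	struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	int err;

	if (!kobj)
		return -ENOMEM;
	kobject_init(kobj, &demo_ktype);
	err = kobject_add(kobj, parent, "%s", name);
	if (err < 0) {
		kobject_put(kobj); /* not kfree(): drop the reference instead */
		return err;
	}
	return 0;
}
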
+ if (pcim_enable_device(pci_dev) < 0) { dev_err(&pci_dev->dev, "pci_enable_device failed, disabling device\n"); return -EIO; diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h index 0aa7af049b1b9..6cbb2bc4a0483 100644 --- a/sound/pci/au88x0/au88x0.h +++ b/sound/pci/au88x0/au88x0.h @@ -141,7 +141,7 @@ struct snd_vortex { #ifndef CHIP_AU8810 stream_t dma_wt[NR_WT]; wt_voice_t wt_voice[NR_WT]; /* WT register cache. */ - char mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */ + s8 mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */ #endif /* Global resources */ @@ -235,8 +235,8 @@ static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v); static void vortex_connect_default(vortex_t * vortex, int en); static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch, int dir, int type, int subdev); -static char vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, - int restype); +static int vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, + int restype); #ifndef CHIP_AU8810 static int vortex_wt_allocroute(vortex_t * vortex, int dma, int nr_ch); static void vortex_wt_connect(vortex_t * vortex, int en); diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 5180f1bd1326c..0b04436ac017e 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c @@ -1998,7 +1998,7 @@ static const int resnum[VORTEX_RESOURCE_LAST] = out: Mean checkout if != 0. Else mean Checkin resource. restype: Indicates type of resource to be checked in or out. */ -static char +static int vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype) { int i, qty = resnum[restype], resinuse = 0; diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index b2ddabb994381..8d2c101d66a23 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c @@ -123,7 +123,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic epcm->voices[0]->epcm = epcm; if (voices > 1) { for (i = 1; i < voices; i++) { - epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i]; + epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G]; epcm->voices[i]->epcm = epcm; } } diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index 53a2b89f8983c..e63621bcb2142 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -118,6 +118,12 @@ static int snd_hda_beep_event(struct input_dev *dev, unsigned int type, return 0; } +static void turn_on_beep(struct hda_beep *beep) +{ + if (beep->keep_power_at_enable) + snd_hda_power_up_pm(beep->codec); +} + static void turn_off_beep(struct hda_beep *beep) { cancel_work_sync(&beep->beep_work); @@ -125,6 +131,8 @@ static void turn_off_beep(struct hda_beep *beep) /* turn off beep */ generate_tone(beep, 0); } + if (beep->keep_power_at_enable) + snd_hda_power_down_pm(beep->codec); } /** @@ -140,7 +148,9 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable) enable = !!enable; if (beep->enabled != enable) { beep->enabled = enable; - if (!enable) + if (enable) + turn_on_beep(beep); + else turn_off_beep(beep); return 1; } @@ -167,7 +177,8 @@ static int beep_dev_disconnect(struct snd_device *device) input_unregister_device(beep->dev); else input_free_device(beep->dev); - turn_off_beep(beep); + if (beep->enabled) + turn_off_beep(beep); return 0; } diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h index a25358a4807ab..db76e3ddba654 100644 --- a/sound/pci/hda/hda_beep.h +++ 
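
The emupcm change above bounds the multi-voice lookup: extra voices are addressed as offsets from the first allocated voice's number, and without the modulo an allocation starting near the top of the 64-entry (NUM_G) voice array would index past its end. The arithmetic in runnable C:

#include <assert.h>

#define NUM_G 64 /* hardware voice count, as in emu10k1 */

static int voice_index(int first, int i)
{
	return (first + i) % NUM_G; /* wrap instead of running off the table */
}

int main(void)
{
	assert(voice_index(62, 3) == 1);  /* 65 % 64: stays in bounds */
	assert(voice_index(10, 3) == 13); /* the common case is unchanged */
	return 0;
}
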
b/sound/pci/hda/hda_beep.h @@ -25,6 +25,7 @@ struct hda_beep { unsigned int enabled:1; unsigned int linear_tone:1; /* linear tone for IDT/STAC codec */ unsigned int playing:1; + unsigned int keep_power_at_enable:1; /* set by driver */ struct work_struct beep_work; /* scheduled task for beep event */ struct mutex mutex; void (*power_hook)(struct hda_beep *beep, bool on); diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index 4efbcc41fdfb7..0a83afa5f373c 100644 --- a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c @@ -143,6 +143,7 @@ static int hda_codec_driver_probe(struct device *dev) error: snd_hda_codec_cleanup_for_unbind(codec); + codec->preset = NULL; return err; } @@ -159,6 +160,7 @@ static int hda_codec_driver_remove(struct device *dev) if (codec->patch_ops.free) codec->patch_ops.free(codec); snd_hda_codec_cleanup_for_unbind(codec); + codec->preset = NULL; module_put(dev->driver->owner); return 0; } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 39281106477eb..fc4a64a83ff2f 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -784,7 +784,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec) snd_array_free(&codec->cvt_setups); snd_array_free(&codec->spdif_out); snd_array_free(&codec->verbs); - codec->preset = NULL; codec->follower_dig_outs = NULL; codec->spdif_status_reset = 0; snd_array_free(&codec->mixers); diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 3de7dc34def24..ea76395d71d38 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1045,10 +1045,8 @@ EXPORT_SYMBOL_GPL(azx_init_chip); void azx_stop_all_streams(struct azx *chip) { struct hdac_bus *bus = azx_bus(chip); - struct hdac_stream *s; - list_for_each_entry(s, &bus->stream_list, list) - snd_hdac_stream_stop(s); + snd_hdac_stop_streams(bus); } EXPORT_SYMBOL_GPL(azx_stop_all_streams); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 600ea241ead79..494bfd2135a9e 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2584,9 +2584,12 @@ static const struct pci_device_id azx_ids[] = { /* 5 Series/3400 */ { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, + { PCI_DEVICE(0x8086, 0x3b57), + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, /* Poulsbo */ { PCI_DEVICE(0x8086, 0x811b), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE | + AZX_DCAPS_POSFIX_LPIB }, /* Oaktrail */ { PCI_DEVICE(0x8086, 0x080a), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE }, @@ -2746,6 +2749,9 @@ static const struct pci_device_id azx_ids[] = { { PCI_DEVICE(0x1002, 0xab28), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME }, + { PCI_DEVICE(0x1002, 0xab30), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | + AZX_DCAPS_PM_RUNTIME }, { PCI_DEVICE(0x1002, 0xab38), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME }, diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 07787698b9738..17b06f7b69ee6 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -17,6 +17,7 @@ #include #include #include +#include <linux/reset.h> #include #include #include @@ -70,9 +71,9 @@ struct hda_tegra { struct azx chip; struct device *dev; - struct clk *hda_clk; - struct clk *hda2codec_2x_clk; - struct clk *hda2hdmi_clk; + struct
reset_control *reset; + struct clk_bulk_data clocks[3]; + unsigned int nclocks; void __iomem *regs; struct work_struct probe_work; }; @@ -113,36 +114,6 @@ static void hda_tegra_init(struct hda_tegra *hda) writel(v, hda->regs + HDA_IPFS_INTR_MASK); } -static int hda_tegra_enable_clocks(struct hda_tegra *data) -{ - int rc; - - rc = clk_prepare_enable(data->hda_clk); - if (rc) - return rc; - rc = clk_prepare_enable(data->hda2codec_2x_clk); - if (rc) - goto disable_hda; - rc = clk_prepare_enable(data->hda2hdmi_clk); - if (rc) - goto disable_codec_2x; - - return 0; - -disable_codec_2x: - clk_disable_unprepare(data->hda2codec_2x_clk); -disable_hda: - clk_disable_unprepare(data->hda_clk); - return rc; -} - -static void hda_tegra_disable_clocks(struct hda_tegra *data) -{ - clk_disable_unprepare(data->hda2hdmi_clk); - clk_disable_unprepare(data->hda2codec_2x_clk); - clk_disable_unprepare(data->hda_clk); -} - /* * power management */ @@ -186,7 +157,7 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev) azx_stop_chip(chip); azx_enter_link_reset(chip); } - hda_tegra_disable_clocks(hda); + clk_bulk_disable_unprepare(hda->nclocks, hda->clocks); return 0; } @@ -198,7 +169,13 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); int rc; - rc = hda_tegra_enable_clocks(hda); + if (!chip->running) { + rc = reset_control_assert(hda->reset); + if (rc) + return rc; + } + + rc = clk_bulk_prepare_enable(hda->nclocks, hda->clocks); if (rc != 0) return rc; if (chip && chip->running) { @@ -207,6 +184,12 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) /* disable controller wake up event*/ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ~STATESTS_INT_MASK); + } else { + usleep_range(10, 100); + + rc = reset_control_deassert(hda->reset); + if (rc) + return rc; } return 0; @@ -268,29 +251,6 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev) return 0; } -static int hda_tegra_init_clk(struct hda_tegra *hda) -{ - struct device *dev = hda->dev; - - hda->hda_clk = devm_clk_get(dev, "hda"); - if (IS_ERR(hda->hda_clk)) { - dev_err(dev, "failed to get hda clock\n"); - return PTR_ERR(hda->hda_clk); - } - hda->hda2codec_2x_clk = devm_clk_get(dev, "hda2codec_2x"); - if (IS_ERR(hda->hda2codec_2x_clk)) { - dev_err(dev, "failed to get hda2codec_2x clock\n"); - return PTR_ERR(hda->hda2codec_2x_clk); - } - hda->hda2hdmi_clk = devm_clk_get(dev, "hda2hdmi"); - if (IS_ERR(hda->hda2hdmi_clk)) { - dev_err(dev, "failed to get hda2hdmi clock\n"); - return PTR_ERR(hda->hda2hdmi_clk); - } - - return 0; -} - static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev) { struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); @@ -479,7 +439,8 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match); static int hda_tegra_probe(struct platform_device *pdev) { const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR | - AZX_DCAPS_PM_RUNTIME; + AZX_DCAPS_PM_RUNTIME | + AZX_DCAPS_4K_BDLE_BOUNDARY; struct snd_card *card; struct azx *chip; struct hda_tegra *hda; @@ -498,7 +459,17 @@ static int hda_tegra_probe(struct platform_device *pdev) return err; } - err = hda_tegra_init_clk(hda); + hda->reset = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(hda->reset)) { + err = PTR_ERR(hda->reset); + goto out_free; + } + + hda->clocks[hda->nclocks++].id = "hda"; + hda->clocks[hda->nclocks++].id = "hda2hdmi"; + hda->clocks[hda->nclocks++].id = 
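
The hda_tegra conversion in progress here replaces three open-coded clk_get/prepare_enable chains with the clk_bulk helpers, which acquire and toggle an array of clocks in one call and automatically unwind the already-enabled ones when a later clock fails. A kernel-style sketch of the consumer pattern, assuming the same three clock names:

#include <linux/clk.h>
#include <linux/device.h>

struct demo_ctx {
	struct clk_bulk_data clocks[3];
	unsigned int nclocks;
};

static int demo_clk_init(struct device *dev, struct demo_ctx *ctx)
{
	ctx->clocks[ctx->nclocks++].id = "hda";
	ctx->clocks[ctx->nclocks++].id = "hda2hdmi";
	ctx->clocks[ctx->nclocks++].id = "hda2codec_2x";

	/* one call fetches all three; devm takes care of the puts */
	return devm_clk_bulk_get(dev, ctx->nclocks, ctx->clocks);
}

static int demo_resume(struct demo_ctx *ctx)
{
	/* enables in order, rolls back already-enabled clocks on error */
	return clk_bulk_prepare_enable(ctx->nclocks, ctx->clocks);
}

static void demo_suspend(struct demo_ctx *ctx)
{
	clk_bulk_disable_unprepare(ctx->nclocks, ctx->clocks);
}
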
"hda2codec_2x"; + + err = devm_clk_bulk_get(&pdev->dev, hda->nclocks, hda->clocks); if (err < 0) goto out_free; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index f774b2ac9720c..82f14c3f642bd 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -1272,6 +1272,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI), + SND_PCI_QUIRK(0x3842, 0x1055, "EVGA Z390 DARK", QUIRK_R3DI), SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D), SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D), SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5), diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 2bd0a5839e805..48b802563c2da 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1117,6 +1117,7 @@ static const struct hda_device_id snd_hda_id_conexant[] = { HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f120d1, "SN6180", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 71e11481ba41c..1afe9cddb69eb 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -157,6 +157,9 @@ struct hdmi_spec { bool dyn_pin_out; bool dyn_pcm_assign; + bool dyn_pcm_no_legacy; + bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */ + bool intel_hsw_fixup; /* apply Intel platform-specific fixups */ /* * Non-generic VIA/NVIDIA specific @@ -666,15 +669,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec, int ca, int active_channels, int conn_type) { + struct hdmi_spec *spec = codec->spec; union audio_infoframe ai; memset(&ai, 0, sizeof(ai)); - if (conn_type == 0) { /* HDMI */ + if ((conn_type == 0) || /* HDMI */ + /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */ + (conn_type == 1 && spec->nv_dp_workaround)) { struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi; - hdmi_ai->type = 0x84; - hdmi_ai->ver = 0x01; - hdmi_ai->len = 0x0a; + if (conn_type == 0) { /* HDMI */ + hdmi_ai->type = 0x84; + hdmi_ai->ver = 0x01; + hdmi_ai->len = 0x0a; + } else {/* Nvidia DP */ + hdmi_ai->type = 0x84; + hdmi_ai->ver = 0x1b; + hdmi_ai->len = 0x11 << 2; + } hdmi_ai->CC02_CT47 = active_channels - 1; hdmi_ai->CA = ca; hdmi_checksum_audio_infoframe(hdmi_ai); @@ -1257,6 +1269,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, set_bit(pcm_idx, &spec->pcm_in_use); per_pin = get_pin(spec, pin_idx); per_pin->cvt_nid = per_cvt->cvt_nid; + per_pin->silent_stream = false; hinfo->nid = per_cvt->cvt_nid; /* flip stripe flag for the assigned stream if supported */ @@ -1348,6 +1361,12 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec, { int i; + /* on the new machines, try to assign the pcm slot dynamically, + * not use the preferred fixed map (legacy way) anymore. 
+ */ + if (spec->dyn_pcm_no_legacy) + goto last_try; + /* * generic_hdmi_build_pcms() may allocate extra PCMs on some * platforms (with maximum of 'num_nids + dev_num - 1') @@ -1377,8 +1396,9 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec, return i; } + last_try: /* the last try; check the empty slots in pins */ - for (i = 0; i < spec->num_nids; i++) { + for (i = 0; i < spec->pcm_used; i++) { if (!test_bit(i, &spec->pcm_bitmap)) return i; } @@ -1945,6 +1965,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) static const struct snd_pci_quirk force_connect_list[] = { SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1), SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1), {} }; @@ -2254,7 +2276,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec) * dev_num is the device entry number in a pin */ - if (codec->mst_no_extra_pcms) + if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms) + pcm_num = spec->num_cvts; + else if (codec->mst_no_extra_pcms) pcm_num = spec->num_nids; else pcm_num = spec->num_nids + spec->dev_num - 1; @@ -2662,9 +2686,6 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id) */ if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND) return; - /* ditto during suspend/resume process itself */ - if (snd_hdac_is_in_pm(&codec->core)) - return; check_presence_and_report(codec, pin_nid, dev_id); } @@ -2848,9 +2869,6 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe) */ if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND) return; - /* ditto during suspend/resume process itself */ - if (snd_hdac_is_in_pm(&codec->core)) - return; snd_hdac_i915_set_bclk(&codec->bus->core); check_presence_and_report(codec, pin_nid, dev_id); @@ -3010,8 +3028,16 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec) * the index indicate the port number. 
*/ static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}; + int ret; - return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map)); + ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map)); + if (!ret) { + struct hdmi_spec *spec = codec->spec; + + spec->dyn_pcm_no_legacy = true; + } + + return ret; } /* Intel Baytrail and Braswell; with eld notifier */ @@ -3510,6 +3536,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec) spec->pcm_playback.rates = SUPPORTED_RATES; spec->pcm_playback.maxbps = SUPPORTED_MAXBPS; spec->pcm_playback.formats = SUPPORTED_FORMATS; + spec->nv_dp_workaround = true; return 0; } @@ -3649,6 +3676,7 @@ static int patch_nvhdmi(struct hda_codec *codec) spec->chmap.ops.chmap_cea_alloc_validate_get_type = nvhdmi_chmap_cea_alloc_validate_get_type; spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + spec->nv_dp_workaround = true; codec->link_down_at_suspend = 1; @@ -3672,6 +3700,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec) spec->chmap.ops.chmap_cea_alloc_validate_get_type = nvhdmi_chmap_cea_alloc_validate_get_type; spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + spec->nv_dp_workaround = true; codec->link_down_at_suspend = 1; @@ -3839,11 +3868,13 @@ static int patch_tegra_hdmi(struct hda_codec *codec) if (err) return err; + codec->depop_delay = 10; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; spec = codec->spec; spec->chmap.ops.chmap_cea_alloc_validate_get_type = nvhdmi_chmap_cea_alloc_validate_get_type; spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + spec->nv_dp_workaround = true; return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 78f4f684a3c72..fffa681313b66 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -827,7 +827,7 @@ static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports) alc_setup_gpio(codec, 0x02); break; case 7: - alc_setup_gpio(codec, 0x03); + alc_setup_gpio(codec, 0x04); break; case 5: default: @@ -3561,6 +3561,15 @@ static void alc256_init(struct hda_codec *codec) hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; + if (spec->ultra_low_power) { + alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1); + alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2); + alc_update_coef_idx(codec, 0x08, 7<<4, 0); + alc_update_coef_idx(codec, 0x3b, 1<<15, 0); + alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); + msleep(30); + } + if (!hp_pin) hp_pin = 0x21; @@ -3572,14 +3581,6 @@ static void alc256_init(struct hda_codec *codec) msleep(2); alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ - if (spec->ultra_low_power) { - alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1); - alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2); - alc_update_coef_idx(codec, 0x08, 7<<4, 0); - alc_update_coef_idx(codec, 0x3b, 1<<15, 0); - alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); - msleep(30); - } snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); @@ -3661,6 +3662,13 @@ static void alc225_init(struct hda_codec *codec) hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp1_pin_sense, hp2_pin_sense; + if (spec->ultra_low_power) { + alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2); + alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); + alc_update_coef_idx(codec, 0x33, 1<<11, 0); + msleep(30); + } + if (!hp_pin) hp_pin = 0x21; msleep(30); @@ -3672,12 +3680,6 @@ static void alc225_init(struct hda_codec *codec) msleep(2); alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ - if 
(spec->ultra_low_power) { - alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2); - alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); - alc_update_coef_idx(codec, 0x33, 1<<11, 0); - msleep(30); - } if (hp1_pin_sense || spec->ultra_low_power) snd_hda_codec_write(codec, hp_pin, 0, @@ -4581,6 +4583,16 @@ static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec, } } +static void alc285_fixup_hp_gpio_micmute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) + spec->micmute_led_polarity = 1; + alc_fixup_hp_gpio_led(codec, action, 0, 0x04); +} + static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -4602,6 +4614,13 @@ static void alc285_fixup_hp_mute_led(struct hda_codec *codec, alc285_fixup_hp_coef_micmute_led(codec, fix, action); } +static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc285_fixup_hp_mute_led_coefbit(codec, fix, action); + alc285_fixup_hp_gpio_micmute_led(codec, fix, action); +} + static void alc236_fixup_hp_mute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -6654,6 +6673,34 @@ static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec, } } +static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const struct hda_pintbl pincfgs[] = { + { 0x14, 0x90170151 }, + { 0x17, 0x90170150 }, + { } + }; + static const hda_nid_t conn[] = { 0x02, 0x03 }; + static const hda_nid_t preferred_pairs[] = { + 0x14, 0x02, + 0x17, 0x03, + 0x21, 0x02, + 0 + }; + struct alc_spec *spec = codec->spec; + + alc_fixup_no_shutup(codec, fix, action); + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_apply_pincfgs(codec, pincfgs); + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + spec->gen.preferred_dacs = preferred_pairs; + break; + } +} + enum { ALC269_FIXUP_GPIO2, ALC269_FIXUP_SONY_VAIO, @@ -6824,8 +6871,11 @@ enum { ALC294_FIXUP_ASUS_GU502_HP, ALC294_FIXUP_ASUS_GU502_PINS, ALC294_FIXUP_ASUS_GU502_VERBS, + ALC294_FIXUP_ASUS_G513_PINS, + ALC285_FIXUP_ASUS_G533Z_PINS, ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, + ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED, ALC236_FIXUP_HP_GPIO_LED, ALC236_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, @@ -6881,6 +6931,9 @@ enum { ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME, ALC285_FIXUP_LEGION_Y9000X_SPEAKERS, ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, + ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED, + ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS, + ALC236_FIXUP_DELL_DUAL_CODECS, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -8149,6 +8202,26 @@ static const struct hda_fixup alc269_fixups[] = { [ALC294_FIXUP_ASUS_GU502_HP] = { .type = HDA_FIXUP_FUNC, .v.func = alc294_fixup_gu502_hp, + }, + [ALC294_FIXUP_ASUS_G513_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11050 }, /* front HP mic */ + { 0x1a, 0x03a11c30 }, /* rear external mic */ + { 0x21, 0x03211420 }, /* front HP out */ + { } + }, + }, + [ALC285_FIXUP_ASUS_G533Z_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90170152 }, /* Speaker Surround Playback Switch */ + { 0x19, 0x03a19020 }, /* Mic Boost Volume */ + { 0x1a, 0x03a11c30 }, /* Mic Boost Volume */ + { 0x1e, 0x90170151 }, /* Rear jack, IN OUT EAPD Detect */ + { 0x21, 0x03211420 }, + { } + 
}, }, [ALC294_FIXUP_ASUS_COEF_1B] = { .type = HDA_FIXUP_VERBS, @@ -8171,6 +8244,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_mute_led, }, + [ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_spectre_x360_mute_led, + }, [ALC236_FIXUP_HP_GPIO_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc236_fixup_hp_gpio_led, @@ -8671,6 +8748,28 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, + [ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x20, AC_VERB_SET_COEF_INDEX, 0x19 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x8e11 }, + { } + }, + .chained = true, + .chain_id = ALC285_FIXUP_HP_MUTE_LED, + }, + [ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_dell_inspiron_top_speakers, + .chained = true, + .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + }, + [ALC236_FIXUP_DELL_DUAL_CODECS] = { + .type = HDA_FIXUP_PINS, + .v.func = alc1220_fixup_gb_dual_codecs, + .chained = true, + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8712,6 +8811,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC), + SND_PCI_QUIRK(0x1025, 0x1534, "Acer Predator PH315-54", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), @@ -8769,6 +8869,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), + SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), + SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -8851,6 +8961,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), 
SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), + SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED), @@ -8892,6 +9003,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED), SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -8912,10 +9024,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), + SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), - SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), @@ -8930,14 +9043,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), + SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS 
GU603", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -8959,6 +9075,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), @@ -8967,10 +9084,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), @@ -9133,13 +9253,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK), + SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), + SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), + SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS), SND_PCI_QUIRK(0x1d05, 0x1096, 
"TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP), @@ -10470,6 +10593,17 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec, } } +static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; + spec->gen.hp_automute_hook = alc897_hp_automute_hook; + } +} + static const struct coef_fw alc668_coefs[] = { WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0), WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80), @@ -10553,6 +10687,8 @@ enum { ALC897_FIXUP_LENOVO_HEADSET_MIC, ALC897_FIXUP_HEADSET_MIC_PIN, ALC897_FIXUP_HP_HSMIC_VERB, + ALC897_FIXUP_LENOVO_HEADSET_MODE, + ALC897_FIXUP_HEADSET_MIC_PIN2, }; static const struct hda_fixup alc662_fixups[] = { @@ -10979,6 +11115,19 @@ static const struct hda_fixup alc662_fixups[] = { { } }, }, + [ALC897_FIXUP_LENOVO_HEADSET_MODE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc897_fixup_lenovo_headset_mode, + }, + [ALC897_FIXUP_HEADSET_MIC_PIN2] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -11031,6 +11180,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index c662431bf13a5..6fc0c4e77cd1e 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -209,6 +209,7 @@ struct sigmatel_spec { /* beep widgets */ hda_nid_t anabeep_nid; + bool beep_power_on; /* SPDIF-out mux */ const char * const *spdif_labels; @@ -4307,6 +4308,8 @@ static int stac_parse_auto_config(struct hda_codec *codec) if (codec->beep) { /* IDT/STAC codecs have linear beep tone parameter */ codec->beep->linear_tone = spec->linear_tone_beep; + /* keep power up while beep is enabled */ + codec->beep->keep_power_at_enable = 1; /* if no beep switch is available, make its own one */ caps = query_amp_caps(codec, nid, HDA_OUTPUT); if (!(caps & AC_AMPCAP_MUTE)) { diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a188901a83bbe..29abc96dc146c 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -821,6 +821,9 @@ static int add_secret_dac_path(struct hda_codec *codec) return 0; nums = snd_hda_get_connections(codec, spec->gen.mixer_nid, conn, ARRAY_SIZE(conn) - 1); + if (nums < 0) + return nums; + for (i = 0; i < nums; i++) { if (get_wcaps_type(get_wcaps(codec, conn[i])) == AC_WID_AUD_OUT) return 0; diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c index f884f5a6a61c2..a49a3254f9677 100644 --- 
a/sound/pci/lx6464es/lx_core.c +++ b/sound/pci/lx6464es/lx_core.c @@ -493,12 +493,11 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture, dev_dbg(chip->card->dev, "CMD_08_ASK_BUFFERS: needed %d, freed %d\n", *r_needed, *r_freed); - for (i = 0; i < MAX_STREAM_BUFFER; ++i) { - for (i = 0; i != chip->rmh.stat_len; ++i) - dev_dbg(chip->card->dev, - " stat[%d]: %x, %x\n", i, - chip->rmh.stat[i], - chip->rmh.stat[i] & MASK_DATA_SIZE); + for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len; + ++i) { + dev_dbg(chip->card->dev, " stat[%d]: %x, %x\n", i, + chip->rmh.stat[i], + chip->rmh.stat[i] & MASK_DATA_SIZE); } } diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index 4aee30db034dd..9543474245004 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c @@ -436,7 +436,7 @@ struct hdsp_midi { struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *input; struct snd_rawmidi_substream *output; - char istimer; /* timer in use */ + signed char istimer; /* timer in use */ struct timer_list timer; spinlock_t lock; int pending; @@ -479,7 +479,7 @@ struct hdsp { pid_t playback_pid; int running; int system_sample_rate; - const char *channel_map; + const signed char *channel_map; int dev; int irq; unsigned long port; @@ -501,7 +501,7 @@ struct hdsp { where the data for that channel can be read/written from/to. */ -static const char channel_map_df_ss[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_df_ss[HDSP_MAX_CHANNELS] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 }; @@ -516,7 +516,7 @@ static const char channel_map_mf_ss[HDSP_MAX_CHANNELS] = { /* Multiface */ -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_ds[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_ds[HDSP_MAX_CHANNELS] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, /* channels 12 and 13 are S/PDIF */ @@ -525,7 +525,7 @@ static const char channel_map_ds[HDSP_MAX_CHANNELS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = { /* ADAT channels */ 0, 1, 2, 3, 4, 5, 6, 7, /* SPDIF */ @@ -539,7 +539,7 @@ static const char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = { -1, -1 }; -static const char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = { /* ADAT */ 1, 3, 5, 7, /* SPDIF */ @@ -553,7 +553,7 @@ static const char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = { -1, -1, -1, -1, -1, -1 }; -static const char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = { /* ADAT is disabled in this mode */ /* SPDIF */ 8, 9, @@ -3869,7 +3869,7 @@ static snd_pcm_uframes_t snd_hdsp_hw_pointer(struct snd_pcm_substream *substream return hdsp_hw_pointer(hdsp); } -static char *hdsp_channel_buffer_location(struct hdsp *hdsp, +static signed char *hdsp_channel_buffer_location(struct hdsp *hdsp, int stream, int channel) @@ -3893,7 +3893,7 @@ static int snd_hdsp_playback_copy(struct snd_pcm_substream *substream, void __user *src, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -3911,7 +3911,7 @@ static int snd_hdsp_playback_copy_kernel(struct snd_pcm_substream *substream, void *src, unsigned long 
count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel); if (snd_BUG_ON(!channel_buf)) @@ -3925,7 +3925,7 @@ static int snd_hdsp_capture_copy(struct snd_pcm_substream *substream, void __user *dst, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -3943,7 +3943,7 @@ static int snd_hdsp_capture_copy_kernel(struct snd_pcm_substream *substream, void *dst, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel); if (snd_BUG_ON(!channel_buf)) @@ -3957,7 +3957,7 @@ static int snd_hdsp_hw_silence(struct snd_pcm_substream *substream, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel); if (snd_BUG_ON(!channel_buf)) diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c index 8def24673f35f..459696844b8ce 100644 --- a/sound/pci/rme9652/rme9652.c +++ b/sound/pci/rme9652/rme9652.c @@ -229,7 +229,7 @@ struct snd_rme9652 { int last_spdif_sample_rate; /* so that we can catch externally ... */ int last_adat_sample_rate; /* ... induced rate changes */ - const char *channel_map; + const signed char *channel_map; struct snd_card *card; struct snd_pcm *pcm; @@ -246,12 +246,12 @@ struct snd_rme9652 { where the data for that channel can be read/written from/to. 
*/ -static const char channel_map_9652_ss[26] = { +static const signed char channel_map_9652_ss[26] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 }; -static const char channel_map_9636_ss[26] = { +static const signed char channel_map_9636_ss[26] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* channels 16 and 17 are S/PDIF */ 24, 25, @@ -259,7 +259,7 @@ static const char channel_map_9636_ss[26] = { -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_9652_ds[26] = { +static const signed char channel_map_9652_ds[26] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, /* channels 12 and 13 are S/PDIF */ @@ -268,7 +268,7 @@ static const char channel_map_9652_ds[26] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_9636_ds[26] = { +static const signed char channel_map_9636_ds[26] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, /* channels 8 and 9 are S/PDIF */ @@ -1841,7 +1841,7 @@ static snd_pcm_uframes_t snd_rme9652_hw_pointer(struct snd_pcm_substream *substr return rme9652_hw_pointer(rme9652); } -static char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652, +static signed char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652, int stream, int channel) @@ -1869,7 +1869,7 @@ static int snd_rme9652_playback_copy(struct snd_pcm_substream *substream, void __user *src, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -1889,7 +1889,7 @@ static int snd_rme9652_playback_copy_kernel(struct snd_pcm_substream *substream, void *src, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = rme9652_channel_buffer_location(rme9652, substream->pstr->stream, @@ -1905,7 +1905,7 @@ static int snd_rme9652_capture_copy(struct snd_pcm_substream *substream, void __user *dst, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -1925,7 +1925,7 @@ static int snd_rme9652_capture_copy_kernel(struct snd_pcm_substream *substream, void *dst, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = rme9652_channel_buffer_location(rme9652, substream->pstr->stream, @@ -1941,7 +1941,7 @@ static int snd_rme9652_hw_silence(struct snd_pcm_substream *substream, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = rme9652_channel_buffer_location (rme9652, substream->pstr->stream, diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c index 3bd350afb7434..0d2e3fa21519c 100644 --- a/sound/soc/atmel/mchp-spdiftx.c +++ b/sound/soc/atmel/mchp-spdiftx.c @@ -196,8 +196,7 @@ struct mchp_spdiftx_dev { struct clk *pclk; struct clk *gclk; unsigned int fmt; - const struct mchp_i2s_caps *caps; - int gclk_enabled:1; + unsigned int gclk_enabled:1; }; static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev) @@ -766,8 +765,6 @@ static const struct of_device_id mchp_spdiftx_dt_ids[] = { MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids); 
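
Two of the changes just above are the same portability bug in different clothes. Plain char has implementation-defined signedness in C (unsigned by default on ARM and PowerPC builds), so the hdsp and rme9652 channel maps that use -1 as an "unmapped" sentinel have to be signed char, or tests of the form channel_map[chn] < 0 can never fire. The mchp-spdiftx fix of int gclk_enabled:1 is the bitfield flavour: a one-bit signed bitfield can only hold 0 and -1, so comparisons against 1 misbehave, while unsigned int gclk_enabled:1 holds 0 and 1 as intended. A runnable illustration (the signed-bitfield output shown follows the usual GCC/Clang behaviour):

#include <stdio.h>

struct demo_flags {
	int bad:1;           /* signed 1-bit field: values are 0 and -1 */
	unsigned int good:1; /* values are 0 and 1 */
};

int main(void)
{
	signed char map[] = { 0, 1, -1 }; /* -1 marks an unmapped channel */
	struct demo_flags f = { 0 };
	int one = 1;

	/* Reliably true only because map is signed char, not plain char. */
	printf("sentinel seen: %d\n", map[2] < 0);

	f.bad = one;
	f.good = one;
	printf("bad == 1? %d, good == 1? %d\n", f.bad == 1, f.good == 1);
	/* typically prints: bad == 1? 0, good == 1? 1 */
	return 0;
}
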
static int mchp_spdiftx_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; struct mchp_spdiftx_dev *dev; struct resource *mem; struct regmap *regmap; @@ -781,11 +778,6 @@ static int mchp_spdiftx_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; - /* Get hardware capabilities. */ - match = of_match_node(mchp_spdiftx_dt_ids, np); - if (match) - dev->caps = match->data; - /* Map I/O registers. */ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(base)) diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index fc6a2bc311b4f..c61b17dc2af87 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -146,7 +146,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = { 0, 0xA0, 96, adc_att_tlv), SOC_DOUBLE_R_SX_TLV("PGA Volume", CS42L51_ALC_PGA_CTL, CS42L51_ALC_PGB_CTL, - 0, 0x19, 30, pga_tlv), + 0, 0x1A, 30, pga_tlv), SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0), SOC_SINGLE("Auto-Mute Switch", CS42L51_DAC_CTL, 2, 1, 0), SOC_SINGLE("Soft Ramp Switch", CS42L51_DAC_CTL, 1, 1, 0), diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index d41e031931061..3c5ec47a8fe64 100644 --- a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -1193,18 +1193,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, if (pdata) { cs42l56->pdata = *pdata; } else { - pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata), - GFP_KERNEL); - if (!pdata) - return -ENOMEM; - if (i2c_client->dev.of_node) { ret = cs42l56_handle_of_data(i2c_client, &cs42l56->pdata); if (ret != 0) return ret; } - cs42l56->pdata = *pdata; } if (cs42l56->pdata.gpio_nreset) { diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c index 5f8c96dea094a..f9e58d6509a83 100644 --- a/sound/soc/codecs/da7219.c +++ b/sound/soc/codecs/da7219.c @@ -2194,6 +2194,7 @@ static int da7219_register_dai_clks(struct snd_soc_component *component) dai_clk_lookup = clkdev_hw_create(dai_clk_hw, init.name, "%s", dev_name(dev)); if (!dai_clk_lookup) { + clk_hw_unregister(dai_clk_hw); ret = -ENOMEM; goto err; } else { @@ -2215,12 +2216,12 @@ static int da7219_register_dai_clks(struct snd_soc_component *component) return 0; err: - do { + while (--i >= 0) { if (da7219->dai_clks_lookup[i]) clkdev_drop(da7219->dai_clks_lookup[i]); clk_hw_unregister(&da7219->dai_clks_hw[i]); - } while (i-- > 0); + } if (np) kfree(da7219->clk_hw_data); diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c index 390dd6c7f6a50..de5955db0a5f0 100644 --- a/sound/soc/codecs/hdac_hda.c +++ b/sound/soc/codecs/hdac_hda.c @@ -46,9 +46,8 @@ static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai); static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai); -static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai, - unsigned int tx_mask, unsigned int rx_mask, - int slots, int slot_width); +static int hdac_hda_dai_set_stream(struct snd_soc_dai *dai, void *stream, + int direction); static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt, struct snd_soc_dai *dai); @@ -58,7 +57,7 @@ static const struct snd_soc_dai_ops hdac_hda_dai_ops = { .prepare = hdac_hda_dai_prepare, .hw_params = hdac_hda_dai_hw_params, .hw_free = hdac_hda_dai_hw_free, - .set_tdm_slot = hdac_hda_dai_set_tdm_slot, + .set_stream = hdac_hda_dai_set_stream, }; static struct snd_soc_dai_driver hdac_hda_dais[] = { 
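
The da7219 hunk above also repairs a classic cleanup off-by-one: the old do { ... } while (i-- > 0) made its first pass with i still holding the index of the iteration that failed, unregistering an entry that had never been fully set up; the new code cleans up the half-made entry at the failure site itself (the added clk_hw_unregister() after a failed clkdev_hw_create()) and then lets while (--i >= 0) walk back over completed iterations only. The pattern in runnable form:

#include <stdio.h>

#define N 3

static int setup(int i)
{
	printf("setup %d\n", i);
	return i == 2 ? -1 : 0; /* simulate a failure on the third item */
}

static void teardown(int i)
{
	printf("teardown %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < N; i++) {
		if (setup(i) < 0)
			goto err;
	}
	return 0;
err:
	while (--i >= 0)  /* unwinds 1, then 0: only completed iterations */
		teardown(i);
	return 1;
}
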
@@ -180,21 +179,22 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = {
 };
 
-static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
-				     unsigned int tx_mask, unsigned int rx_mask,
-				     int slots, int slot_width)
+static int hdac_hda_dai_set_stream(struct snd_soc_dai *dai,
+				   void *stream, int direction)
 {
 	struct snd_soc_component *component = dai->component;
 	struct hdac_hda_priv *hda_pvt;
 	struct hdac_hda_pcm *pcm;
+	struct hdac_stream *hstream;
+
+	if (!stream)
+		return -EINVAL;
 
 	hda_pvt = snd_soc_component_get_drvdata(component);
 	pcm = &hda_pvt->pcm[dai->id];
+	hstream = (struct hdac_stream *)stream;
 
-	if (tx_mask)
-		pcm->stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
-	else
-		pcm->stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
+	pcm->stream_tag[direction] = hstream->stream_tag;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
index d0efc5e254ae9..da0ed74758b05 100644
--- a/sound/soc/codecs/hdac_hda.h
+++ b/sound/soc/codecs/hdac_hda.h
@@ -14,7 +14,7 @@ enum {
 	HDAC_HDMI_1_DAI_ID,
 	HDAC_HDMI_2_DAI_ID,
 	HDAC_HDMI_3_DAI_ID,
-	HDAC_LAST_DAI_ID = HDAC_HDMI_3_DAI_ID,
+	HDAC_DAI_ID_NUM
 };
 
 struct hdac_hda_pcm {
@@ -24,7 +24,7 @@ struct hdac_hda_pcm {
 
 struct hdac_hda_priv {
 	struct hda_codec codec;
-	struct hdac_hda_pcm pcm[HDAC_LAST_DAI_ID];
+	struct hdac_hda_pcm pcm[HDAC_DAI_ID_NUM];
 	bool need_display_power;
 };
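The hdac_hda conversion above replaces the set_tdm_slot() misuse with the generic set_stream() DAI op. As a rough illustration (not part of the patch; my_priv and my_dai_set_stream are hypothetical names), a converted callback simply records the stream tag of the hdac_stream it is handed, indexed by direction:

#include <sound/hdaudio.h>
#include <sound/soc.h>

struct my_priv {
	/* one stream tag per direction: playback and capture */
	unsigned int stream_tag[2];
};

static int my_dai_set_stream(struct snd_soc_dai *dai, void *stream,
			     int direction)
{
	struct my_priv *priv = snd_soc_component_get_drvdata(dai->component);
	struct hdac_stream *hstream = stream;

	if (!stream)
		return -EINVAL;

	/* direction is SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE */
	priv->stream_tag[direction] = hstream->stream_tag;
	return 0;
}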
diff --git a/sound/soc/codecs/jz4725b.c b/sound/soc/codecs/jz4725b.c
index e49374c72e70a..8a830d0ad9500 100644
--- a/sound/soc/codecs/jz4725b.c
+++ b/sound/soc/codecs/jz4725b.c
@@ -136,14 +136,17 @@ enum {
 #define REG_CGR3_GO1L_OFFSET	0
 #define REG_CGR3_GO1L_MASK	(0x1f << REG_CGR3_GO1L_OFFSET)
 
+#define REG_CGR10_GIL_OFFSET	0
+#define REG_CGR10_GIR_OFFSET	4
+
 struct jz_icdc {
 	struct regmap *regmap;
 	void __iomem *base;
 	struct clk *clk;
 };
 
-static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_dac_tlv, -2250, 0);
-static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_line_tlv, -1500, 600);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_adc_tlv, 0, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_dac_tlv, -2250, 150, 0);
 
 static const struct snd_kcontrol_new jz4725b_codec_controls[] = {
 	SOC_DOUBLE_TLV("Master Playback Volume",
@@ -151,11 +154,11 @@ static const struct snd_kcontrol_new jz4725b_codec_controls[] = {
 		       REG_CGR1_GODL_OFFSET,
 		       REG_CGR1_GODR_OFFSET,
 		       0xf, 1, jz4725b_dac_tlv),
-	SOC_DOUBLE_R_TLV("Master Capture Volume",
-			 JZ4725B_CODEC_REG_CGR3,
-			 JZ4725B_CODEC_REG_CGR2,
-			 REG_CGR2_GO1R_OFFSET,
-			 0x1f, 1, jz4725b_line_tlv),
+	SOC_DOUBLE_TLV("Master Capture Volume",
+		       JZ4725B_CODEC_REG_CGR10,
+		       REG_CGR10_GIL_OFFSET,
+		       REG_CGR10_GIR_OFFSET,
+		       0xf, 0, jz4725b_adc_tlv),
 	SOC_SINGLE("Master Playback Switch", JZ4725B_CODEC_REG_CR1,
 		   REG_CR1_DAC_MUTE_OFFSET, 1, 1),
@@ -180,7 +183,7 @@ static SOC_VALUE_ENUM_SINGLE_DECL(jz4725b_codec_adc_src_enum,
 			 jz4725b_codec_adc_src_texts,
 			 jz4725b_codec_adc_src_values);
 static const struct snd_kcontrol_new jz4725b_codec_adc_src_ctrl =
-	SOC_DAPM_ENUM("Route", jz4725b_codec_adc_src_enum);
+	SOC_DAPM_ENUM("ADC Source Capture Route", jz4725b_codec_adc_src_enum);
 
 static const struct snd_kcontrol_new jz4725b_codec_mixer_controls[] = {
 	SOC_DAPM_SINGLE("Line In Bypass", JZ4725B_CODEC_REG_CR1,
@@ -225,7 +228,7 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = {
 	SND_SOC_DAPM_ADC("ADC", "Capture",
 			 JZ4725B_CODEC_REG_PMR1, REG_PMR1_SB_ADC_OFFSET, 1),
 
-	SND_SOC_DAPM_MUX("ADC Source", SND_SOC_NOPM, 0, 0,
+	SND_SOC_DAPM_MUX("ADC Source Capture Route", SND_SOC_NOPM, 0, 0,
			 &jz4725b_codec_adc_src_ctrl),
 
 	/* Mixer */
@@ -236,7 +239,8 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = {
 	SND_SOC_DAPM_MIXER("DAC to Mixer", JZ4725B_CODEC_REG_CR1,
 			   REG_CR1_DACSEL_OFFSET, 0, NULL, 0),
 
-	SND_SOC_DAPM_MIXER("Line In", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("Line In", JZ4725B_CODEC_REG_PMR1,
+			   REG_PMR1_SB_LIN_OFFSET, 1, NULL, 0),
 	SND_SOC_DAPM_MIXER("HP Out", JZ4725B_CODEC_REG_CR1,
 			   REG_CR1_HP_DIS_OFFSET, 1, NULL, 0),
@@ -283,11 +287,11 @@ static const struct snd_soc_dapm_route jz4725b_codec_dapm_routes[] = {
 	{"Mixer", NULL, "DAC to Mixer"},
 
 	{"Mixer to ADC", NULL, "Mixer"},
-	{"ADC Source", "Mixer", "Mixer to ADC"},
-	{"ADC Source", "Line In", "Line In"},
-	{"ADC Source", "Mic 1", "Mic 1"},
-	{"ADC Source", "Mic 2", "Mic 2"},
-	{"ADC", NULL, "ADC Source"},
+	{"ADC Source Capture Route", "Mixer", "Mixer to ADC"},
+	{"ADC Source Capture Route", "Line In", "Line In"},
+	{"ADC Source Capture Route", "Mic 1", "Mic 1"},
+	{"ADC Source Capture Route", "Mic 2", "Mic 2"},
+	{"ADC", NULL, "ADC Source Capture Route"},
 
 	{"Out Stage", NULL, "Mixer"},
 	{"HP Out", NULL, "Out Stage"},
diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
index 39afa011f0e27..a2bb93fb830b4 100644
--- a/sound/soc/codecs/max98373-sdw.c
+++ b/sound/soc/codecs/max98373-sdw.c
@@ -728,7 +728,7 @@ static int max98373_sdw_set_tdm_slot(struct snd_soc_dai *dai,
 static const struct snd_soc_dai_ops max98373_dai_sdw_ops = {
 	.hw_params = max98373_sdw_dai_hw_params,
 	.hw_free = max98373_pcm_hw_free,
-	.set_sdw_stream = max98373_set_sdw_stream,
+	.set_stream = max98373_set_sdw_stream,
 	.shutdown = max98373_shutdown,
 	.set_tdm_slot = max98373_sdw_set_tdm_slot,
 };
diff --git a/sound/soc/codecs/mt6660.c b/sound/soc/codecs/mt6660.c
index d1797003c83dd..3cee2ea4b85de 100644
--- a/sound/soc/codecs/mt6660.c
+++ b/sound/soc/codecs/mt6660.c
@@ -510,7 +510,11 @@ static int mt6660_i2c_probe(struct i2c_client *client,
 	ret = devm_snd_soc_register_component(chip->dev,
 					      &mt6660_component_driver,
 					      &mt6660_codec_dai, 1);
+	if (ret)
+		pm_runtime_disable(chip->dev);
+
 	return ret;
+
 probe_fail:
 	_mt6660_chip_power_on(chip, 0);
 	mutex_destroy(&chip->io_lock);
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index c8ccfa2fff848..a95fe3fff1db8 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -1072,6 +1072,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
 	struct snd_soc_component *component = dai->component;
 	struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
 	unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div;
+	int err = -EINVAL;
 
 	nau8824_sema_acquire(nau8824, HZ);
 
@@ -1088,7 +1089,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
 		osr &= NAU8824_DAC_OVERSAMPLE_MASK;
 		if (nau8824_clock_check(nau8824, substream->stream,
 					nau8824->fs, osr))
-			return -EINVAL;
+			goto error;
 		regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
 				   NAU8824_CLK_DAC_SRC_MASK,
 				   osr_dac_sel[osr].clk_src << NAU8824_CLK_DAC_SRC_SFT);
@@ -1098,7 +1099,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
 		osr &= NAU8824_ADC_SYNC_DOWN_MASK;
 		if (nau8824_clock_check(nau8824, substream->stream,
 					nau8824->fs, osr))
-			return -EINVAL;
+			goto error;
 		regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
 				   NAU8824_CLK_ADC_SRC_MASK,
 				   osr_adc_sel[osr].clk_src << NAU8824_CLK_ADC_SRC_SFT);
@@ -1119,7 +1120,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
 	else if (bclk_fs <= 256)
 		bclk_div = 0;
 	else
-		return -EINVAL;
+		goto error;
 	regmap_update_bits(nau8824->regmap,
 		NAU8824_REG_PORT0_I2S_PCM_CTRL_2,
 		NAU8824_I2S_LRC_DIV_MASK | NAU8824_I2S_BLK_DIV_MASK,
@@ -1140,15 +1141,17 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
 		val_len |= NAU8824_I2S_DL_32;
 		break;
 	default:
-		return -EINVAL;
+		goto error;
 	}
 
 	regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
 			   NAU8824_I2S_DL_MASK, val_len);
+	err = 0;
 
+error:
 	nau8824_sema_release(nau8824);
 
-	return 0;
+	return err;
 }
 
 static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
@@ -1157,8 +1160,6 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
 	unsigned int ctrl1_val = 0, ctrl2_val = 0;
 
-	nau8824_sema_acquire(nau8824, HZ);
-
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBM_CFM:
 		ctrl2_val |= NAU8824_I2S_MS_MASTER;
@@ -1200,6 +1201,8 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 		return -EINVAL;
 	}
 
+	nau8824_sema_acquire(nau8824, HZ);
+
 	regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
 			   NAU8824_I2S_DF_MASK | NAU8824_I2S_BP_MASK |
 			   NAU8824_I2S_PCMB_EN, ctrl1_val);
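The nau8824 change above is the usual single-exit pattern for a function that must release a semaphore on every path: early returns leaked the semaphore, so errors now route through one label. A minimal standalone sketch of the idiom (hypothetical names, not the driver code):

/* Sketch only; my_codec, acquire_hw_sema() and friends are hypothetical. */
static int my_hw_params(struct my_codec *codec, unsigned int osr)
{
	int err = -EINVAL;	/* assume failure until the end */

	acquire_hw_sema(codec);

	if (!osr_is_valid(codec, osr))
		goto error;	/* still releases the semaphore below */

	program_clock_dividers(codec, osr);
	err = 0;

error:
	release_hw_sema(codec);
	return err;
}

Moving the acquire in nau8824_set_fmt() below the format validation serves the same goal from the other side: nothing is taken before the last point that can fail with a bare return.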
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 8153d3d01654f..3677e9029f91e 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1599,7 +1599,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
 		if (val > 6) {
 			dev_err(dev, "Invalid pll-in\n");
 			ret = -EINVAL;
-			goto err_clk;
+			goto err_pm;
 		}
 		pcm512x->pll_in = val;
 	}
@@ -1608,7 +1608,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
 		if (val > 6) {
 			dev_err(dev, "Invalid pll-out\n");
 			ret = -EINVAL;
-			goto err_clk;
+			goto err_pm;
 		}
 		pcm512x->pll_out = val;
 	}
@@ -1617,12 +1617,12 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
 		dev_err(dev,
 			"Error: both pll-in and pll-out, or none\n");
 		ret = -EINVAL;
-		goto err_clk;
+		goto err_pm;
 	}
 	if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
 		dev_err(dev, "Error: pll-in == pll-out\n");
 		ret = -EINVAL;
-		goto err_clk;
+		goto err_pm;
 	}
 	}
 #endif
diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
index 31daa749c3db4..a13296edf295c 100644
--- a/sound/soc/codecs/rt1308-sdw.c
+++ b/sound/soc/codecs/rt1308-sdw.c
@@ -613,7 +613,7 @@ static const struct snd_soc_component_driver soc_component_sdw_rt1308 = {
 static const struct snd_soc_dai_ops rt1308_aif_dai_ops = {
 	.hw_params = rt1308_sdw_hw_params,
 	.hw_free = rt1308_sdw_pcm_hw_free,
-	.set_sdw_stream = rt1308_set_sdw_stream,
+	.set_stream = rt1308_set_sdw_stream,
 	.shutdown = rt1308_sdw_shutdown,
 	.set_tdm_slot = rt1308_sdw_set_tdm_slot,
 };
diff --git a/sound/soc/codecs/rt1308-sdw.h b/sound/soc/codecs/rt1308-sdw.h
index c5ce75666dcc8..98293d73ebabc 100644
--- a/sound/soc/codecs/rt1308-sdw.h
+++ b/sound/soc/codecs/rt1308-sdw.h
@@ -139,9 +139,11 @@ static const struct reg_default rt1308_reg_defaults[] = {
 	{ 0x3005, 0x23 },
 	{ 0x3008, 0x02 },
 	{ 0x300a, 0x00 },
+	{ 0xc000 | (RT1308_DATA_PATH << 4), 0x00 },
 	{ 0xc003 | (RT1308_DAC_SET << 4), 0x00 },
 	{ 0xc001 | (RT1308_POWER << 4), 0x00 },
 	{ 0xc002 | (RT1308_POWER << 4), 0x00 },
+	{ 0xc000 | (RT1308_POWER_STATUS << 4), 0x00 },
 };
 
 #define RT1308_SDW_OFFSET 0xc000
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index dc0273a5a11f7..1ca06213e3a32 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -1168,6 +1168,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Geminilake")
 		}
 	},
+	{
+		.ident = "Intel Kabylake R RVP",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
+		}
+	},
 	{ }
 };
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 47ce074289ca9..58227602053fc 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -3192,8 +3192,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
 	if (ret < 0)
 		goto err;
 
-	pm_runtime_put(&i2c->dev);
-
 	return 0;
 err:
 	pm_runtime_disable(&i2c->dev);
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
index c9868dd096fcd..2fb6b1edf9331 100644
--- a/sound/soc/codecs/rt5682-sdw.c
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -272,7 +272,7 @@ static int rt5682_sdw_hw_free(struct snd_pcm_substream *substream,
 static struct snd_soc_dai_ops rt5682_sdw_ops = {
 	.hw_params = rt5682_sdw_hw_params,
 	.hw_free = rt5682_sdw_hw_free,
-	.set_sdw_stream = rt5682_set_sdw_stream,
+	.set_stream = rt5682_set_sdw_stream,
 	.shutdown = rt5682_sdw_shutdown,
 };
diff --git a/sound/soc/codecs/rt700.c b/sound/soc/codecs/rt700.c
index 687ac2153666b..80acf0daabf84 100644
--- a/sound/soc/codecs/rt700.c
+++ b/sound/soc/codecs/rt700.c
@@ -1005,7 +1005,7 @@ static int rt700_pcm_hw_free(struct snd_pcm_substream *substream,
 static struct snd_soc_dai_ops rt700_ops = {
 	.hw_params = rt700_pcm_hw_params,
 	.hw_free = rt700_pcm_hw_free,
-	.set_sdw_stream = rt700_set_sdw_stream,
+	.set_stream = rt700_set_sdw_stream,
 	.shutdown = rt700_shutdown,
 };
diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
index 93d86f7558e0a..9bdcc7872053b 100644
--- a/sound/soc/codecs/rt711.c
+++ b/sound/soc/codecs/rt711.c
@@ -1059,7 +1059,7 @@ static int rt711_pcm_hw_free(struct snd_pcm_substream *substream,
 static struct snd_soc_dai_ops rt711_ops = {
 	.hw_params = rt711_pcm_hw_params,
 	.hw_free = rt711_pcm_hw_free,
-	.set_sdw_stream = rt711_set_sdw_stream,
+	.set_stream = rt711_set_sdw_stream,
 	.shutdown = rt711_shutdown,
 };
diff --git a/sound/soc/codecs/rt715.c b/sound/soc/codecs/rt715.c
index 532c5303e7ab0..22bdccf663468 100644
--- a/sound/soc/codecs/rt715.c
+++ b/sound/soc/codecs/rt715.c
@@ -686,7 +686,7 @@ static int rt715_pcm_hw_free(struct snd_pcm_substream *substream,
 static struct snd_soc_dai_ops rt715_ops = {
 	.hw_params = rt715_pcm_hw_params,
 	.hw_free = rt715_pcm_hw_free,
-	.set_sdw_stream = rt715_set_sdw_stream,
+	.set_stream = rt715_set_sdw_stream,
 	.shutdown = rt715_shutdown,
 };
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index f066e016a874a..edde0323799aa 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1797,6 +1797,7 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)
 {
 	struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
 
+	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_CLK_CTRL, SGTL5000_CHIP_CLK_CTRL_DEFAULT);
 	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT);
 	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT);
diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
index 37588804a6b5f..c8f6f5122cacb 100644
--- a/sound/soc/codecs/tas2764.c
+++ b/sound/soc/codecs/tas2764.c
@@ -34,6 +34,9 @@ struct tas2764_priv {
 
 	int v_sense_slot;
 	int i_sense_slot;
+
+	bool dac_powered;
+	bool unmuted;
 };
 
 static void tas2764_reset(struct tas2764_priv *tas2764)
@@ -50,34 +53,22 @@ static void tas2764_reset(struct tas2764_priv *tas2764)
 	usleep_range(1000, 2000);
 }
 
-static int tas2764_set_bias_level(struct snd_soc_component *component,
-				  enum snd_soc_bias_level level)
+static int tas2764_update_pwr_ctrl(struct tas2764_priv *tas2764)
 {
-	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
+	struct snd_soc_component *component = tas2764->component;
+	unsigned int val;
+	int ret;
 
-	switch (level) {
-	case SND_SOC_BIAS_ON:
-		snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-					      TAS2764_PWR_CTRL_MASK,
-					      TAS2764_PWR_CTRL_ACTIVE);
-		break;
-	case SND_SOC_BIAS_STANDBY:
-	case SND_SOC_BIAS_PREPARE:
-		snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-					      TAS2764_PWR_CTRL_MASK,
-					      TAS2764_PWR_CTRL_MUTE);
-		break;
-	case SND_SOC_BIAS_OFF:
-		snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-					      TAS2764_PWR_CTRL_MASK,
-					      TAS2764_PWR_CTRL_SHUTDOWN);
-		break;
+	if (tas2764->dac_powered)
+		val = tas2764->unmuted ?
+			TAS2764_PWR_CTRL_ACTIVE : TAS2764_PWR_CTRL_MUTE;
+	else
+		val = TAS2764_PWR_CTRL_SHUTDOWN;
 
-	default:
-		dev_err(tas2764->dev,
-			"wrong power level setting %d\n", level);
-		return -EINVAL;
-	}
+	ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
+					    TAS2764_PWR_CTRL_MASK, val);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
@@ -114,9 +105,7 @@ static int tas2764_codec_resume(struct snd_soc_component *component)
 		usleep_range(1000, 2000);
 	}
 
-	ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-					    TAS2764_PWR_CTRL_MASK,
-					    TAS2764_PWR_CTRL_ACTIVE);
+	ret = tas2764_update_pwr_ctrl(tas2764);
 
 	if (ret < 0)
 		return ret;
@@ -150,14 +139,12 @@ static int tas2764_dac_event(struct snd_soc_dapm_widget *w,
 
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
-		ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-						    TAS2764_PWR_CTRL_MASK,
-						    TAS2764_PWR_CTRL_MUTE);
+		tas2764->dac_powered = true;
+		ret = tas2764_update_pwr_ctrl(tas2764);
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
-		ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-						    TAS2764_PWR_CTRL_MASK,
-						    TAS2764_PWR_CTRL_SHUTDOWN);
+		tas2764->dac_powered = false;
+		ret = tas2764_update_pwr_ctrl(tas2764);
 		break;
 	default:
 		dev_err(tas2764->dev, "Unsupported event\n");
@@ -202,17 +189,11 @@ static const struct snd_soc_dapm_route tas2764_audio_map[] = {
 
 static int tas2764_mute(struct snd_soc_dai *dai, int mute, int direction)
 {
-	struct snd_soc_component *component = dai->component;
-	int ret;
-
-	ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-					    TAS2764_PWR_CTRL_MASK,
-					    mute ? TAS2764_PWR_CTRL_MUTE : 0);
-
-	if (ret < 0)
-		return ret;
+	struct tas2764_priv *tas2764 =
+		snd_soc_component_get_drvdata(dai->component);
 
-	return 0;
+	tas2764->unmuted = !mute;
+	return tas2764_update_pwr_ctrl(tas2764);
 }
 
 static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth)
@@ -405,20 +386,13 @@ static int tas2764_set_dai_tdm_slot(struct snd_soc_dai *dai,
 	if (tx_mask == 0 || rx_mask != 0)
 		return -EINVAL;
 
-	if (slots == 1) {
-		if (tx_mask != 1)
-			return -EINVAL;
-		left_slot = 0;
-		right_slot = 0;
+	left_slot = __ffs(tx_mask);
+	tx_mask &= ~(1 << left_slot);
+	if (tx_mask == 0) {
+		right_slot = left_slot;
 	} else {
-		left_slot = __ffs(tx_mask);
-		tx_mask &= ~(1 << left_slot);
-		if (tx_mask == 0) {
-			right_slot = left_slot;
-		} else {
-			right_slot = __ffs(tx_mask);
-			tx_mask &= ~(1 << right_slot);
-		}
+		right_slot = __ffs(tx_mask);
+		tx_mask &= ~(1 << right_slot);
 	}
 
 	if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
@@ -485,7 +459,7 @@ static struct snd_soc_dai_driver tas2764_dai_driver[] = {
 		.id = 0,
 		.playback = {
 			.stream_name = "ASI1 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = TAS2764_RATES,
 			.formats = TAS2764_FORMATS,
@@ -526,12 +500,6 @@ static int tas2764_codec_probe(struct snd_soc_component *component)
 	if (ret < 0)
 		return ret;
 
-	ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
-					    TAS2764_PWR_CTRL_MASK,
-					    TAS2764_PWR_CTRL_MUTE);
-	if (ret < 0)
-		return ret;
-
 	return 0;
 }
 
@@ -549,7 +517,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2764 = {
 	.probe = tas2764_codec_probe,
 	.suspend = tas2764_codec_suspend,
 	.resume = tas2764_codec_resume,
-	.set_bias_level = tas2764_set_bias_level,
 	.controls = tas2764_snd_controls,
 	.num_controls = ARRAY_SIZE(tas2764_snd_controls),
 	.dapm_widgets = tas2764_dapm_widgets,
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 024ec68e8d356..c213c8096142b 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -395,21 +395,13 @@ static int tas2770_set_dai_tdm_slot(struct snd_soc_dai *dai,
 	if (tx_mask == 0 || rx_mask != 0)
 		return -EINVAL;
 
-	if (slots == 1) {
-		if (tx_mask != 1)
-			return -EINVAL;
-
-		left_slot = 0;
-		right_slot = 0;
+	left_slot = __ffs(tx_mask);
+	tx_mask &= ~(1 << left_slot);
+	if (tx_mask == 0) {
+		right_slot = left_slot;
 	} else {
-		left_slot = __ffs(tx_mask);
-		tx_mask &= ~(1 << left_slot);
-		if (tx_mask == 0) {
-			right_slot = left_slot;
-		} else {
-			right_slot = __ffs(tx_mask);
-			tx_mask &= ~(1 << right_slot);
-		}
+		right_slot = __ffs(tx_mask);
+		tx_mask &= ~(1 << right_slot);
 	}
 
 	if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
@@ -495,6 +487,8 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
 	},
 };
 
+static const struct regmap_config tas2770_i2c_regmap;
+
 static int tas2770_codec_probe(struct snd_soc_component *component)
 {
 	struct tas2770_priv *tas2770 =
@@ -508,6 +502,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
 	}
 
 	tas2770_reset(tas2770);
+	regmap_reinit_cache(tas2770->regmap, &tas2770_i2c_regmap);
 
 	return 0;
 }
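Both the tas2764 and tas2770 hunks above replace the special-cased slots == 1 handling with a uniform mask walk: __ffs() pulls out the lowest set bit, and a single set bit (mono) simply maps both channels to the same slot. A self-contained sketch of that logic, with worked examples in the comments (parse_two_slots is a hypothetical name):

/* Sketch, not driver code: extract up to two TDM slots from tx_mask. */
#include <linux/bitops.h>

static int parse_two_slots(unsigned int tx_mask, int slots,
			   int *left, int *right)
{
	if (tx_mask == 0)
		return -EINVAL;

	*left = __ffs(tx_mask);		/* e.g. tx_mask 0x06 -> left = 1 */
	tx_mask &= ~BIT(*left);
	if (tx_mask == 0) {
		*right = *left;		/* mono: 0x01 -> both slots 0 */
	} else {
		*right = __ffs(tx_mask);	/* 0x06 -> right = 2 */
		tx_mask &= ~BIT(*right);
	}

	/* leftover bits or out-of-range slot indices are rejected */
	if (tx_mask != 0 || *left >= slots || *right >= slots)
		return -EINVAL;
	return 0;
}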
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 8f4ed39c49de2..33c29a1f52d00 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -1971,8 +1971,8 @@ static int wcd9335_trigger(struct snd_pcm_substream *substream, int cmd,
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		slim_stream_unprepare(dai_data->sruntime);
 		slim_stream_disable(dai_data->sruntime);
+		slim_stream_unprepare(dai_data->sruntime);
 		break;
 	default:
 		break;
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index fd704df9b1758..104751ac6cd14 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -1829,8 +1829,8 @@ static int wcd934x_trigger(struct snd_pcm_substream *substream, int cmd,
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		slim_stream_unprepare(dai_data->sruntime);
 		slim_stream_disable(dai_data->sruntime);
+		slim_stream_unprepare(dai_data->sruntime);
 		break;
 	default:
 		break;
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 1c360bae5652c..cc96c9bdff41f 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -697,6 +697,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
 	int dcs_mask;
 	int dcs_l, dcs_r;
 	int dcs_l_reg, dcs_r_reg;
+	int an_out_reg;
 	int timeout;
 	int pwr_reg;
 
@@ -712,6 +713,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
 		dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1;
 		dcs_r_reg = WM8904_DC_SERVO_8;
 		dcs_l_reg = WM8904_DC_SERVO_9;
+		an_out_reg = WM8904_ANALOGUE_OUT1_LEFT;
 		dcs_l = 0;
 		dcs_r = 1;
 		break;
@@ -720,6 +722,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
 		dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3;
 		dcs_r_reg = WM8904_DC_SERVO_6;
 		dcs_l_reg = WM8904_DC_SERVO_7;
+		an_out_reg = WM8904_ANALOGUE_OUT2_LEFT;
 		dcs_l = 2;
 		dcs_r = 3;
 		break;
@@ -792,6 +795,10 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
 		snd_soc_component_update_bits(component, reg,
 					      WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
 					      WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP);
+
+		/* Update volume, requires PGA to be powered */
+		val = snd_soc_component_read(component, an_out_reg);
+		snd_soc_component_write(component, an_out_reg, val);
 		break;
 
 	case SND_SOC_DAPM_POST_PMU:
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 38651022e3d5f..57aeded978c28 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1840,6 +1840,49 @@ SOC_SINGLE_TLV("SPKOUTR Mixer DACR Volume", WM8962_SPEAKER_MIXER_5,
 	       4, 1, 0, inmix_tlv),
 };
 
+static int tp_event(struct snd_soc_dapm_widget *w,
+		    struct snd_kcontrol *kcontrol, int event)
+{
+	int ret, reg, val, mask;
+	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+
+	ret = pm_runtime_resume_and_get(component->dev);
+	if (ret < 0) {
+		dev_err(component->dev, "Failed to resume device: %d\n", ret);
+		return ret;
+	}
+
+	reg = WM8962_ADDITIONAL_CONTROL_4;
+
+	if (!strcmp(w->name, "TEMP_HP")) {
+		mask = WM8962_TEMP_ENA_HP_MASK;
+		val = WM8962_TEMP_ENA_HP;
+	} else if (!strcmp(w->name, "TEMP_SPK")) {
+		mask = WM8962_TEMP_ENA_SPK_MASK;
+		val = WM8962_TEMP_ENA_SPK;
+	} else {
+		pm_runtime_put(component->dev);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMD:
+		val = 0;
+		fallthrough;
+	case SND_SOC_DAPM_POST_PMU:
+		ret = snd_soc_component_update_bits(component, reg, mask, val);
+		break;
+	default:
+		WARN(1, "Invalid event %d\n", event);
+		pm_runtime_put(component->dev);
+		return -EINVAL;
+	}
+
+	pm_runtime_put(component->dev);
+
+	return 0;
+}
+
 static int cp_event(struct snd_soc_dapm_widget *w,
 		    struct snd_kcontrol *kcontrol, int event)
 {
@@ -2133,8 +2176,10 @@ SND_SOC_DAPM_SUPPLY("TOCLK", WM8962_ADDITIONAL_CONTROL_1, 0, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY_S("DSP2", 1, WM8962_DSP2_POWER_MANAGEMENT,
 		      WM8962_DSP2_ENA_SHIFT, 0, dsp2_event,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
-SND_SOC_DAPM_SUPPLY("TEMP_HP", WM8962_ADDITIONAL_CONTROL_4, 2, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("TEMP_SPK", WM8962_ADDITIONAL_CONTROL_4, 1, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("TEMP_HP", SND_SOC_NOPM, 0, 0, tp_event,
+		    SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("TEMP_SPK", SND_SOC_NOPM, 0, 0, tp_event,
+		    SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_MIXER("INPGAL", WM8962_LEFT_INPUT_PGA_CONTROL, 4, 0,
 		   inpgal, ARRAY_SIZE(inpgal)),
@@ -2444,6 +2489,14 @@ static void wm8962_configure_bclk(struct snd_soc_component *component)
 	snd_soc_component_update_bits(component, WM8962_CLOCKING2,
 				      WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
 
+	/* The DSPCLK_DIV field in the WM8962_CLOCKING1 register is used to
+	 * generate the correct LRCLK and BCLK frequencies. Sometimes this
+	 * read-only value is not updated promptly after SYSCLK is enabled,
+	 * which leads to wrong divider calculations. Delay here until the
+	 * register holds the new value; testing shows the delay must be at
+	 * least 500~1000us.
+	 */
+	usleep_range(500, 1000);
 	dspclk = snd_soc_component_read(component, WM8962_CLOCKING1);
 
 	if (snd_soc_component_get_bias_level(component) != SND_SOC_BIAS_ON)
@@ -3760,6 +3813,11 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
 	if (ret < 0)
 		goto err_pm_runtime;
 
+	regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+			   WM8962_TEMP_ENA_HP_MASK, 0);
+	regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+			   WM8962_TEMP_ENA_SPK_MASK, 0);
+
 	regcache_cache_only(wm8962->regmap, true);
 
 	/* The drivers should power up as needed */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index f57884113406b..d3a7480fda435 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3853,7 +3853,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
 	} else {
 		dev_dbg(component->dev, "Jack not detected\n");
 
+		/* Release wm8994->accdet_lock to avoid a deadlock:
+		 * cancel_delayed_work_sync() takes the internal lock of
+		 * wm8994->mic_work, and wm1811_mic_work takes
+		 * wm8994->accdet_lock */
+		mutex_unlock(&wm8994->accdet_lock);
 		cancel_delayed_work_sync(&wm8994->mic_work);
+		mutex_lock(&wm8994->accdet_lock);
 
 		snd_soc_component_update_bits(component, WM8958_MICBIAS2,
 					      WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
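The wm8994 hunk above fixes a classic lock-ordering bug: cancel_delayed_work_sync() blocks until the work handler finishes, so it must never be called while holding a lock that the handler itself takes. A generic sketch of the safe shape (hypothetical names, not the driver code):

/* Sketch of the locking rule, with hypothetical my_jack names. */
static void jack_removed(struct my_jack *jack)
{
	mutex_lock(&jack->lock);
	/* ... detection state updates under jack->lock ... */

	/*
	 * The delayed work handler takes jack->lock too, so waiting for
	 * it here with the lock held can deadlock. Drop the lock, cancel,
	 * then re-take it and re-check any state that may have changed.
	 */
	mutex_unlock(&jack->lock);
	cancel_delayed_work_sync(&jack->mic_work);
	mutex_lock(&jack->lock);

	disable_micbias(jack);
	mutex_unlock(&jack->lock);
}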
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index 601525c77bbaf..15b3f47fbfa35 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -1026,7 +1026,7 @@ static struct snd_soc_dai_ops wsa881x_dai_ops = {
 	.hw_params = wsa881x_hw_params,
 	.hw_free = wsa881x_hw_free,
 	.mute_stream = wsa881x_digital_mute,
-	.set_sdw_stream = wsa881x_set_sdw_stream,
+	.set_stream = wsa881x_set_sdw_stream,
 };
 
 static struct snd_soc_dai_driver wsa881x_dais[] = {
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index e13271ea84ded..29cf9234984d9 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -86,7 +86,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
 	int ret;
 	int int_port = 0, ext_port;
 	struct device_node *np = pdev->dev.of_node;
-	struct device_node *ssi_np = NULL, *codec_np = NULL;
+	struct device_node *ssi_np = NULL, *codec_np = NULL, *tmp_np = NULL;
 
 	eukrea_tlv320.dev = &pdev->dev;
 	if (np) {
@@ -143,7 +143,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
 	}
 
 	if (machine_is_eukrea_cpuimx27() ||
-	    of_find_compatible_node(NULL, NULL, "fsl,imx21-audmux")) {
+	    (tmp_np = of_find_compatible_node(NULL, NULL, "fsl,imx21-audmux"))) {
 		imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
 			IMX_AUDMUX_V1_PCR_SYN |
 			IMX_AUDMUX_V1_PCR_TFSDIR |
@@ -158,10 +158,11 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
 			IMX_AUDMUX_V1_PCR_SYN |
 			IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0)
 		);
+		of_node_put(tmp_np);
 	} else if (machine_is_eukrea_cpuimx25sd() ||
 		   machine_is_eukrea_cpuimx35sd() ||
 		   machine_is_eukrea_cpuimx51sd() ||
-		   of_find_compatible_node(NULL, NULL, "fsl,imx31-audmux")) {
+		   (tmp_np = of_find_compatible_node(NULL, NULL, "fsl,imx31-audmux"))) {
 		if (!np)
 			ext_port = machine_is_eukrea_cpuimx25sd() ?
 				   4 : 3;
@@ -178,6 +179,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
 			IMX_AUDMUX_V2_PTCR_SYN,
 			IMX_AUDMUX_V2_PDCR_RXDSEL(int_port)
 		);
+		of_node_put(tmp_np);
 	} else {
 		if (np) {
 			/* The eukrea,asoc-tlv320 driver was explicitly
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 7cd14d6b9436a..9a756d0a60320 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -117,11 +117,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
 
 static const struct snd_soc_dapm_route audio_map_ac97[] = {
 	/* 1st half -- Normal DAPM routes */
-	{"Playback",  NULL, "AC97 Playback"},
-	{"AC97 Capture",  NULL, "Capture"},
+	{"AC97 Playback",  NULL, "CPU AC97 Playback"},
+	{"CPU AC97 Capture",  NULL, "AC97 Capture"},
 	/* 2nd half -- ASRC DAPM routes */
-	{"AC97 Playback",  NULL, "ASRC-Playback"},
-	{"ASRC-Capture",  NULL, "AC97 Capture"},
+	{"CPU AC97 Playback",  NULL, "ASRC-Playback"},
+	{"ASRC-Capture",  NULL, "CPU AC97 Capture"},
 };
 
 static const struct snd_soc_dapm_route audio_map_tx[] = {
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index efc5daf53bbae..97f83c63e7652 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -87,21 +87,21 @@ static DECLARE_TLV_DB_SCALE(gain_tlv, 0, 100, 0);
 
 static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
 	SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
 	SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
 	SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
 	SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
 	SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
 	SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
 	SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
 	SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
-			  MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
+			  MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
 	SOC_ENUM_EXT("MICFIL Quality Select",
 		     fsl_micfil_quality_enum,
 		     snd_soc_get_enum_double, snd_soc_put_enum_double),
@@ -190,6 +190,25 @@ static int fsl_micfil_reset(struct device *dev)
 		return ret;
 	}
 
+	/*
+	 * SRES is a self-clearing bit, but REG_MICFIL_CTRL1 is defined as a
+	 * non-volatile register, so SRES stays set in the regmap cache after
+	 * being written; every later update of REG_MICFIL_CTRL1 would then
+	 * trigger another software reset. Clear it explicitly here.
+	 */
+	ret = regmap_clear_bits(micfil->regmap, REG_MICFIL_CTRL1,
+				MICFIL_CTRL1_SRES);
+	if (ret)
+		return ret;
+
+	/*
+	 * Setting SRES should clear the CHnF flags, but even with an added
+	 * delay they are sometimes not cleared, so clear CHnF explicitly.
+	 */
+	ret = regmap_write_bits(micfil->regmap, REG_MICFIL_STAT, 0xFF, 0xFF);
+	if (ret)
+		return ret;
+
 	return 0;
 }
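The fsl_micfil fix illustrates a general regmap pitfall: a self-clearing hardware bit in a register that regmap caches (non-volatile) stays set in the cache, so every later read-modify-write silently re-asserts it. A generic sketch of the defensive pattern (MY_CTRL_REG and MY_CTRL_SRES are hypothetical):

#include <linux/regmap.h>

/* Trigger the self-clearing soft reset. */
ret = regmap_set_bits(map, MY_CTRL_REG, MY_CTRL_SRES);
if (ret)
	return ret;

/*
 * The hardware clears SRES by itself, but the regmap cache does not
 * see that; scrub the cached copy so later updates of MY_CTRL_REG
 * don't re-trigger the reset.
 */
ret = regmap_clear_bits(map, MY_CTRL_REG, MY_CTRL_SRES);
if (ret)
	return ret;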
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 1d774c876c52e..94229ce1a30ef 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1161,14 +1161,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
 	.symmetric_channels = 1,
 	.probe = fsl_ssi_dai_probe,
 	.playback = {
-		.stream_name = "AC97 Playback",
+		.stream_name = "CPU AC97 Playback",
 		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_48000,
 		.formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
 	},
 	.capture = {
-		.stream_name = "AC97 Capture",
+		.stream_name = "CPU AC97 Capture",
 		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_48000,
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index bfbee2d716f39..84510ca0b8fd6 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -466,8 +466,10 @@ static int graph_for_each_link(struct asoc_simple_priv *priv,
 			of_node_put(codec_ep);
 			of_node_put(codec_port);
 
-			if (ret < 0)
+			if (ret < 0) {
+				of_node_put(cpu_ep);
 				return ret;
+			}
 
 			codec_port_old = codec_port;
 		}
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 7ed869bf1a926..81269ed5a2aaa 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -450,6 +450,13 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
 					| BYT_CHT_ES8316_INTMIC_IN2_MAP
 					| BYT_CHT_ES8316_JD_INVERTED),
 	},
+	{	/* Nanote UMPC-01 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"),
+		},
+		.driver_data = (void *)BYT_CHT_ES8316_INTMIC_IN1_MAP,
+	},
 	{	/* Teclast X98 Plus II */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 3020a993f6ef5..8a99cb6dfcd69 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -430,6 +430,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
 					BYT_RT5640_SSP0_AIF1 |
 					BYT_RT5640_MCLK_EN),
 	},
+	{
+		/* Advantech MICA-071 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"),
+		},
+		/* OVCD Th = 1500uA to reliably detect head-phones vs head-sets */
+		.driver_data = (void *)(BYT_RT5640_IN3_MAP |
+					BYT_RT5640_JD_SRC_JD2_IN4N |
+					BYT_RT5640_OVCD_TH_1500UA |
+					BYT_RT5640_OVCD_SF_0P75 |
+					BYT_RT5640_MONO_SPEAKER |
+					BYT_RT5640_DIFF_MIC |
+					BYT_RT5640_MCLK_EN),
+	},
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 1f94fa5a15db6..5883d1fa3b7ed 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -704,6 +704,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
 		links[id].num_platforms = ARRAY_SIZE(platform_component);
 		links[id].nonatomic = true;
 		links[id].dpcm_playback = 1;
+		/* feedback stream or firmware-generated echo reference */
+		links[id].dpcm_capture = 1;
+
 		links[id].no_pcm = 1;
 		links[id].cpus = &cpus[id];
 		links[id].num_cpus = 1;
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 25548555d8d79..f5d8f7951cfc3 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -231,7 +231,7 @@ int sdw_prepare(struct snd_pcm_substream *substream)
 	/* Find stream from first CPU DAI */
 	dai = asoc_rtd_to_cpu(rtd, 0);
 
-	sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+	sdw_stream = snd_soc_dai_get_stream(dai, substream->stream);
 
 	if (IS_ERR(sdw_stream)) {
 		dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
@@ -251,7 +251,7 @@ int sdw_trigger(struct snd_pcm_substream *substream, int cmd)
 	/* Find stream from first CPU DAI */
 	dai = asoc_rtd_to_cpu(rtd, 0);
 
-	sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+	sdw_stream = snd_soc_dai_get_stream(dai, substream->stream);
 
 	if (IS_ERR(sdw_stream)) {
 		dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
@@ -290,7 +290,7 @@ int sdw_hw_free(struct snd_pcm_substream *substream)
 	/* Find stream from first CPU DAI */
 	dai = asoc_rtd_to_cpu(rtd, 0);
 
-	sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+	sdw_stream = snd_soc_dai_get_stream(dai, substream->stream);
 
 	if (IS_ERR(sdw_stream)) {
 		dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index b1897a057397d..b531d9dfc2d64 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -563,11 +563,8 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
 
 	stream_tag = hdac_stream(link_dev)->stream_tag;
 
-	/* set the stream tag in the codec dai dma params */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
-	else
-		snd_soc_dai_set_tdm_slot(codec_dai, 0, stream_tag, 0, 0);
+	/* set the hdac_stream in the codec dai */
+	snd_soc_dai_set_stream(codec_dai, hdac_stream(link_dev), substream->stream);
 
 	p_params.s_fmt = snd_pcm_format_width(params_format(params));
 	p_params.ch = params_channels(params);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 8b993722f74e7..2085e12dc611b 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -439,7 +439,7 @@ static int skl_free(struct hdac_bus *bus)
 
 	skl->init_done = 0; /* to be sure */
 
-	snd_hdac_ext_stop_streams(bus);
+	snd_hdac_stop_streams_and_chip(bus);
 
 	if (bus->irq >= 0)
 		free_irq(bus->irq, (void *)bus);
@@ -1100,7 +1100,10 @@ static void skl_shutdown(struct pci_dev *pci)
 	if (!skl->init_done)
 		return;
 
-	snd_hdac_ext_stop_streams(bus);
+	snd_hdac_stop_streams(bus);
+	snd_hdac_ext_bus_link_power_down_all(bus);
+	skl_dsp_sleep(skl->dsp);
+
 	list_for_each_entry(s, &bus->stream_list, list) {
 		stream = stream_to_hdac_ext_stream(s);
 		snd_hdac_ext_stream_decouple(bus, stream, false);
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 0793e284d0e78..fb6476b153ed3 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -59,7 +59,8 @@
 #define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11)
 #define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10)
 #define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9)
-#define JZ_AIC_CTRL_FLUSH BIT(8)
+#define JZ_AIC_CTRL_TFLUSH BIT(8)
+#define JZ_AIC_CTRL_RFLUSH BIT(7)
 #define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6)
 #define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5)
 #define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4)
@@ -94,6 +95,8 @@ enum jz47xx_i2s_version {
 struct i2s_soc_info {
 	enum jz47xx_i2s_version version;
 	struct snd_soc_dai_driver *dai;
+
+	bool shared_fifo_flush;
 };
 
 struct jz4740_i2s {
@@ -122,19 +125,44 @@ static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s,
 	writel(value, i2s->base + reg);
 }
 
+static inline void jz4740_i2s_set_bits(const struct jz4740_i2s *i2s,
+	unsigned int reg, uint32_t bits)
+{
+	uint32_t value = jz4740_i2s_read(i2s, reg);
+	value |= bits;
+	jz4740_i2s_write(i2s, reg, value);
+}
+
 static int jz4740_i2s_startup(struct snd_pcm_substream *substream,
 	struct snd_soc_dai *dai)
 {
 	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-	uint32_t conf, ctrl;
+	uint32_t conf;
 	int ret;
 
+	/*
+	 * When we can flush FIFOs independently, only flush the FIFO
+	 * that is starting up. We can do this when the DAI is active
+	 * because it does not disturb other active substreams.
+	 */
+	if (!i2s->soc_info->shared_fifo_flush) {
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH);
+		else
+			jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_RFLUSH);
+	}
+
 	if (snd_soc_dai_active(dai))
 		return 0;
 
-	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
-	ctrl |= JZ_AIC_CTRL_FLUSH;
-	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
+	/*
+	 * When there is a shared flush bit for both FIFOs, the TFLUSH
+	 * bit flushes both FIFOs. Flushing while the DAI is active would
+	 * cause FIFO underruns in other active substreams so we have to
+	 * guard this behind the snd_soc_dai_active() check.
+	 */
+	if (i2s->soc_info->shared_fifo_flush)
+		jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH);
 
 	ret = clk_prepare_enable(i2s->clk_i2s);
 	if (ret)
@@ -467,6 +495,7 @@ static struct snd_soc_dai_driver jz4740_i2s_dai = {
 static const struct i2s_soc_info jz4740_i2s_soc_info = {
 	.version = JZ_I2S_JZ4740,
 	.dai = &jz4740_i2s_dai,
+	.shared_fifo_flush = true,
 };
 
 static const struct i2s_soc_info jz4760_i2s_soc_info = {
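Only the original JZ4740 sets shared_fifo_flush; later variants with split TFLUSH/RFLUSH bits simply leave the flag unset and get the per-direction flush path. As a sketch (field values guessed, not taken from the patch), such a variant descriptor would look like:

static const struct i2s_soc_info jz4760_i2s_soc_info = {
	.version = JZ_I2S_JZ4760,
	.dai = &jz4740_i2s_dai,
	/* .shared_fifo_flush defaults to false: FIFOs flush independently */
};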
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
index 86e982e3209ed..e1f57b0dedd0f 100644
--- a/sound/soc/mediatek/common/mtk-btcvsd.c
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -1038,11 +1038,9 @@ static int mtk_pcm_btcvsd_copy(struct snd_soc_component *component,
 	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		mtk_btcvsd_snd_write(bt, buf, count);
+		return mtk_btcvsd_snd_write(bt, buf, count);
 	else
-		mtk_btcvsd_snd_read(bt, buf, count);
-
-	return 0;
+		return mtk_btcvsd_snd_read(bt, buf, count);
 }
 
 /* kcontrol */
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 7e7bda70d12e9..619d6733091cd 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1054,6 +1054,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
 	int irq_id;
 	struct mtk_base_afe *afe;
 	struct mt8173_afe_private *afe_priv;
+	struct snd_soc_component *comp_pcm, *comp_hdmi;
 
 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
 	if (ret)
@@ -1071,16 +1072,6 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
 
 	afe->dev = &pdev->dev;
 
-	irq_id = platform_get_irq(pdev, 0);
-	if (irq_id <= 0)
-		return irq_id < 0 ? irq_id : -ENXIO;
-	ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
-			       0, "Afe_ISR_Handle", (void *)afe);
-	if (ret) {
-		dev_err(afe->dev, "could not request_irq\n");
-		return ret;
-	}
-
 	afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(afe->base_addr))
 		return PTR_ERR(afe->base_addr);
@@ -1142,23 +1133,65 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_pm_disable;
 
-	ret = devm_snd_soc_register_component(&pdev->dev,
-					      &mt8173_afe_pcm_dai_component,
-					      mt8173_afe_pcm_dais,
-					      ARRAY_SIZE(mt8173_afe_pcm_dais));
+	comp_pcm = devm_kzalloc(&pdev->dev, sizeof(*comp_pcm), GFP_KERNEL);
+	if (!comp_pcm) {
+		ret = -ENOMEM;
+		goto err_pm_disable;
+	}
+
+	ret = snd_soc_component_initialize(comp_pcm,
+					   &mt8173_afe_pcm_dai_component,
+					   &pdev->dev);
 	if (ret)
 		goto err_pm_disable;
 
-	ret = devm_snd_soc_register_component(&pdev->dev,
-					      &mt8173_afe_hdmi_dai_component,
-					      mt8173_afe_hdmi_dais,
-					      ARRAY_SIZE(mt8173_afe_hdmi_dais));
+#ifdef CONFIG_DEBUG_FS
+	comp_pcm->debugfs_prefix = "pcm";
+#endif
+
+	ret = snd_soc_add_component(comp_pcm,
+				    mt8173_afe_pcm_dais,
+				    ARRAY_SIZE(mt8173_afe_pcm_dais));
+	if (ret)
+		goto err_pm_disable;
+
+	comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL);
+	if (!comp_hdmi) {
+		ret = -ENOMEM;
+		goto err_pm_disable;
+	}
+
+	ret = snd_soc_component_initialize(comp_hdmi,
+					   &mt8173_afe_hdmi_dai_component,
+					   &pdev->dev);
 	if (ret)
 		goto err_pm_disable;
 
+#ifdef CONFIG_DEBUG_FS
+	comp_hdmi->debugfs_prefix = "hdmi";
+#endif
+
+	ret = snd_soc_add_component(comp_hdmi,
+				    mt8173_afe_hdmi_dais,
+				    ARRAY_SIZE(mt8173_afe_hdmi_dais));
+	if (ret)
+		goto err_cleanup_components;
+
+	irq_id = platform_get_irq(pdev, 0);
+	if (irq_id <= 0)
+		return irq_id < 0 ? irq_id : -ENXIO;
+	ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
+			       0, "Afe_ISR_Handle", (void *)afe);
+	if (ret) {
+		dev_err(afe->dev, "could not request_irq\n");
+		goto err_pm_disable;
+	}
+
 	dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
 
 	return 0;
 
+err_cleanup_components:
+	snd_soc_unregister_component(&pdev->dev);
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
 	return ret;
@@ -1166,6 +1199,8 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
 
 static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev)
 {
+	snd_soc_unregister_component(&pdev->dev);
+
 	pm_runtime_disable(&pdev->dev);
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		mt8173_afe_runtime_suspend(&pdev->dev);
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 390da5bf727eb..9421b919d4627 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -200,14 +200,16 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
 	if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
 		dev_err(&pdev->dev,
 			"Property 'audio-codec' missing or invalid\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
 		of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
 	if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) {
 		dev_err(&pdev->dev,
 			"Property 'audio-codec' missing or invalid\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	mt8173_rt5650_rt5514_codec_conf[0].dlc.of_node =
 		mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node;
@@ -219,6 +221,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
 			__func__, ret);
 
+out:
 	of_node_put(platform_node);
 	return ret;
 }
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 53fc49e32fbc8..0791737c3bf38 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -98,7 +98,7 @@ static bool filter(struct dma_chan *chan, void *param)
 	devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name,
 		dma_data->ssp_id);
-	if ((strcmp(dev_name(chan->device->dev), devname) == 0) &&
+	if (devname && (strcmp(dev_name(chan->device->dev), devname) == 0) &&
 	    (chan->chan_id == dma_data->dma_res->start)) {
 		found = true;
 	}
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 03abb3d719d08..9e70c193d7f41 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -745,10 +745,20 @@ static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
 		return true;
 	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
 		return true;
+	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
+		return true;
+	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
+		return true;
 
 	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
 		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
 			return true;
+		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
+			return true;
+		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
+			return true;
+		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
+			return true;
 	}
 	return false;
 }
@@ -806,10 +816,11 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
 					struct lpass_data *data)
 {
 	struct device_node *node;
-	int ret, id;
+	int ret, i, id;
 
 	/* Allow all channels by default for backwards compatibility */
-	for (id = 0; id < data->variant->num_dai; id++) {
+	for (i = 0; i < data->variant->num_dai; i++) {
+		id = data->variant->dai_driver[i].id;
 		data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
 		data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
 	}
diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c
index c647e627897a2..cb4e9017cd778 100644
--- a/sound/soc/qcom/lpass-sc7180.c
+++ b/sound/soc/qcom/lpass-sc7180.c
@@ -129,6 +129,9 @@ static int sc7180_lpass_init(struct platform_device *pdev)
 
 	drvdata->clks = devm_kcalloc(dev, variant->num_clks,
 				     sizeof(*drvdata->clks), GFP_KERNEL);
+	if (!drvdata->clks)
+		return -ENOMEM;
+
 	drvdata->num_clks = variant->num_clks;
 
 	for (i = 0; i < drvdata->num_clks; i++)
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 153e9b2de0b53..6be7a32933ad0 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -56,8 +56,8 @@ static int sdm845_slim_snd_hw_params(struct snd_pcm_substream *substream,
 	int ret = 0, i;
 
 	for_each_rtd_codec_dais(rtd, i, codec_dai) {
-		sruntime = snd_soc_dai_get_sdw_stream(codec_dai,
-						      substream->stream);
+		sruntime = snd_soc_dai_get_stream(codec_dai,
+						  substream->stream);
 		if (sruntime != ERR_PTR(-ENOTSUPP))
 			pdata->sruntime[cpu_dai->id] = sruntime;
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 5adb293d0435d..94cfbc90390ba 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -368,6 +368,7 @@ static int rockchip_pdm_runtime_resume(struct device *dev)
 
 	ret = clk_prepare_enable(pdm->hclk);
 	if (ret) {
+		clk_disable_unprepare(pdm->clk);
 		dev_err(pdm->dev, "hclock enable failed %d\n", ret);
 		return ret;
 	}
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index 674810851fbc6..ccddcd9926af8 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -86,6 +86,7 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
 
 	ret = clk_prepare_enable(spdif->hclk);
 	if (ret) {
+		clk_disable_unprepare(spdif->mclk);
 		dev_err(spdif->dev, "hclk clock enable failed %d\n", ret);
 		return ret;
 	}
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 7647b3d4c0baa..25a8cfc274335 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -171,7 +171,11 @@ static int rsnd_ctu_init(struct rsnd_mod *mod,
 			 struct rsnd_dai_stream *io,
 			 struct rsnd_priv *priv)
 {
-	rsnd_mod_power_on(mod);
+	int ret;
+
+	ret = rsnd_mod_power_on(mod);
+	if (ret < 0)
+		return ret;
 
 	rsnd_ctu_activation(mod);
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 8d91c0eb0880f..53b2ad01222b5 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -186,7 +186,11 @@ static int rsnd_dvc_init(struct rsnd_mod *mod,
 			 struct rsnd_dai_stream *io,
 			 struct rsnd_priv *priv)
 {
-	rsnd_mod_power_on(mod);
+	int ret;
+
+	ret = rsnd_mod_power_on(mod);
+	if (ret < 0)
+		return ret;
 
 	rsnd_dvc_activation(mod);
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
index a3e0370f5704a..c6fe2595c373a 100644
--- a/sound/soc/sh/rcar/mix.c
+++ b/sound/soc/sh/rcar/mix.c
@@ -146,7 +146,11 @@ static int rsnd_mix_init(struct rsnd_mod *mod,
 			 struct rsnd_dai_stream *io,
 			 struct rsnd_priv *priv)
 {
-	rsnd_mod_power_on(mod);
+	int ret;
+
+	ret = rsnd_mod_power_on(mod);
+	if (ret < 0)
+		return ret;
 
 	rsnd_mix_activation(mod);
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 585ffba0244b9..fd52e26a3808b 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -454,11 +454,14 @@ static int rsnd_src_init(struct rsnd_mod *mod,
 			 struct rsnd_priv *priv)
 {
 	struct rsnd_src *src = rsnd_mod_to_src(mod);
+	int ret;
 
 	/* reset sync convert_rate */
 	src->sync.val = 0;
 
-	rsnd_mod_power_on(mod);
+	ret = rsnd_mod_power_on(mod);
+	if (ret < 0)
+		return ret;
 
 	rsnd_src_activation(mod);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 042207c116514..2ead44779d46d 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -518,7 +518,9 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
 
 	ssi->usrcnt++;
 
-	rsnd_mod_power_on(mod);
+	ret = rsnd_mod_power_on(mod);
+	if (ret < 0)
+		return ret;
 
 	rsnd_ssi_config_init(mod, io);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a6d6d10cd471b..e9da95ebccc83 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3178,10 +3178,23 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs);
 
 static int __init snd_soc_init(void)
 {
+	int ret;
+
 	snd_soc_debugfs_init();
-	snd_soc_util_init();
+	ret = snd_soc_util_init();
+	if (ret)
+		goto err_util_init;
 
-	return platform_driver_register(&soc_driver);
+	ret = platform_driver_register(&soc_driver);
+	if (ret)
+		goto err_register;
+
+	return 0;
+
+err_register:
+	snd_soc_util_exit();
+err_util_init:
+	snd_soc_debugfs_exit();
+	return ret;
 }
 module_init(snd_soc_init);
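The snd_soc_init() change follows the standard unwind-on-failure shape for a module init: each facility that registered successfully must be torn down, in reverse order, when a later step fails. Note that the soc-utils.c hunk below drops the __exit annotation from snd_soc_util_exit() precisely so this error path may call it. Generic sketch with hypothetical register_a()/register_b():

static int __init my_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_b;

	return 0;

err_b:
	unregister_a();	/* undo earlier steps in reverse order */
	return ret;
}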
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 0f26d6c31ce50..daecd386d5ec8 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -432,7 +432,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
 	val = ucontrol->value.integer.value[0];
 	if (mc->platform_max && val > mc->platform_max)
 		return -EINVAL;
-	if (val > max - min)
+	if (val > max)
 		return -EINVAL;
 	if (val < 0)
 		return -EINVAL;
@@ -445,8 +445,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
 		return err;
 
 	if (snd_soc_volsw_is_stereo(mc)) {
+		val2 = ucontrol->value.integer.value[1];
+
+		if (mc->platform_max && val2 > mc->platform_max)
+			return -EINVAL;
+		if (val2 > max)
+			return -EINVAL;
+
 		val_mask = mask << rshift;
-		val2 = (ucontrol->value.integer.value[1] + min) & mask;
+		val2 = (val2 + min) & mask;
 		val2 = val2 << rshift;
 
 		err = snd_soc_component_update_bits(component, reg2, val_mask,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 8b8a9aca2912f..fb874f924bbe3 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -723,11 +723,6 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
 		ret = snd_soc_dai_startup(dai, substream);
 		if (ret < 0)
 			goto err;
-
-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-			dai->tx_mask = 0;
-		else
-			dai->rx_mask = 0;
 	}
 
 	/* Dynamic PCM DAI links compat checks use dynamic capabilities */
@@ -1159,6 +1154,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
 		return;
 
 	be_substream = snd_soc_dpcm_get_substream(be, stream);
+	if (!be_substream)
+		return;
 
 	for_each_dpcm_fe(be, stream, dpcm) {
 		if (dpcm->fe == fe)
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index f27f94ca064bc..6b398ffabb02e 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -171,7 +171,7 @@ int __init snd_soc_util_init(void)
 	return ret;
 }
 
-void __exit snd_soc_util_exit(void)
+void snd_soc_util_exit(void)
 {
 	platform_driver_unregister(&soc_dummy_driver);
 	platform_device_unregister(soc_dummy_dev);
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index ef316311e959a..a6275cc92a405 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -212,6 +212,10 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
 	int stream_tag;
 	int ret;
 
+	link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+	if (!link)
+		return -EINVAL;
+
 	/* get stored dma data if resuming from system suspend */
 	link_dev = snd_soc_dai_get_dma_data(dai, substream);
 	if (!link_dev) {
@@ -232,15 +236,8 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
 	if (ret < 0)
 		return ret;
 
-	link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
-	if (!link)
-		return -EINVAL;
-
-	/* set the stream tag in the codec dai dma params */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
-	else
-		snd_soc_dai_set_tdm_slot(codec_dai, 0, stream_tag, 0, 0);
+	/* set the hdac_stream in the codec dai */
+	snd_soc_dai_set_stream(codec_dai, hdac_stream(link_dev), substream->stream);
 
 	p_params.s_fmt = snd_pcm_format_width(params_format(params));
 	p_params.ch = params_channels(params);
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 75657a25dbc05..fe9feaab6a0ac 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -75,7 +75,7 @@ static const struct dmi_system_id community_key_platforms[] = {
 	{
 		.ident = "Google Chromebooks",
 		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+			DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
 		}
 	},
 	{},
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index 6695530bba9b3..c60ff81390a44 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -125,15 +125,10 @@ EXPORT_SYMBOL(snd_emux_register);
  */
 int snd_emux_free(struct snd_emux *emu)
 {
-	unsigned long flags;
-
 	if (!emu)
 		return -EINVAL;
 
-	spin_lock_irqsave(&emu->voice_lock, flags);
-	if (emu->timer_active)
-		del_timer(&emu->tlist);
-	spin_unlock_irqrestore(&emu->voice_lock, flags);
+	del_timer_sync(&emu->tlist);
 
 	snd_emux_proc_free(emu);
 	snd_emux_delete_virmidi(emu);
diff --git a/sound/synth/emux/emux_nrpn.c b/sound/synth/emux/emux_nrpn.c
index 7eed5791972cc..a7d83182f7d28 100644
--- a/sound/synth/emux/emux_nrpn.c
+++ b/sound/synth/emux/emux_nrpn.c
@@ -349,6 +349,9 @@ int snd_emux_xg_control(struct snd_emux_port *port,
 			struct snd_midi_channel *chan,
 			int param)
 {
+	if (param >= ARRAY_SIZE(chan->control))
+		return -EINVAL;
+
 	return send_converted_effect(xg_effects, ARRAY_SIZE(xg_effects),
 				     port, chan, param,
 				     chan->control[param],
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a3e06a71cf356..6b172db58a310 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -667,7 +667,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface)
 		if (delayed_register[i] &&
 		    sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
 		    id == chip->usb_id)
-			return inum != iface;
+			return iface < inum;
 	}
 
 	return false;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 8527267725bb7..80dcac5abe0c4 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -73,12 +73,13 @@ static inline unsigned get_usb_high_speed_rate(unsigned int rate)
  */
 static void release_urb_ctx(struct snd_urb_ctx *u)
 {
-	if (u->buffer_size)
+	if (u->urb && u->buffer_size)
 		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
 				  u->urb->transfer_buffer,
 				  u->urb->transfer_dma);
 	usb_free_urb(u->urb);
 	u->urb = NULL;
+	u->buffer_size = 0;
 }
 
 static const char *usb_error_string(int err)
@@ -998,6 +999,7 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep)
 	if (!ep->syncbuf)
 		return -ENOMEM;
 
+	ep->nurbs = SYNC_URBS;
 	for (i = 0; i < SYNC_URBS; i++) {
 		struct snd_urb_ctx *u = &ep->urb[i];
 		u->index = i;
@@ -1017,8 +1019,6 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep)
 		u->urb->complete = snd_complete_urb;
 	}
 
-	ep->nurbs = SYNC_URBS;
-
 	return 0;
 
 out_of_memory:
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 59faa5a9a7141..b67617b68e509 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -304,7 +304,8 @@ static void line6_data_received(struct urb *urb)
 		for (;;) {
 			done =
 				line6_midibuf_read(mb, line6->buffer_message,
-						   LINE6_MIDI_MESSAGE_MAXLEN);
+						   LINE6_MIDI_MESSAGE_MAXLEN,
+						   LINE6_MIDIBUF_READ_RX);
 
 			if (done <= 0)
 				break;
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index ba0e2b7e8fe19..0838632c788e4 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
 	int req, done;
 
 	for (;;) {
-		req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
+		req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size,
+			   LINE6_FALLBACK_MAXPACKETSIZE);
 		done = snd_rawmidi_transmit_peek(substream, chunk, req);
 
 		if (done == 0)
@@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
 
 	for (;;) {
 		done = line6_midibuf_read(mb, chunk,
-					  LINE6_FALLBACK_MAXPACKETSIZE);
+					  LINE6_FALLBACK_MAXPACKETSIZE,
+					  LINE6_MIDIBUF_READ_TX);
 
 		if (done == 0)
 			break;
diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c
index 6a70463f82c4e..e7f830f7526c9 100644
--- a/sound/usb/line6/midibuf.c
+++ b/sound/usb/line6/midibuf.c
@@ -9,6 +9,7 @@
 
 #include "midibuf.h"
 
+
 static int midibuf_message_length(unsigned char code)
 {
 	int message_length;
+21,7 @@ static int midibuf_message_length(unsigned char code) message_length = length[(code >> 4) - 8]; } else { - /* - Note that according to the MIDI specification 0xf2 is - the "Song Position Pointer", but this is used by Line 6 - to send sysex messages to the host. - */ - static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1, + static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1 }; message_length = length[code & 0x0f]; @@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data, } int line6_midibuf_read(struct midi_buffer *this, unsigned char *data, - int length) + int length, int read_type) { int bytes_used; int length1, length2; @@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data, length1 = this->size - this->pos_read; - /* check MIDI command length */ command = this->buf[this->pos_read]; + /* + PODxt always has status byte lower nibble set to 0010, + when it means to send 0000, so we correct it here so + that control/program changes come on channel 1 and + sysex message status byte is correct + */ + if (read_type == LINE6_MIDIBUF_READ_RX) { + if (command == 0xb2 || command == 0xc2 || command == 0xf2) { + unsigned char fixed = command & 0xf0; + this->buf[this->pos_read] = fixed; + command = fixed; + } + } + /* check MIDI command length */ if (command & 0x80) { midi_length = midibuf_message_length(command); this->command_prev = command; diff --git a/sound/usb/line6/midibuf.h b/sound/usb/line6/midibuf.h index 124a8f9f7e96c..542e8d836f87d 100644 --- a/sound/usb/line6/midibuf.h +++ b/sound/usb/line6/midibuf.h @@ -8,6 +8,9 @@ #ifndef MIDIBUF_H #define MIDIBUF_H +#define LINE6_MIDIBUF_READ_TX 0 +#define LINE6_MIDIBUF_READ_RX 1 + struct midi_buffer { unsigned char *buf; int size; @@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb); extern int line6_midibuf_ignore(struct midi_buffer *mb, int length); extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split); extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data, - int length); + int length, int read_type); extern void line6_midibuf_reset(struct midi_buffer *mb); extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data, int length); diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c index 16e644330c4d6..54be5ac919bfb 100644 --- a/sound/usb/line6/pod.c +++ b/sound/usb/line6/pod.c @@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = { .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */ }; + static const char pod_version_header[] = { - 0xf2, 0x7e, 0x7f, 0x06, 0x02 + 0xf0, 0x7e, 0x7f, 0x06, 0x02 }; static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code, diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 93fee6e365a6e..b02e1a33304f0 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1149,10 +1149,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) port = &umidi->endpoints[i].out->ports[j]; break; } - if (!port) { - snd_BUG(); + if (!port) return -ENXIO; - } substream->runtime->private_data = port; port->state = STATE_UNKNOWN; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 1ac91c46da3cf..ec74aead0844c 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -76,6 +76,8 @@ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f0a) }, /* E-Mu 0204 USB */ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) }, +/* Ktmicro Usb_audio device */ +{ 
USB_DEVICE_VENDOR_SPEC(0x31b2, 0x0011) }, /* * Creative Technology, Ltd Live! Cam Sync HD [VF0770] @@ -2028,6 +2030,10 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + /* M-Audio Micro */ + USB_DEVICE_VENDOR_SPEC(0x0763, 0x201a), +}, { USB_DEVICE_VENDOR_SPEC(0x0763, 0x2030), .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { @@ -3656,6 +3662,58 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } }, +/* + * MacroSilicon MS2100/MS2106 based AV capture cards + * + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if + * they pretend to be 96kHz mono as a workaround for stereo being broken + * by that... + * + * They also have an issue with initial stream alignment that causes the + * channels to be swapped and out of phase, which is dealt with in quirks.c. + */ +{ + USB_AUDIO_DEVICE(0x534d, 0x0021), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "MacroSilicon", + .product_name = "MS210x", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = &(const struct snd_usb_audio_quirk[]) { + { + .ifnum = 2, + .type = QUIRK_AUDIO_ALIGN_TRANSFER, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, + { + .ifnum = 3, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 2, + .iface = 3, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC | + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 48000, + .rate_max = 48000, + } + }, + { + .ifnum = -1 + } + } + } +}, + /* * MacroSilicon MS2109 based HDMI capture cards * diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 6333a2ecb848a..752422147fb38 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1508,6 +1508,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */ pioneer_djm_set_format_quirk(subs); break; + case USB_ID(0x534d, 0x0021): /* MacroSilicon MS2100/MS2106 */ case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ subs->stream_offset_adj = 2; break; @@ -1743,6 +1744,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, /* XMOS based USB DACs */ switch (chip->usb_id) { case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */ + case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */ case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */ if (fp->altsetting == 2) @@ -1911,7 +1913,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface) for (q = registration_quirks; q->usb_id; q++) if (chip->usb_id == q->usb_id) - return iface != q->interface; + return iface < q->interface; /* Register as normal */ return false; diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 2f6d39c2ba7c8..c4f4585f9b851 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -496,6 +496,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, return 0; } } + + if (chip->card->registered) + chip->need_delayed_register = true; + /* look for an empty stream */ list_for_each_entry(as, &chip->pcm_list, list) { if (as->fmt_type != fp->fmt_type) @@ -503,9 +507,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, subs = &as->substream[stream]; if (subs->ep_num) continue; - if (snd_device_get_state(chip->card, as->pcm) 
!= - SNDRV_DEV_BUILD) - chip->need_delayed_register = true; err = snd_pcm_new_stream(as->pcm, stream, 1); if (err < 0) return err; @@ -1106,7 +1107,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip, * Dallas DS4201 workaround: It presents 5 altsettings, but the last * one misses syncpipe, and does not produce any sound. */ - if (chip->usb_id == USB_ID(0x04fa, 0x4201)) + if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4) num = 4; for (i = 0; i < num; i++) { diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h index 506c06a6536fb..4cc88a642e106 100644 --- a/tools/arch/parisc/include/uapi/asm/mman.h +++ b/tools/arch/parisc/include/uapi/asm/mman.h @@ -1,20 +1,20 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H #define TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H -#define MADV_DODUMP 70 +#define MADV_DODUMP 17 #define MADV_DOFORK 11 -#define MADV_DONTDUMP 69 +#define MADV_DONTDUMP 16 #define MADV_DONTFORK 10 #define MADV_DONTNEED 4 #define MADV_FREE 8 -#define MADV_HUGEPAGE 67 -#define MADV_MERGEABLE 65 -#define MADV_NOHUGEPAGE 68 +#define MADV_HUGEPAGE 14 +#define MADV_MERGEABLE 12 +#define MADV_NOHUGEPAGE 15 #define MADV_NORMAL 0 #define MADV_RANDOM 1 #define MADV_REMOVE 9 #define MADV_SEQUENTIAL 2 -#define MADV_UNMERGEABLE 66 +#define MADV_UNMERGEABLE 13 #define MADV_WILLNEED 3 #define MAP_ANONYMOUS 0x10 #define MAP_DENYWRITE 0x0800 diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 144dc164b7596..8fb9256768134 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -489,6 +489,11 @@ #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 + +#define MSR_AMD64_DE_CFG 0xc0011029 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) + #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 @@ -565,9 +570,6 @@ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL #define FAM10H_MMIO_CONF_BASE_SHIFT 20 #define MSR_FAM10H_NODE_ID 0xc001100c -#define MSR_F10H_DECFG 0xc0011029 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 0e9310727281a..13be487631992 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -416,7 +416,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset, *(char *)data); break; case BTF_INT_BOOL: - jsonw_bool(jw, *(int *)data); + jsonw_bool(jw, *(bool *)data); break; default: /* shouldn't happen */ diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 6ebf2b215ef49..eefa2b34e641a 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -271,6 +271,9 @@ int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***)) int err; int fd; + if (!REQ_ARGS(3)) + return -EINVAL; + fd = get_fd(&argc, &argv); if (fd < 0) return fd; diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 1854d6b978604..4fd4e3462ebce 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -398,6 +398,16 @@ int main(int argc, char **argv) setlinebuf(stdout); 
+#ifdef USE_LIBCAP + /* Libcap < 2.63 hooks before main() to compute the number of + * capabilities of the running kernel, and in doing so it calls prctl() + * which may fail and set errno to non-zero. + * Let's reset errno to make sure this does not interfere with the + * batch mode. + */ + errno = 0; +#endif + last_do_help = do_help; pretty_output = false; json_output = false; diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c index 84ae1039b0a87..367c10636890a 100644 --- a/tools/gpio/gpio-event-mon.c +++ b/tools/gpio/gpio-event-mon.c @@ -86,6 +86,7 @@ int monitor_device(const char *device_name, gpiotools_test_bit(values.bits, i)); } + i = 0; while (1) { struct gpio_v2_line_event event; diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c index 7399eb7f13786..d66b18c54606a 100644 --- a/tools/iio/iio_utils.c +++ b/tools/iio/iio_utils.c @@ -543,6 +543,10 @@ static int calc_digits(int num) { int count = 0; + /* It takes a digit to represent zero */ + if (!num) + return 1; + while (num != 0) { num /= 10; count++; diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index b8cecb66d28b7..c20d2fe7cebaa 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -2318,9 +2318,9 @@ static __attribute__((unused)) int memcmp(const void *s1, const void *s2, size_t n) { size_t ofs = 0; - char c1 = 0; + int c1 = 0; - while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) { + while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) { ofs++; } return c1; diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h index d30439b4b8ab4..869379f91fe48 100644 --- a/tools/include/uapi/asm/errno.h +++ b/tools/include/uapi/asm/errno.h @@ -9,8 +9,8 @@ #include "../../../arch/alpha/include/uapi/asm/errno.h" #elif defined(__mips__) #include "../../../arch/mips/include/uapi/asm/errno.h" -#elif defined(__xtensa__) -#include "../../../arch/xtensa/include/uapi/asm/errno.h" +#elif defined(__hppa__) +#include "../../../arch/parisc/include/uapi/asm/errno.h" #else #include <asm-generic/errno.h> #endif diff --git a/tools/include/uapi/linux/openat2.h b/tools/include/uapi/linux/openat2.h index 58b1eb7113600..a5feb76049487 100644 --- a/tools/include/uapi/linux/openat2.h +++ b/tools/include/uapi/linux/openat2.h @@ -35,5 +35,9 @@ struct open_how { #define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".." be scoped inside the dirfd (similar to chroot(2)). */ +#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be + completed through cached lookup. May + return -EAGAIN if that's not + possible. 
*/ #endif /* _UAPI_LINUX_OPENAT2_H */ diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 875dde20d56e3..92a3eaa154ddd 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -241,8 +241,15 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); +#ifdef __cplusplus +/* forward-declaring enums in C++ isn't compatible with pure C enums, so + * instead define bpf_enable_stats() as accepting int as an input + */ +LIBBPF_API int bpf_enable_stats(int type); +#else enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); +#endif struct bpf_prog_bind_opts { size_t sz; /* size of this struct for forward/backward compatibility */ diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index bd22853be4a6b..0e2d63da24e91 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -188,6 +188,17 @@ static int btf_dump_resize(struct btf_dump *d) return 0; } +static void btf_dump_free_names(struct hashmap *map) +{ + size_t bkt; + struct hashmap_entry *cur; + + hashmap__for_each_entry(map, cur, bkt) + free((void *)cur->key); + + hashmap__free(map); +} + void btf_dump__free(struct btf_dump *d) { int i; @@ -206,8 +217,8 @@ void btf_dump__free(struct btf_dump *d) free(d->cached_names); free(d->emit_queue); free(d->decl_stack); - hashmap__free(d->type_names); - hashmap__free(d->ident_names); + btf_dump_free_names(d->type_names); + btf_dump_free_names(d->ident_names); free(d); } @@ -1392,11 +1403,23 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, const char *orig_name) { + char *old_name, *new_name; size_t dup_cnt = 0; + int err; + + new_name = strdup(orig_name); + if (!new_name) + return 1; hashmap__find(name_map, orig_name, (void **)&dup_cnt); dup_cnt++; - hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL); + + err = hashmap__set(name_map, new_name, (void *)dup_cnt, + (const void **)&old_name, NULL); + if (err) + free(new_name); + + free(old_name); return dup_cnt; } diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 66d7f8d494dec..015ed8253f739 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3479,6 +3479,9 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, int l = 0, r = obj->nr_programs - 1, m; struct bpf_program *prog; + if (!obj->nr_programs) + return NULL; + while (l < r) { m = l + (r - l + 1) / 2; prog = &obj->programs[m]; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index d38284a3aaf0b..13393f0eab25c 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -244,7 +244,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) case BPF_MAP_TYPE_RINGBUF: key_size = 0; value_size = 0; - max_entries = 4096; + max_entries = sysconf(_SC_PAGE_SIZE); break; case BPF_MAP_TYPE_UNSPEC: case BPF_MAP_TYPE_HASH: diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c index 86c31c787fb91..5e242be45206a 100644 --- a/tools/lib/bpf/ringbuf.c +++ b/tools/lib/bpf/ringbuf.c @@ -59,6 +59,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, __u32 len = sizeof(info); struct epoll_event *e; struct ring *r; + __u64 mmap_sz; void *tmp; int err; @@ -97,8 +98,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, r->mask = info.max_entries - 1; /* Map writable consumer 
page */ - tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, - map_fd, 0); + tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); if (tmp == MAP_FAILED) { err = -errno; pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n", @@ -111,8 +111,12 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, * data size to allow simple reading of samples that wrap around the * end of a ring buffer. See kernel implementation for details. * */ - tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ, - MAP_SHARED, map_fd, rb->page_size); + mmap_sz = rb->page_size + 2 * (__u64)info.max_entries; + if (mmap_sz != (__u64)(size_t)mmap_sz) { + pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries); + return -E2BIG; + } + tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size); if (tmp == MAP_FAILED) { err = -errno; ringbuf_unmap_ring(rb, r); diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index e8745f646371f..fa1f8faf7dfe9 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -930,13 +930,13 @@ void xsk_socket__delete(struct xsk_socket *xsk) ctx = xsk->ctx; umem = ctx->umem; - xsk_put_ctx(ctx, true); - - if (!ctx->refcount) { + if (ctx->refcount == 1) { xsk_delete_bpf_maps(xsk); close(ctx->prog_fd); } + xsk_put_ctx(ctx, true); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (!err) { if (xsk->rx) { diff --git a/tools/objtool/check.c b/tools/objtool/check.c index ea80b29b99134..985bcc5cea8a4 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -168,6 +168,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "panic", "do_exit", "do_task_dead", + "make_task_dead", "__module_put_and_exit", "complete_and_exit", "__reiserfs_panic", @@ -175,7 +176,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "fortify_panic", "usercopy_abort", "machine_real_restart", - "rewind_stack_do_exit", + "rewind_stack_and_make_dead", "kunit_try_catch_throw", "xen_start_kernel", "cpu_bringup_and_idle", @@ -196,7 +197,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, return false; insn = find_insn(file, func->sec, func->offset); - if (!insn->func) + if (!insn || !insn->func) return false; func_for_each_insn(file, func, insn) { @@ -802,6 +803,16 @@ static const char *uaccess_safe_builtin[] = { "__tsan_read_write4", "__tsan_read_write8", "__tsan_read_write16", + "__tsan_volatile_read1", + "__tsan_volatile_read2", + "__tsan_volatile_read4", + "__tsan_volatile_read8", + "__tsan_volatile_read16", + "__tsan_volatile_write1", + "__tsan_volatile_write2", + "__tsan_volatile_write4", + "__tsan_volatile_write8", + "__tsan_volatile_write16", "__tsan_atomic8_load", "__tsan_atomic16_load", "__tsan_atomic32_load", diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 5aa3b4e76479e..a2ea3931e01d5 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -578,6 +578,11 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab, Elf64_Xword entsize = symtab->sh.sh_entsize; int max_idx, idx = sym->idx; Elf_Scn *s, *t = NULL; + bool is_special_shndx = sym->sym.st_shndx >= SHN_LORESERVE && + sym->sym.st_shndx != SHN_XINDEX; + + if (is_special_shndx) + shndx = sym->sym.st_shndx; s = elf_getscn(elf->elf, symtab->idx); if (!s) { @@ -663,7 +668,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab, } /* setup extended section index magic and write the symbol */ - if (shndx >= 
SHN_UNDEF && shndx < SHN_LORESERVE) { + if ((shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) || is_special_shndx) { sym->sym.st_shndx = shndx; if (!shndx_data) shndx = 0; diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index eac36afab2b39..9a56a44f87dd4 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -10,25 +10,13 @@ extern struct timeval bench__start, bench__end, bench__runtime; * The madvise transparent hugepage constants were added in glibc * 2.13. For compatibility with older versions of glibc, define these * tokens if they are not already defined. - * - * PA-RISC uses different madvise values from other architectures and - * needs to be special-cased. */ -#ifdef __hppa__ -# ifndef MADV_HUGEPAGE -# define MADV_HUGEPAGE 67 -# endif -# ifndef MADV_NOHUGEPAGE -# define MADV_NOHUGEPAGE 68 -# endif -#else # ifndef MADV_HUGEPAGE # define MADV_HUGEPAGE 14 # endif # ifndef MADV_NOHUGEPAGE # define MADV_NOHUGEPAGE 15 # endif -#endif int bench_numa(int argc, const char **argv); int bench_sched_messaging(int argc, const char **argv); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index de80534473afa..8de0d0a740de4 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -87,6 +87,8 @@ # define F_LINUX_SPECIFIC_BASE 1024 #endif +#define RAW_SYSCALL_ARGS_NUM 6 + /* * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100 */ @@ -107,7 +109,7 @@ struct syscall_fmt { const char *sys_enter, *sys_exit; } bpf_prog_name; - struct syscall_arg_fmt arg[6]; + struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM]; u8 nr_args; bool errpid; bool timeout; @@ -1216,7 +1218,7 @@ struct syscall { */ struct bpf_map_syscall_entry { bool enabled; - u16 string_args_len[6]; + u16 string_args_len[RAW_SYSCALL_ARGS_NUM]; }; /* @@ -1641,7 +1643,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) { int idx; - if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0) + if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) nr_args = sc->fmt->nr_args; sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); @@ -1774,11 +1776,11 @@ static int trace__read_syscall_info(struct trace *trace, int id) #endif sc = trace->syscalls.table + id; if (sc->nonexistent) - return 0; + return -EEXIST; if (name == NULL) { sc->nonexistent = true; - return 0; + return -EEXIST; } sc->name = name; @@ -1792,11 +1794,18 @@ static int trace__read_syscall_info(struct trace *trace, int id) sc->tp_format = trace_event__tp_format("syscalls", tp_name); } - if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields)) - return -ENOMEM; - - if (IS_ERR(sc->tp_format)) + /* + * If reading the trace point format via the sysfs node fails, the + * trace point doesn't exist. Set the 'nonexistent' flag to true. + */ + if (IS_ERR(sc->tp_format)) { + sc->nonexistent = true; return PTR_ERR(sc->tp_format); + } + + if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
+ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields)) + return -ENOMEM; sc->args = sc->tp_format->format.fields; /* @@ -2114,11 +2123,8 @@ static struct syscall *trace__syscall_info(struct trace *trace, (err = trace__read_syscall_info(trace, id)) != 0) goto out_cant_read; - if (trace->syscalls.table[id].name == NULL) { - if (trace->syscalls.table[id].nonexistent) - return NULL; + if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) goto out_cant_read; - } return &trace->syscalls.table[id]; diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index d3c15b53495d6..18452f12510c0 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -2164,11 +2164,19 @@ struct sym_args { bool near; }; +static bool kern_sym_name_match(const char *kname, const char *name) +{ + size_t n = strlen(name); + + return !strcmp(kname, name) || + (!strncmp(kname, name, n) && kname[n] == '\t'); +} + static bool kern_sym_match(struct sym_args *args, const char *name, char type) { /* A function with the same name, and global or the n'th found or any */ return kallsyms__is_function(type) && - !strcmp(name, args->name) && + kern_sym_name_match(name, args->name) && ((args->global && isupper(type)) || (args->selected && ++(args->cnt) == args->idx) || (!args->global && !args->selected)); @@ -2441,7 +2449,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start, *size = sym->start - *start; if (idx > 0) { if (*size) - return 1; + return 0; } else if (dso_sym_match(sym, sym_name, &cnt, idx)) { print_duplicate_syms(dso, sym_name); return -EINVAL; diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index 48754083791d8..29d32ba046b5c 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -127,6 +127,7 @@ int perf_data__open_dir(struct perf_data *data) file->size = st.st_size; } + closedir(dir); if (!files) return -EINVAL; @@ -135,6 +136,7 @@ int perf_data__open_dir(struct perf_data *data) return 0; out_err: + closedir(dir); close_dir(files, nr); return ret; } diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 0af163abaa62b..854dd3d2d8de3 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -207,6 +207,10 @@ int perf_quiet_option(void) opt++; } + /* For debug variables that are used as bool types, set to 0. 
*/ + redirect_to_stderr = 0; + debug_peo_args = 0; + return 0; } diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 4343356f3cf9a..f8a10d5148f6f 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -308,26 +308,13 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, { Dwarf_Attribute attr; - if (dwarf_attr(tp_die, attr_name, &attr) == NULL || + if (dwarf_attr_integrate(tp_die, attr_name, &attr) == NULL || dwarf_formudata(&attr, result) != 0) return -ENOENT; return 0; } -/* Get attribute and translate it as a sdata */ -static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name, - Dwarf_Sword *result) -{ - Dwarf_Attribute attr; - - if (dwarf_attr(tp_die, attr_name, &attr) == NULL || - dwarf_formsdata(&attr, result) != 0) - return -ENOENT; - - return 0; -} - /** * die_is_signed_type - Check whether a type DIE is signed or not * @tp_die: a DIE of a type @@ -467,9 +454,9 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs) /* Get the call file index number in CU DIE */ static int die_get_call_fileno(Dwarf_Die *in_die) { - Dwarf_Sword idx; + Dwarf_Word idx; - if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0) + if (die_get_attr_udata(in_die, DW_AT_call_file, &idx) == 0) return (int)idx; else return -ENOENT; @@ -478,9 +465,9 @@ static int die_get_call_fileno(Dwarf_Die *in_die) /* Get the declared file index number in CU DIE */ static int die_get_decl_fileno(Dwarf_Die *pdie) { - Dwarf_Sword idx; + Dwarf_Word idx; - if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0) + if (die_get_attr_udata(pdie, DW_AT_decl_file, &idx) == 0) return (int)idx; else return -ENOENT; diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c index 953338b9e887e..02cd9f75e3d2f 100644 --- a/tools/perf/util/genelf.c +++ b/tools/perf/util/genelf.c @@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, Elf_Data *d; Elf_Scn *scn; Elf_Ehdr *ehdr; + Elf_Phdr *phdr; Elf_Shdr *shdr; uint64_t eh_frame_base_offset; char *strsym = NULL; @@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, ehdr->e_version = EV_CURRENT; ehdr->e_shstrndx= unwinding ? 
4 : 2; /* shdr index for section name */ + /* + * setup program header + */ + phdr = elf_newphdr(e, 1); + phdr[0].p_type = PT_LOAD; + phdr[0].p_offset = 0; + phdr[0].p_vaddr = 0; + phdr[0].p_paddr = 0; + phdr[0].p_filesz = csize; + phdr[0].p_memsz = csize; + phdr[0].p_flags = PF_X | PF_R; + phdr[0].p_align = 8; + /* * setup text section */ diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h index d4137559be053..ac638945b4cb0 100644 --- a/tools/perf/util/genelf.h +++ b/tools/perf/util/genelf.h @@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent #if GEN_ELF_CLASS == ELFCLASS64 #define elf_newehdr elf64_newehdr +#define elf_newphdr elf64_newphdr #define elf_getshdr elf64_getshdr #define Elf_Ehdr Elf64_Ehdr +#define Elf_Phdr Elf64_Phdr #define Elf_Shdr Elf64_Shdr #define Elf_Sym Elf64_Sym #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a) @@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent #define elf_newehdr elf32_newehdr +#define elf_newphdr elf32_newphdr #define elf_getshdr elf32_getshdr #define Elf_Ehdr Elf32_Ehdr +#define Elf_Phdr Elf32_Phdr #define Elf_Shdr Elf32_Shdr #define Elf_Sym Elf32_Sym #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a) diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c index b205d929245f5..e68935e9ac8ce 100644 --- a/tools/perf/util/get_current_dir_name.c +++ b/tools/perf/util/get_current_dir_name.c @@ -3,8 +3,9 @@ // #ifndef HAVE_GET_CURRENT_DIR_NAME #include "get_current_dir_name.h" +#include <limits.h> +#include <string.h> #include <unistd.h> -#include <stdlib.h> /* Android's 'bionic' library, for one, doesn't have this */ diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 5163d2ffea70d..453773ce6f455 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -3279,6 +3279,7 @@ static const char * const intel_pt_info_fmts[] = { [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n", [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n", + [INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n", [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n", [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n", [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n", @@ -3293,8 +3294,12 @@ static void intel_pt_print_info(__u64 *arr, int start, int finish) if (!dump_trace) return; - for (i = start; i <= finish; i++) - fprintf(stdout, intel_pt_info_fmts[i], arr[i]); + for (i = start; i <= finish; i++) { + const char *fmt = intel_pt_info_fmts[i]; + + if (fmt) + fprintf(stdout, fmt, arr[i]); + } } static void intel_pt_print_info_str(const char *name, const char *str) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 3a0a7930cd10a..c56a4d9c3be94 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -356,6 +356,12 @@ __add_event(struct list_head *list, int *idx, struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) : cpu_list ?
perf_cpu_map__new(cpu_list) : NULL; + if (pmu) + perf_pmu__warn_invalid_formats(pmu); + + if (pmu && attr->type == PERF_TYPE_RAW) + perf_pmu__warn_invalid_config(pmu, attr->config, name); + if (init_attr) event_attr_init(attr); diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index d41caeb35cf6c..ac45da0302a73 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -862,6 +862,23 @@ static struct perf_pmu *pmu_lookup(const char *name) return pmu; } +void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu) +{ + struct perf_pmu_format *format; + + /* fake pmu doesn't have format list */ + if (pmu == &perf_pmu__fake) + return; + + list_for_each_entry(format, &pmu->format, list) + if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) { + pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d' " + "which is not supported by this version of perf!\n", + pmu->name, format->name, format->value); + return; + } +} + static struct perf_pmu *pmu_find(const char *name) { struct perf_pmu *pmu; @@ -1716,3 +1733,36 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu) return nr_caps; } + +void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, + char *name) +{ + struct perf_pmu_format *format; + __u64 masks = 0, bits; + char buf[100]; + unsigned int i; + + list_for_each_entry(format, &pmu->format, list) { + if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG) + continue; + + for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS) + masks |= 1ULL << i; + } + + /* + * Kernel doesn't export any valid format bits. + */ + if (masks == 0) + return; + + bits = config & ~masks; + if (bits == 0) + return; + + bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf)); + + pr_warning("WARNING: event '%s' not valid (bits %s of config " "'%llx' not supported by kernel)!\n", + name ?: "N/A", buf, config); +} diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index a64e9c9ce731a..7d208b8507695 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -15,6 +15,7 @@ enum { PERF_PMU_FORMAT_VALUE_CONFIG, PERF_PMU_FORMAT_VALUE_CONFIG1, PERF_PMU_FORMAT_VALUE_CONFIG2, + PERF_PMU_FORMAT_VALUE_CONFIG_END, }; #define PERF_PMU_FORMAT_BITS 64 @@ -120,4 +121,8 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval); int perf_pmu__caps_parse(struct perf_pmu *pmu); +void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, + char *name); +void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu); + #endif /* __PMU_H */ diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l index a15d9fbd7c0ed..58b4926cfaca9 100644 --- a/tools/perf/util/pmu.l +++ b/tools/perf/util/pmu.l @@ -27,8 +27,6 @@ num_dec [0-9]+ {num_dec} { return value(10); } config { return PP_CONFIG; } -config1 { return PP_CONFIG1; } -config2 { return PP_CONFIG2; } - { return '-'; } : { return ':'; } , { return ','; } diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index bfd7e8509869b..283efe059819d 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y @@ -20,7 +20,7 @@ do { \ %} -%token PP_CONFIG PP_CONFIG1 PP_CONFIG2 +%token PP_CONFIG %token PP_VALUE PP_ERROR %type <num> PP_VALUE %type <bits> bit_term @@ -47,18 +47,11 @@ PP_CONFIG ':' bits $3)); } | -PP_CONFIG1 ':' bits +PP_CONFIG PP_VALUE ':' bits { ABORT_ON(perf_pmu__new_format(format, name, - PERF_PMU_FORMAT_VALUE_CONFIG1, - $3)); -} -| -PP_CONFIG2 ':' bits -{ - ABORT_ON(perf_pmu__new_format(format, name, - PERF_PMU_FORMAT_VALUE_CONFIG2, - $3)); + $2, + $4)); } bits: diff --git 
a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 96fe9c1af3364..4688e39de52af 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -203,7 +203,7 @@ static void new_line_csv(struct perf_stat_config *config, void *ctx) fputc('\n', os->fh); if (os->prefix) - fprintf(os->fh, "%s%s", os->prefix, config->csv_sep); + fprintf(os->fh, "%s", os->prefix); aggr_printout(config, os->evsel, os->id, os->nr); for (i = 0; i < os->nfields; i++) fputs(config->csv_sep, os->fh); diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index d8d79a9ec7758..5221f272f85c6 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1247,7 +1247,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, (!used_opd && syms_ss->adjust_symbols)) { GElf_Phdr phdr; - if (elf_read_program_header(syms_ss->elf, + if (elf_read_program_header(runtime_ss->elf, (u64)sym.st_value, &phdr)) { pr_debug4("%s: failed to find program header for " "symbol: %s st_value: %#" PRIx64 "\n", @@ -2002,8 +2002,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir, * unusual. One significant peculiarity is that the mapping (start -> pgoff) * is not the same for the kernel map and the modules map. That happens because * the data is copied adjacently whereas the original kcore has gaps. Finally, - * kallsyms and modules files are compared with their copies to check that - * modules have not been loaded or unloaded while the copies were taking place. + * kallsyms file is compared with its copy to check that modules have not been + * loaded or unloaded while the copies were taking place. * * Return: %0 on success, %-1 on failure. */ @@ -2066,9 +2066,6 @@ int kcore_copy(const char *from_dir, const char *to_dir) goto out_extract_close; } - if (kcore_copy__compare_file(from_dir, to_dir, "modules")) - goto out_extract_close; - if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms")) goto out_extract_close; diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 4e24509645173..8b1e3ae8fe50d 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1912,7 +1912,7 @@ sub run_scp_mod { sub _get_grub_index { - my ($command, $target, $skip) = @_; + my ($command, $target, $skip, $submenu) = @_; return if (defined($grub_number) && defined($last_grub_menu) && $last_grub_menu eq $grub_menu && defined($last_machine) && @@ -1929,11 +1929,16 @@ sub _get_grub_index { my $found = 0; + my $submenu_number = 0; + while () { if (/$target/) { $grub_number++; $found = 1; last; + } elsif (defined($submenu) && /$submenu/) { + $submenu_number++; + $grub_number = -1; } elsif (/$skip/) { $grub_number++; } @@ -1942,6 +1947,9 @@ sub _get_grub_index { dodie "Could not find '$grub_menu' through $command on $machine" if (!$found); + if ($submenu_number > 0) { + $grub_number = "$submenu_number>$grub_number"; + } doprint "$grub_number\n"; $last_grub_menu = $grub_menu; $last_machine = $machine; @@ -1952,6 +1960,7 @@ sub get_grub_index { my $command; my $target; my $skip; + my $submenu; my $grub_menu_qt; if ($reboot_type !~ /^grub/) { @@ -1966,8 +1975,9 @@ sub get_grub_index { $skip = '^\s*title\s'; } elsif ($reboot_type eq "grub2") { $command = "cat $grub_file"; - $target = '^menuentry.*' . $grub_menu_qt; - $skip = '^menuentry\s|^submenu\s'; + $target = '^\s*menuentry.*' . 
$grub_menu_qt; + $skip = '^\s*menuentry'; + $submenu = '^\s*submenu\s'; } elsif ($reboot_type eq "grub2bls") { $command = $grub_bls_get; $target = '^title=.*' . $grub_menu_qt; @@ -1976,7 +1986,7 @@ sub get_grub_index { return; } - _get_grub_index($command, $target, $skip); + _get_grub_index($command, $target, $skip, $submenu); } sub wait_for_input @@ -2040,7 +2050,7 @@ sub reboot_to { if ($reboot_type eq "grub") { run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'"; } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) { - run_ssh "$grub_reboot $grub_number"; + run_ssh "$grub_reboot \"'$grub_number'\""; } elsif ($reboot_type eq "syslinux") { run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path"; } elsif (defined $reboot_script) { @@ -3763,9 +3773,10 @@ sub test_this_config { # .config to make sure it is missing the config that # we had before my %configs = %min_configs; - delete $configs{$config}; + $configs{$config} = "# $config is not set"; make_new_config ((values %configs), (values %keep_configs)); make_oldconfig; + delete $configs{$config}; undef %configs; assign_configs \%configs, $output_config; diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index d9c2835031590..db1e24d7155fa 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -103,19 +103,27 @@ ifdef building_out_of_srctree override LDFLAGS = endif -ifneq ($(O),) - BUILD := $(O)/kselftest +top_srcdir ?= ../../.. + +ifeq ("$(origin O)", "command line") + KBUILD_OUTPUT := $(O) +endif + +ifneq ($(KBUILD_OUTPUT),) + # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot + # expand a shell special character '~'. We use a somewhat tedious way here. + abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd) + $(if $(abs_objtree),, \ + $(error failed to create output directory "$(KBUILD_OUTPUT)")) + # $(realpath ...) resolves symlinks + abs_objtree := $(realpath $(abs_objtree)) + BUILD := $(abs_objtree)/kselftest else - ifneq ($(KBUILD_OUTPUT),) - BUILD := $(KBUILD_OUTPUT)/kselftest - else - BUILD := $(shell pwd) - DEFAULT_INSTALL_HDR_PATH := 1 - endif + BUILD := $(CURDIR) + DEFAULT_INSTALL_HDR_PATH := 1 endif # Prepare for headers install -top_srcdir ?= ../../.. 
include $(top_srcdir)/scripts/subarch.include ARCH ?= $(SUBARCH) export KSFT_KHDR_INSTALL_DONE := 1 diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c index 61ebcdf638311..a3ac5c2d8aac7 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.c +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c @@ -33,7 +33,7 @@ bool validate_extra_context(struct extra_context *extra, char **err) return false; fprintf(stderr, "Validating EXTRA...\n"); - term = GET_RESV_NEXT_HEAD(extra); + term = GET_RESV_NEXT_HEAD(&extra->head); if (!term || term->magic || term->size) { *err = "Missing terminator after EXTRA context"; return false; diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c index 006b5bd99c089..525d810b10b81 100644 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c @@ -901,3 +901,39 @@ .result_unpriv = REJECT, .errstr_unpriv = "unknown func", }, +{ + "reference tracking: try to leak released ptr reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_ringbuf_reserve), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_ringbuf_discard), + BPF_MOV64_IMM(BPF_REG_0, 0), + + BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0), + BPF_EXIT_INSN() + }, + .fixup_map_array_48b = { 4 }, + .fixup_map_ringbuf = { 11 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R8 !read_ok" +}, diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c index 7e50cb80873a5..7e36078f8f482 100644 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ b/tools/testing/selftests/bpf/verifier/search_pruning.c @@ -154,3 +154,39 @@ .result_unpriv = ACCEPT, .insn_processed = 15, }, +/* The test performs a conditional 64-bit write to a stack location + * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], + * then data is read from fp[-8]. This sequence is unsafe. + * + * The test would be mistakenly marked as safe w/o dst register parent + * preservation in verifier.c:copy_register_state() function. + * + * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the + * checkpoint state after conditional 64-bit assignment. 
+ */ +{ + "write tracking and register parent chain bug", + .insns = { + /* r6 = ktime_get_ns() */ + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + /* r0 = ktime_get_ns() */ + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + /* if r0 > r6 goto +1 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1), + /* *(u64 *)(r10 - 8) = 0xdeadbeef */ + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef), + /* r1 = 42 */ + BPF_MOV64_IMM(BPF_REG_1, 42), + /* *(u8 *)(r10 - 8) = r1 */ + BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8), + /* r2 = *(u64 *)(r10 - 8) */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8), + /* exit(0) */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .flags = BPF_F_TEST_STATE_FREQ, + .errstr = "invalid read from stack off -8+1 size 8", + .result = REJECT, +}, diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh index 40909c254365b..16d2de18591d3 100755 --- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh +++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh @@ -495,8 +495,8 @@ dummy_reporter_test() check_reporter_info dummy healthy 3 3 10 true - echo 8192> $DEBUGFS_DIR/health/binary_len - check_fail $? "Failed set dummy reporter binary len to 8192" + echo 8192 > $DEBUGFS_DIR/health/binary_len + check_err $? "Failed set dummy reporter binary len to 8192" local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j) check_err $? "Failed show dump of dummy reporter" diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh index a90f394f9aa90..d374878cc0ba9 100755 --- a/tools/testing/selftests/efivarfs/efivarfs.sh +++ b/tools/testing/selftests/efivarfs/efivarfs.sh @@ -87,6 +87,11 @@ test_create_read() { local file=$efivarfs_mount/$FUNCNAME-$test_guid ./create-read $file + if [ $? 
-ne 0 ]; then + echo "create and read $file failed" + file_cleanup $file + exit 1 + fi file_cleanup $file } diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index 3145b0f1835c3..27a68bbe778be 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc @@ -38,11 +38,18 @@ cnt_trace() { test_event_enabled() { val=$1 + check_times=10 # wait for 10 * SLEEP_TIME at most - e=`cat $EVENT_ENABLE` - if [ "$e" != $val ]; then - fail "Expected $val but found $e" - fi + while [ $check_times -ne 0 ]; do + e=`cat $EVENT_ENABLE` + if [ "$e" == $val ]; then + return 0 + fi + sleep $SLEEP_TIME + check_times=$((check_times - 1)) + done + + fail "Expected $val but found $e" } run_enable_disable() { diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc index 955e3ceea44b5..ada594fe16cb3 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc @@ -1,38 +1,19 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test synthetic_events syntax parser errors -# requires: synthetic_events error_log "char name[]' >> synthetic_events":README +# requires: synthetic_events error_log check_error() { # command-with-error-pos-by-^ ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events' } -check_dyn_error() { # command-with-error-pos-by-^ - ftrace_errlog_check 'synthetic_events' "$1" 'dynamic_events' -} - check_error 'myevent ^chr arg' # INVALID_TYPE -check_error 'myevent ^unsigned arg' # INCOMPLETE_TYPE - -check_error 'myevent char ^str]; int v' # BAD_NAME -check_error '^mye-vent char str[]' # BAD_NAME -check_error 'myevent char ^st-r[]' # BAD_NAME - -check_error 'myevent char str;^[]' # INVALID_FIELD -check_error 'myevent char str; ^int' # INVALID_FIELD - -check_error 'myevent char ^str[; int v' # INVALID_ARRAY_SPEC -check_error 'myevent char ^str[kdjdk]' # INVALID_ARRAY_SPEC -check_error 'myevent char ^str[257]' # INVALID_ARRAY_SPEC - -check_error '^mye;vent char str[]' # INVALID_CMD -check_error '^myevent ; char str[]' # INVALID_CMD -check_error '^myevent; char str[]' # INVALID_CMD -check_error '^myevent ;char str[]' # INVALID_CMD -check_error '^; char str[]' # INVALID_CMD -check_error '^;myevent char str[]' # INVALID_CMD -check_error '^myevent' # INVALID_CMD - -check_dyn_error '^s:junk/myevent char str[' # INVALID_DYN_CMD +check_error 'myevent ^char str[];; int v' # INVALID_TYPE +check_error 'myevent char ^str]; int v' # INVALID_NAME +check_error 'myevent char ^str;[]' # INVALID_NAME +check_error 'myevent ^char str[; int v' # INVALID_TYPE +check_error '^mye;vent char str[]' # BAD_NAME +check_error 'myevent char str[]; ^int' # INVALID_FIELD +check_error '^myevent' # INCOMPLETE_CMD exit 0 diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index 23207829ec752..6a0ed2e7881eb 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -3,11 +3,11 @@ INCLUDES := -I../include -I../../ CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) LDLIBS := 
-lpthread -lrt -HEADERS := \ +LOCAL_HDRS := \ ../include/futextest.h \ ../include/atomic.h \ ../include/logging.h -TEST_GEN_FILES := \ +TEST_GEN_PROGS := \ futex_wait_timeout \ futex_wait_wouldblock \ futex_requeue_pi \ @@ -21,5 +21,3 @@ TEST_PROGS := run.sh top_srcdir = ../../../../.. KSFT_KHDR_INSTALL := 1 include ../../lib.mk - -$(TEST_GEN_FILES): $(HEADERS) diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile index 39f0fa2a8fd63..05d66ef50c977 100644 --- a/tools/testing/selftests/intel_pstate/Makefile +++ b/tools/testing/selftests/intel_pstate/Makefile @@ -2,10 +2,10 @@ CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE LDLIBS += -lm -uname_M := $(shell uname -m 2>/dev/null || echo not) -ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) +ARCH ?= $(shell uname -m 2>/dev/null || echo not) +ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/) -ifeq (x86,$(ARCH)) +ifeq (x86,$(ARCH_PROCESSED)) TEST_GEN_FILES := msr aperf endif diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index b7217b5251f57..56e360e019ecc 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -128,6 +128,11 @@ endef clean: $(CLEAN) +# Enables to extend CFLAGS and LDFLAGS from command line, e.g. +# make USERCFLAGS=-Werror USERLDFLAGS=-static +CFLAGS += $(USERCFLAGS) +LDFLAGS += $(USERLDFLAGS) + # When make O= with kselftest target from main level # the following aren't defined. # diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 4c7d33618437c..7ece4131dc6fc 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -931,6 +931,36 @@ ipv4_fcnal() set +e check_nexthop "dev veth1" "" log_test $? 0 "Nexthops removed on admin down" + + # nexthop route delete warning: route add with nhid and delete + # using device + run_cmd "$IP li set dev veth1 up" + run_cmd "$IP nexthop add id 12 via 172.16.1.3 dev veth1" + out1=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l` + run_cmd "$IP route add 172.16.101.1/32 nhid 12" + run_cmd "$IP route delete 172.16.101.1/32 dev veth1" + out2=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l` + [ $out1 -eq $out2 ] + rc=$? + log_test $rc 0 "Delete nexthop route warning" + run_cmd "$IP route delete 172.16.101.1/32 nhid 12" + run_cmd "$IP nexthop del id 12" + + run_cmd "$IP nexthop add id 21 via 172.16.1.6 dev veth1" + run_cmd "$IP ro add 172.16.101.0/24 nhid 21" + run_cmd "$IP ro del 172.16.101.0/24 nexthop via 172.16.1.7 dev veth1 nexthop via 172.16.1.8 dev veth1" + log_test $? 2 "Delete multipath route with only nh id based entry" + + run_cmd "$IP nexthop add id 22 via 172.16.1.6 dev veth1" + run_cmd "$IP ro add 172.16.102.0/24 nhid 22" + run_cmd "$IP ro del 172.16.102.0/24 dev veth1" + log_test $? 2 "Delete route when specifying only nexthop device" + + run_cmd "$IP ro del 172.16.102.0/24 via 172.16.1.6" + log_test $? 2 "Delete route when specifying only gateway" + + run_cmd "$IP ro del 172.16.102.0/24" + log_test $? 
0 "Delete route when not specifying nexthop attributes" } ipv4_grp_fcnal() diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index a7f53c2a9580a..0f3bf90e04d36 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -1622,13 +1622,21 @@ ipv4_del_addr_test() $IP addr add dev dummy1 172.16.104.1/24 $IP addr add dev dummy1 172.16.104.11/24 + $IP addr add dev dummy1 172.16.104.12/24 + $IP addr add dev dummy1 172.16.104.13/24 $IP addr add dev dummy2 172.16.104.1/24 $IP addr add dev dummy2 172.16.104.11/24 + $IP addr add dev dummy2 172.16.104.12/24 $IP route add 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11 + $IP route add 172.16.106.0/24 dev lo src 172.16.104.12 + $IP route add table 0 172.16.107.0/24 via 172.16.104.2 src 172.16.104.13 $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11 + $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12 set +e # removing address from device in vrf should only remove route from vrf table + echo " Regular FIB info" + $IP addr del dev dummy2 172.16.104.11/24 $IP ro ls vrf red | grep -q 172.16.105.0/24 log_test $? 1 "Route removed from VRF when source address deleted" @@ -1646,6 +1654,35 @@ ipv4_del_addr_test() $IP ro ls vrf red | grep -q 172.16.105.0/24 log_test $? 0 "Route in VRF is not removed by address delete" + # removing address from device in vrf should only remove route from vrf + # table even when the associated fib info only differs in table ID + echo " Identical FIB info with different table ID" + + $IP addr del dev dummy2 172.16.104.12/24 + $IP ro ls vrf red | grep -q 172.16.106.0/24 + log_test $? 1 "Route removed from VRF when source address deleted" + + $IP ro ls | grep -q 172.16.106.0/24 + log_test $? 0 "Route in default VRF not removed" + + $IP addr add dev dummy2 172.16.104.12/24 + $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12 + + $IP addr del dev dummy1 172.16.104.12/24 + $IP ro ls | grep -q 172.16.106.0/24 + log_test $? 1 "Route removed in default VRF when source address deleted" + + $IP ro ls vrf red | grep -q 172.16.106.0/24 + log_test $? 0 "Route in VRF is not removed by address delete" + + # removing address from device in default vrf should remove route from + # the default vrf even when route was inserted with a table ID of 0. + echo " Table ID 0" + + $IP addr del dev dummy1 172.16.104.13/24 + $IP ro ls | grep -q 172.16.107.0/24 + log_test $? 
1 "Route removed in default VRF when source address deleted" + $IP li del dummy1 $IP li del dummy2 cleanup diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 54020d05a62b8..9605e158a0bfc 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -731,14 +731,14 @@ sysctl_set() local value=$1; shift SYSCTL_ORIG[$key]=$(sysctl -n $key) - sysctl -qw $key=$value + sysctl -qw $key="$value" } sysctl_restore() { local key=$1; shift - sysctl -qw $key=${SYSCTL_ORIG["$key"]} + sysctl -qw $key="${SYSCTL_ORIG[$key]}" } forwarding_enable() diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh index e714bae473fb4..81f31179ac887 100755 --- a/tools/testing/selftests/net/forwarding/sch_red.sh +++ b/tools/testing/selftests/net/forwarding/sch_red.sh @@ -1,3 +1,4 @@ +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 # This test sends one stream of traffic from H1 through a TBF shaper, to a RED diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c index b5277106df1fd..b0cc082fbb84f 100644 --- a/tools/testing/selftests/net/reuseport_bpf.c +++ b/tools/testing/selftests/net/reuseport_bpf.c @@ -330,7 +330,7 @@ static void test_extra_filter(const struct test_params p) if (bind(fd1, addr, sockaddr_size())) error(1, errno, "failed to bind recv socket 1"); - if (!bind(fd2, addr, sockaddr_size()) && errno != EADDRINUSE) + if (!bind(fd2, addr, sockaddr_size()) || errno != EADDRINUSE) error(1, errno, "bind socket 2 should fail with EADDRINUSE"); free(addr); diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index c9ce3dfa42ee7..c3a905923ef29 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -782,7 +782,7 @@ kci_test_ipsec_offload() tmpl proto esp src $srcip dst $dstip spi 9 \ mode transport reqid 42 check_err $? - ip x p add dir out src $dstip/24 dst $srcip/24 \ + ip x p add dir in src $dstip/24 dst $srcip/24 \ tmpl proto esp src $dstip dst $srcip spi 9 \ mode transport reqid 42 check_err $? diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index dc932fd653634..640bc43452faa 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh @@ -7,6 +7,7 @@ readonly GREEN='\033[0;92m' readonly YELLOW='\033[0;33m' readonly RED='\033[0;31m' readonly NC='\033[0m' # No Color +readonly TESTPORT=8000 readonly KSFT_PASS=0 readonly KSFT_FAIL=1 @@ -56,11 +57,26 @@ trap wake_children EXIT run_one() { local -r args=$@ + local nr_socks=0 + local i=0 + local -r timeout=10 + + ./udpgso_bench_rx -p "$TESTPORT" & + ./udpgso_bench_rx -p "$TESTPORT" -t & + + # Wait for the above test program to get ready to receive connections. 
+ while [ "$i" -lt "$timeout" ]; do + nr_socks="$(ss -lnHi | grep -c "\*:${TESTPORT}")" + [ "$nr_socks" -eq 2 ] && break + i=$((i + 1)) + sleep 1 + done + if [ "$nr_socks" -ne 2 ]; then + echo "timed out while waiting for udpgso_bench_rx" + exit 1 + fi - ./udpgso_bench_rx & - ./udpgso_bench_rx -t & - - ./udpgso_bench_tx ${args} + ./udpgso_bench_tx -p "$TESTPORT" ${args} } run_in_netns() { diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c index 6a193425c367f..4058c7451e70d 100644 --- a/tools/testing/selftests/net/udpgso_bench_rx.c +++ b/tools/testing/selftests/net/udpgso_bench_rx.c @@ -250,7 +250,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size) static void do_flush_udp(int fd) { static char rbuf[ETH_MAX_MTU]; - int ret, len, gso_size, budget = 256; + int ret, len, gso_size = 0, budget = 256; len = cfg_read_all ? sizeof(rbuf) : 0; while (budget--) { @@ -336,6 +336,8 @@ static void parse_opts(int argc, char **argv) cfg_verify = true; cfg_read_all = true; break; + default: + exit(1); } } diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c index f1fdaa2702913..477392715a9ad 100644 --- a/tools/testing/selftests/net/udpgso_bench_tx.c +++ b/tools/testing/selftests/net/udpgso_bench_tx.c @@ -62,6 +62,7 @@ static int cfg_payload_len = (1472 * 42); static int cfg_port = 8000; static int cfg_runtime_ms = -1; static bool cfg_poll; +static int cfg_poll_loop_timeout_ms = 2000; static bool cfg_segment; static bool cfg_sendmmsg; static bool cfg_tcp; @@ -235,16 +236,17 @@ static void flush_errqueue_recv(int fd) } } -static void flush_errqueue(int fd, const bool do_poll) +static void flush_errqueue(int fd, const bool do_poll, + unsigned long poll_timeout, const bool poll_err) { if (do_poll) { struct pollfd fds = {0}; int ret; fds.fd = fd; - ret = poll(&fds, 1, 500); + ret = poll(&fds, 1, poll_timeout); if (ret == 0) { - if (cfg_verbose) + if ((cfg_verbose) && (poll_err)) fprintf(stderr, "poll timeout\n"); } else if (ret < 0) { error(1, errno, "poll"); @@ -254,6 +256,20 @@ static void flush_errqueue(int fd, const bool do_poll) flush_errqueue_recv(fd); } +static void flush_errqueue_retry(int fd, unsigned long num_sends) +{ + unsigned long tnow, tstop; + bool first_try = true; + + tnow = gettimeofday_ms(); + tstop = tnow + cfg_poll_loop_timeout_ms; + do { + flush_errqueue(fd, true, tstop - tnow, first_try); + first_try = false; + tnow = gettimeofday_ms(); + } while ((stat_zcopies != num_sends) && (tnow < tstop)); +} + static int send_tcp(int fd, char *data) { int ret, done = 0, count = 0; @@ -413,7 +429,8 @@ static int send_udp_segment(int fd, char *data) static void usage(const char *filepath) { - error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]", + error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] " + "[-L secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]", filepath); } @@ -423,7 +440,7 @@ static void parse_opts(int argc, char **argv) int max_len, hdrlen; int c; - while ((c = getopt(argc, argv, "46acC:D:Hl:mM:p:s:PS:tTuvz")) != -1) { + while ((c = getopt(argc, argv, "46acC:D:Hl:L:mM:p:s:PS:tTuvz")) != -1) { switch (c) { case '4': if (cfg_family != PF_UNSPEC) @@ -452,6 +469,9 @@ static void parse_opts(int argc, char **argv) case 'l': cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000; break; + case 'L': + cfg_poll_loop_timeout_ms = strtoul(optarg, NULL, 10) * 1000; + break; case 'm': 
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index f1fdaa2702913..477392715a9ad 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -62,6 +62,7 @@ static int cfg_payload_len = (1472 * 42);
 static int cfg_port = 8000;
 static int cfg_runtime_ms = -1;
 static bool cfg_poll;
+static int cfg_poll_loop_timeout_ms = 2000;
 static bool cfg_segment;
 static bool cfg_sendmmsg;
 static bool cfg_tcp;
@@ -235,16 +236,17 @@ static void flush_errqueue_recv(int fd)
 	}
 }
 
-static void flush_errqueue(int fd, const bool do_poll)
+static void flush_errqueue(int fd, const bool do_poll,
+			   unsigned long poll_timeout, const bool poll_err)
 {
 	if (do_poll) {
 		struct pollfd fds = {0};
 		int ret;
 
 		fds.fd = fd;
-		ret = poll(&fds, 1, 500);
+		ret = poll(&fds, 1, poll_timeout);
 		if (ret == 0) {
-			if (cfg_verbose)
+			if ((cfg_verbose) && (poll_err))
 				fprintf(stderr, "poll timeout\n");
 		} else if (ret < 0) {
 			error(1, errno, "poll");
@@ -254,6 +256,20 @@ static void flush_errqueue(int fd, const bool do_poll)
 	flush_errqueue_recv(fd);
 }
 
+static void flush_errqueue_retry(int fd, unsigned long num_sends)
+{
+	unsigned long tnow, tstop;
+	bool first_try = true;
+
+	tnow = gettimeofday_ms();
+	tstop = tnow + cfg_poll_loop_timeout_ms;
+	do {
+		flush_errqueue(fd, true, tstop - tnow, first_try);
+		first_try = false;
+		tnow = gettimeofday_ms();
+	} while ((stat_zcopies != num_sends) && (tnow < tstop));
+}
+
 static int send_tcp(int fd, char *data)
 {
 	int ret, done = 0, count = 0;
@@ -413,7 +429,8 @@ static int send_udp_segment(int fd, char *data)
 
 static void usage(const char *filepath)
 {
-	error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
+	error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] "
+		    "[-L secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
 	      filepath);
 }
 
@@ -423,7 +440,7 @@ static void parse_opts(int argc, char **argv)
 	int max_len, hdrlen;
 	int c;
 
-	while ((c = getopt(argc, argv, "46acC:D:Hl:mM:p:s:PS:tTuvz")) != -1) {
+	while ((c = getopt(argc, argv, "46acC:D:Hl:L:mM:p:s:PS:tTuvz")) != -1) {
 		switch (c) {
 		case '4':
 			if (cfg_family != PF_UNSPEC)
@@ -452,6 +469,9 @@ static void parse_opts(int argc, char **argv)
 		case 'l':
 			cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
 			break;
+		case 'L':
+			cfg_poll_loop_timeout_ms = strtoul(optarg, NULL, 10) * 1000;
+			break;
 		case 'm':
 			cfg_sendmmsg = true;
 			break;
@@ -490,6 +510,8 @@ static void parse_opts(int argc, char **argv)
 		case 'z':
 			cfg_zerocopy = true;
 			break;
+		default:
+			exit(1);
 		}
 	}
@@ -677,7 +699,7 @@ int main(int argc, char **argv)
 			num_sends += send_udp(fd, buf[i]);
 		num_msgs++;
 		if ((cfg_zerocopy && ((num_msgs & 0xF) == 0)) || cfg_tx_tstamp)
-			flush_errqueue(fd, cfg_poll);
+			flush_errqueue(fd, cfg_poll, 500, true);
 
 		if (cfg_msg_nr && num_msgs >= cfg_msg_nr)
 			break;
@@ -696,7 +718,7 @@ int main(int argc, char **argv)
 	} while (!interrupted && (cfg_runtime_ms == -1 || tnow < tstop));
 
 	if (cfg_zerocopy || cfg_tx_tstamp)
-		flush_errqueue(fd, true);
+		flush_errqueue_retry(fd, num_sends);
 
 	if (close(fd))
 		error(1, errno, "close");
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
index b48e1833bc896..76645aaf2b58f 100755
--- a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
@@ -35,6 +35,8 @@ cleanup() {
 	for i in 1 2;do ip netns del nsrouter$i;done
 }
 
+trap cleanup EXIT
+
 ipv4() {
     echo -n 192.168.$1.2
 }
@@ -146,11 +148,17 @@ ip netns exec nsclient1 nft -f - < /dev/null
+
+expect="packets 1 bytes 112"
+check_counter nsclient1 "redir4" "$expect"
+if [ $? -ne 0 ];then
+	ret=1
+fi
+
+ip netns exec "nsclient1" ping -c 1 dead:1::42 > /dev/null
+expect="packets 1 bytes 192"
+check_counter nsclient1 "redir6" "$expect"
+if [ $? -ne 0 ];then
+	ret=1
+fi
+
+if [ $ret -eq 0 ];then
+	echo "PASS: icmp redirects had RELATED state"
+else
+	echo "ERROR: icmp redirect RELATED state test has failed"
+fi
+
 exit $ret
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
index fbbdffdb2e5d2..f20d1c166d1e4 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
@@ -24,6 +24,7 @@ static int check_cpu_dscr_default(char *file, unsigned long val)
 	rc = read(fd, buf, sizeof(buf));
 	if (rc == -1) {
 		perror("read() failed");
+		close(fd);
 		return 1;
 	}
 	close(fd);
@@ -65,8 +66,10 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
 		if (access(file, F_OK))
 			continue;
 
-		if (check_cpu_dscr_default(file, val))
+		if (check_cpu_dscr_default(file, val)) {
+			closedir(sysfs);
 			return 1;
+		}
 	}
 	closedir(sysfs);
 	return 0;
diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
index e7ceabed7f51f..7d0aa22bdc12b 100644
--- a/tools/testing/selftests/proc/proc-uptime-002.c
+++ b/tools/testing/selftests/proc/proc-uptime-002.c
@@ -17,6 +17,7 @@
 // while shifting across CPUs.
 #undef NDEBUG
 #include <assert.h>
+#include <errno.h>
 #include
 #include
 #include
@@ -54,7 +55,7 @@ int main(void)
 		len += sizeof(unsigned long);
 		free(m);
 		m = malloc(len);
-	} while (sys_sched_getaffinity(0, len, m) == -EINVAL);
+	} while (sys_sched_getaffinity(0, len, m) == -1 && errno == EINVAL);
 
 	fd = open("/proc/uptime", O_RDONLY);
 	assert(fd >= 0);
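The proc-uptime-002.c change above corrects a common errno mix-up: a raw syscall(2) wrapper, like any libc wrapper, reports failure by returning -1 and setting errno; only the bare kernel ABI returns negative error codes such as -EINVAL directly. Checking `== -EINVAL` on the wrapper's return value therefore never matches. A small standalone demo of the corrected convention:

    /* Demonstrates the errno convention: syscall(2) returns -1 and sets
     * errno, it does not return -EINVAL. Demo-only code.
     */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
    	unsigned long mask;
    	long ret;

    	/* A possibly undersized cpumask buffer: EINVAL on machines with
    	 * more CPUs than fit, a valid result otherwise. Both failure
    	 * paths check ret == -1 plus errno, never ret == -EINVAL.
    	 */
    	ret = syscall(SYS_sched_getaffinity, 0, sizeof(mask), &mask);
    	if (ret == -1 && errno == EINVAL)
    		printf("buffer too small: grow and retry\n");
    	else if (ret == -1)
    		perror("sched_getaffinity");
    	else
    		printf("affinity mask fits in %zu bytes\n", sizeof(mask));
    	return 0;
    }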
diff --git a/tools/testing/selftests/rcutorture/bin/console-badness.sh b/tools/testing/selftests/rcutorture/bin/console-badness.sh
index 0e4c0b2eb7f08..80ae7f08b363e 100755
--- a/tools/testing/selftests/rcutorture/bin/console-badness.sh
+++ b/tools/testing/selftests/rcutorture/bin/console-badness.sh
@@ -13,4 +13,5 @@
 egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' |
 grep -v 'ODEBUG: ' |
 grep -v 'This means that this is a DEBUG kernel and it is' |
-grep -v 'Warning: unable to open an initial console'
+grep -v 'Warning: unable to open an initial console' |
+grep -v 'NOHZ tick-stop error: Non-RCU local softirq work is pending, handler'
diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py
index f34486cd7342d..3e67fdb518ec3 100644
--- a/tools/testing/selftests/tpm2/tpm2.py
+++ b/tools/testing/selftests/tpm2/tpm2.py
@@ -370,6 +370,10 @@ def __init__(self, flags = 0):
 
         fcntl.fcntl(self.tpm, fcntl.F_SETFL, flags)
         self.tpm_poll = select.poll()
 
+    def __del__(self):
+        if self.tpm:
+            self.tpm.close()
+
     def close(self):
         self.tpm.close()
diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h
index b14c2c3b6b857..74aef964f5099 100644
--- a/tools/virtio/linux/bug.h
+++ b/tools/virtio/linux/bug.h
@@ -1,11 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BUG_H
-#define BUG_H
+#ifndef _LINUX_BUG_H
+#define _LINUX_BUG_H
 
 #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
 
-#define BUILD_BUG_ON(x)
-
 #define BUG() abort()
 
-#endif /* BUG_H */
+#endif /* _LINUX_BUG_H */
diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h
new file mode 100644
index 0000000000000..cdbb75e28a604
--- /dev/null
+++ b/tools/virtio/linux/build_bug.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILD_BUG_H
+#define _LINUX_BUILD_BUG_H
+
+#define BUILD_BUG_ON(x)
+
+#endif /* _LINUX_BUILD_BUG_H */
diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h
new file mode 100644
index 0000000000000..307da69d6b26c
--- /dev/null
+++ b/tools/virtio/linux/cpumask.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPUMASK_H
+#define _LINUX_CPUMASK_H
+
+#include <linux/kernel.h>
+
+#endif /* _LINUX_CPUMASK_H */
diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h
new file mode 100644
index 0000000000000..43d146f236f14
--- /dev/null
+++ b/tools/virtio/linux/gfp.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#include <linux/topology.h>
+
+#endif
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 315e85cabedab..063ccc8975647 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -10,6 +10,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
new file mode 100644
index 0000000000000..272b5aa285d5a
--- /dev/null
+++ b/tools/virtio/linux/kmsan.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/gfp.h>
+
+inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+			     enum dma_data_direction dir)
+{
+}
+
+#endif /* _LINUX_KMSAN_H */
diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
index 369ee308b6686..74d9e1825748e 100644
--- a/tools/virtio/linux/scatterlist.h
+++ b/tools/virtio/linux/scatterlist.h
@@ -2,6 +2,7 @@
 #ifndef SCATTERLIST_H
 #define SCATTERLIST_H
 #include <linux/kernel.h>
+#include <linux/bug.h>
 
 struct scatterlist {
 	unsigned long page_link;
diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h
new file mode 100644
index 0000000000000..910794afb993a
--- /dev/null
+++ b/tools/virtio/linux/topology.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TOPOLOGY_H
+#define _LINUX_TOPOLOGY_H
+
+#include <linux/cpumask.h>
+
+#endif /* _LINUX_TOPOLOGY_H */
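The tools/virtio stubs above mirror the real kernel header layout: each `linux/*.h` shim carries an include guard and at most pulls in the next header of the chain (kmsan -> gfp -> topology -> cpumask -> kernel), so kernel sources compile unmodified in userspace. Several `#include` targets in this hunk were lost in transit and are restored here only where the chain makes them unambiguous; the remaining bare `#include` lines are left as-is. Splitting BUILD_BUG_ON out of bug.h into build_bug.h follows the same logic: code that includes <linux/build_bug.h> directly must find it. A sketch of the stub pattern, including a stricter variant one could substitute (an assumption, not part of the patch):

    /* Hypothetical stub header, demo only. A no-op BUILD_BUG_ON matches the
     * patch above; a C11 _Static_assert variant is sketched as an
     * alternative, under the assumption the test build allows C11.
     */
    #ifndef _DEMO_BUILD_BUG_H
    #define _DEMO_BUILD_BUG_H

    /* no-op variant, as in the tools/virtio stub: */
    #define BUILD_BUG_ON_NOP(x)

    /* equivalent compile-time check: */
    #define BUILD_BUG_ON_STRICT(x) _Static_assert(!(x), "BUILD_BUG_ON: " #x)

    #endif /* _DEMO_BUILD_BUG_H */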
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index fa87b58bd5fa5..98ff808d6f0c2 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -308,6 +308,7 @@ static int parallel_test(u64 features,
 
 	gvdev.vdev.features = features;
 	INIT_LIST_HEAD(&gvdev.vdev.vqs);
+	spin_lock_init(&gvdev.vdev.vqs_list_lock);
 	gvdev.to_host_fd = to_host[1];
 	gvdev.notifies = 0;
 
@@ -455,6 +456,7 @@ int main(int argc, char *argv[])
 		getrange = getrange_iov;
 	vdev.features = 0;
 	INIT_LIST_HEAD(&vdev.vqs);
+	spin_lock_init(&vdev.vqs_list_lock);
 
 	while (argv[1]) {
 		if (strcmp(argv[1], "--indirect") == 0)
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 26e193ffd2a2f..873a892147e57 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -150,7 +150,7 @@ do_preprocess()
 	let lines=3
 	out=`basename "$in"`"-slabs-by-loss"
 	`cat "$in" | grep -A "$lines" 'Slabs sorted by loss' |\
-		egrep -iv '\-\-|Name|Slabs'\
+		grep -E -iv '\-\-|Name|Slabs'\
 		| awk '{print $1" "$4+$2*$3" "$4}' > "$out"`
 	if [ $? -eq 0 ]; then
 		do_slabs_plotting "$out"
@@ -159,7 +159,7 @@ do_preprocess()
 	let lines=3
 	out=`basename "$in"`"-slabs-by-size"
 	`cat "$in" | grep -A "$lines" 'Slabs sorted by size' |\
-		egrep -iv '\-\-|Name|Slabs'\
+		grep -E -iv '\-\-|Name|Slabs'\
 		| awk '{print $1" "$4" "$4-$2*$3}' > "$out"`
 	if [ $? -eq 0 ]; then
 		do_slabs_plotting "$out"
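The vringh_test.c hunks above fix an initialize-before-use bug: the vq list head was set up, but the spinlock guarding that list never was, so the first acquisition operated on uninitialized memory. A userspace analogue using POSIX spinlocks, with a demo structure rather than the virtio types:

    /* Initialize the lock that guards a structure before anything can take
     * it. Demo-only code; build with -lpthread.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct demo_dev {
    	int nvqs;
    	pthread_spinlock_t vqs_lock;	/* guards vq bookkeeping */
    };

    static void demo_dev_init(struct demo_dev *d)
    {
    	d->nvqs = 0;
    	/* Omitting this is the bug class the patch fixes: the first
    	 * lock/unlock would touch uninitialized memory. */
    	pthread_spin_init(&d->vqs_lock, PTHREAD_PROCESS_PRIVATE);
    }

    int main(void)
    {
    	struct demo_dev d;

    	demo_dev_init(&d);
    	pthread_spin_lock(&d.vqs_lock);
    	d.nvqs++;
    	pthread_spin_unlock(&d.vqs_lock);
    	pthread_spin_destroy(&d.vqs_lock);
    	printf("nvqs=%d\n", d.nvqs);
    	return 0;
    }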
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 578235291e92e..564d5c145fbe7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -159,6 +159,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
 	/*
@@ -340,6 +344,12 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+	kvm_arch_flush_shadow_all(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 					       gfp_t gfp_flags)
@@ -489,6 +499,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+	kvm_arch_guest_memory_reclaimed(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 
 	return 0;
@@ -592,7 +603,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -896,7 +907,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -1238,6 +1249,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * - kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
+		kvm_arch_guest_memory_reclaimed(kvm);
 	}
 
 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
@@ -3954,6 +3966,12 @@ struct compat_kvm_clear_dirty_log {
 	};
 };
 
+long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+				     unsigned long arg)
+{
+	return -ENOTTY;
+}
+
 static long kvm_vm_compat_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -3962,6 +3980,11 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 	if (kvm->mm != current->mm || kvm->vm_bugged)
 		return -EIO;
+
+	r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
+	if (r != -ENOTTY)
+		return r;
+
 	switch (ioctl) {
 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	case KVM_CLEAR_DIRTY_LOG: {
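The kvm_main.c changes above lean on `__weak` default definitions: common code supplies a no-op kvm_arch_guest_memory_reclaimed() and an -ENOTTY kvm_arch_vm_compat_ioctl(), and an architecture overrides either one simply by defining a strong symbol of the same name. A self-contained demo of the linker behaviour, with a hypothetical hook name:

    /* Weak-symbol default: with no strong definition elsewhere, the weak
     * body runs; any other object file can override it by defining the
     * same symbol. Demo-only code (GCC/clang attribute).
     */
    #include <stdio.h>

    __attribute__((weak)) long demo_arch_hook(unsigned int cmd)
    {
    	(void)cmd;
    	/* default: tell the caller the hook is not implemented */
    	return -25;	/* mirrors returning -ENOTTY */
    }

    int main(void)
    {
    	long r = demo_arch_hook(42);

    	if (r == -25)
    		printf("no arch override; falling back to generic path\n");
    	else
    		printf("arch hook handled cmd: %ld\n", r);
    	return 0;
    }

The -ENOTTY probe in kvm_vm_compat_ioctl() uses the same convention at the call site: a return value other than -ENOTTY means the arch hook claimed the ioctl, so the generic switch is skipped.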