diff --git a/Documentation/ABI/testing/configfs-usb-gadget-ffs b/Documentation/ABI/testing/configfs-usb-gadget-ffs
new file mode 100644
index 00000000000000..14343e237e8391
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-ffs
@@ -0,0 +1,9 @@
+What: /config/usb-gadget/gadget/functions/ffs.name
+Date: Nov 2013
+KernelVersion: 3.13
+Description: Creating or removing this directory creates or removes
+ a corresponding USB function instance.
+
+ There are no attributes here.
+
+ All parameters are set through FunctionFS.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-loopback b/Documentation/ABI/testing/configfs-usb-gadget-loopback
new file mode 100644
index 00000000000000..852b2365a5b57a
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-loopback
@@ -0,0 +1,8 @@
+What: /config/usb-gadget/gadget/functions/Loopback.name
+Date: Nov 2013
+KernelVersion: 3.13
+Description:
+ The attributes:
+
+ qlen - depth of loopback queue
+ bulk_buflen - buffer length
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
new file mode 100644
index 00000000000000..a30f3093ef6cae
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
@@ -0,0 +1,12 @@
+What: /config/usb-gadget/gadget/functions/SourceSink.name
+Date: Nov 2013
+KernelVersion: 3.13
+Description:
+ The attributes:
+
+ pattern - 0 (all zeros), 1 (mod63), 2 (none)
+ isoc_interval - 1..16
+ isoc_maxpacket - 0 - 1023 (fs), 0 - 1024 (hs/ss)
+ isoc_mult - 0..2 (hs/ss only)
+ isoc_maxburst - 0..15 (ss only)
+ qlen - buffer length
diff --git a/Documentation/ABI/testing/debugfs-driver-genwqe b/Documentation/ABI/testing/debugfs-driver-genwqe
new file mode 100644
index 00000000000000..1c2f25674e8c3f
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-genwqe
@@ -0,0 +1,91 @@
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/ddcb_info
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: DDCB queue dump used for debugging queueing problems.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_regs
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump of the current error registers.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid0
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID0 (unit id 0).
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid1
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID1.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid2
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID2.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_regs
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump of the error registers before the last reset of
+ the card occurred.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid0
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID0 before card was reset.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid1
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID1 before card was reset.
+ Only available for PF.
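A usage sketch for the dump entries in this file: the directory name
"genwqe0_card" below is a hypothetical example for a single card enumerated
as index 0, and the dumps might be read with ordinary tools, e.g.:

 # cat /sys/kernel/debug/genwqe/genwqe0_card/ddcb_info
 # cat /sys/kernel/debug/genwqe/genwqe0_card/prev_regs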
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid2
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID2 before card was reset.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/info
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Comprehensive summary of bitstream version and software
+ version. Used bitstream and bitstream clocking information.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/err_inject
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Possibility to inject error cases to ensure that the driver's
+ error handling code works well.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/vf<0..14>_jobtimeout_msec
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Default VF timeout is 250ms. Testing might require 1000ms.
+ Using 0 will use the card's default value (whatever that is).
+
+ The timeout depends on the max number of available cards
+ in the system and the maximum allowed queue size.
+
+ The driver ensures that the settings are done just before
+ the VFs get enabled. Changing the timeouts in flight is not
+ possible.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/jobtimer
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump job timeout register values for PF and VFs.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/queue_working_time
+Date: Dec 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump queue working time register values for PF and VFs.
+ Only available for PF.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index b20e829d350f7f..6e02c5029152ea 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -197,6 +197,19 @@
 Description:
 Raw pressure measurement from channel Y. Units after
 application of scale and offset are kilopascal.
 
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_raw
+KernelVersion: 3.14
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw humidity measurement of air. Units after application of
+ scale and offset are milli percent.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_input
+KernelVersion: 3.14
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scaled humidity measurement in milli percent.
+
 What: /sys/bus/iio/devices/iio:deviceX/in_accel_offset
 What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_offset
 What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_offset
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 1430f584b26641..614d451cee4197 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -50,13 +50,19 @@
 Description:
 This may allow the driver to support more hardware than
 was included in the driver's static device ID support
 table at compile time. The format for the device ID is:
- idVendor idProduct bInterfaceClass.
+ idVendor idProduct bInterfaceClass RefIdVendor RefIdProduct
 The vendor ID and device ID fields are required, the
- interface class is optional.
+ rest is optional. The Ref* tuple can be used to tell the
+ driver to use the same driver_data for the new device as
+ is used for the reference device.
 Upon successfully adding an ID, the driver will probe
 for the device and attempt to bind to it.
 
 For example:
 # echo "8086 10f5" > /sys/bus/usb/drivers/foo/new_id
 
+ Here is how to add a new device (0458:7045) using driver_data from
+ an already supported device (0458:704c):
+ # echo "0458 7045 0 0458 704c" > /sys/bus/usb/drivers/foo/new_id
+
 Reading from this file will list all dynamically added
 device IDs in the same format, with one entry per
 line. For example:
diff --git a/Documentation/ABI/testing/sysfs-driver-genwqe b/Documentation/ABI/testing/sysfs-driver-genwqe
new file mode 100644
index 00000000000000..1870737a1f5eff
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-genwqe
@@ -0,0 +1,62 @@
+What: /sys/class/genwqe/genwqe<n>_card/version
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Unique bitstream identification e.g.
+ '0000000330336283.00000000475a4950'.
+
+What: /sys/class/genwqe/genwqe<n>_card/appid
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Identifies the currently active card application e.g. 'GZIP'
+ for compression/decompression.
+
+What: /sys/class/genwqe/genwqe<n>_card/type
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Type of the card e.g. 'GenWQE5-A7'.
+
+What: /sys/class/genwqe/genwqe<n>_card/curr_bitstream
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Currently active bitstream. 1 is default, 0 is backup.
+
+What: /sys/class/genwqe/genwqe<n>_card/next_bitstream
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to set the next bitstream to be used.
+
+What: /sys/class/genwqe/genwqe<n>_card/tempsens
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to read the card's temperature sense register.
+
+What: /sys/class/genwqe/genwqe<n>_card/freerunning_timer
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to read the card's free running timer.
+ Used for performance and utilization measurements.
+
+What: /sys/class/genwqe/genwqe<n>_card/queue_working_time
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to read queue working time.
+ Used for performance and utilization measurements.
+
+What: /sys/class/genwqe/genwqe<n>_card/state
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: State of the card: "unused", "used", "error".
+
+What: /sys/class/genwqe/genwqe<n>_card/base_clock
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Base clock frequency of the card.
+
+What: /sys/class/genwqe/genwqe<n>_card/device/sriov_numvfs
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Enable VFs (1..15):
+ sudo sh -c 'echo 15 > \
+ /sys/bus/pci/devices/0000\:1b\:00.0/sriov_numvfs'
+ Disable VFs:
+ Write a 0 into the same sysfs entry.
diff --git a/Documentation/ABI/testing/sysfs-firmware-efi b/Documentation/ABI/testing/sysfs-firmware-efi
new file mode 100644
index 00000000000000..05874da7ce8030
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-efi
@@ -0,0 +1,20 @@
+What: /sys/firmware/efi/fw_vendor
+Date: December 2013
+Contact: Dave Young
+Description: It shows the physical address of the firmware vendor field in the
+ EFI system table.
+Users: Kexec
+
+What: /sys/firmware/efi/runtime
+Date: December 2013
+Contact: Dave Young
+Description: It shows the physical address of the runtime services table entry in
+ the EFI system table.
+Users: Kexec
+
+What: /sys/firmware/efi/config_table
+Date: December 2013
+Contact: Dave Young
+Description: It shows the physical address of the config table entry in the EFI
+ system table.
+Users: Kexec
diff --git a/Documentation/ABI/testing/sysfs-firmware-efi-runtime-map b/Documentation/ABI/testing/sysfs-firmware-efi-runtime-map
new file mode 100644
index 00000000000000..c61b9b348e9996
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-efi-runtime-map
@@ -0,0 +1,34 @@
+What: /sys/firmware/efi/runtime-map/
+Date: December 2013
+Contact: Dave Young
+Description: Switching efi runtime services to virtual mode requires
+ that all efi memory ranges which have the runtime attribute
+ bit set be mapped to virtual addresses.
+
+ The efi runtime services can only be switched to virtual
+ mode once without rebooting. The kexec kernel must maintain
+ the same physical to virtual address mappings as the first
+ kernel. The mappings are exported to sysfs so userspace tools
+ can reassemble them and pass them into the kexec kernel.
+
+ /sys/firmware/efi/runtime-map/ is the directory the kernel
+ exports that information in.
+
+ Subdirectories are named with the number of the memory range:
+
+ /sys/firmware/efi/runtime-map/0
+ /sys/firmware/efi/runtime-map/1
+ /sys/firmware/efi/runtime-map/2
+ /sys/firmware/efi/runtime-map/3
+ ...
+
+ Each subdirectory contains five files:
+
+ attribute : The attributes of the memory range.
+ num_pages : The size of the memory range in pages.
+ phys_addr : The physical address of the memory range.
+ type : The type of the memory range.
+ virt_addr : The virtual address of the memory range.
+
+ The above values are all hexadecimal numbers with the '0x' prefix.
+Users: Kexec
diff --git a/Documentation/ABI/testing/sysfs-kernel-boot_params b/Documentation/ABI/testing/sysfs-kernel-boot_params
new file mode 100644
index 00000000000000..eca38ce2852d3a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-boot_params
@@ -0,0 +1,38 @@
+What: /sys/kernel/boot_params
+Date: December 2013
+Contact: Dave Young
+Description: The /sys/kernel/boot_params directory contains two
+ files, "data" and "version", and one subdirectory, "setup_data".
+ It is used to export the kernel boot parameters of an x86
+ platform to userspace for kexec and debugging purposes.
+
+ If there's no setup_data in boot_params the subdirectory will
+ not be created.
+
+ The "data" file is the binary representation of struct boot_params.
+
+ The "version" file is the string representation of the boot
+ protocol version.
+
+ The "setup_data" subdirectory contains the setup_data data
+ structure in boot_params. setup_data is maintained in the kernel
+ as a linked list. In the "setup_data" subdirectory there is one
+ subdirectory for each linked-list node, named with the number
+ of the list node. Each list node subdirectory contains two
+ files, "type" and "data". The "type" file is the string
+ representation of the setup_data type. The "data" file is the
+ binary representation of the setup_data payload.
+
+ The whole boot_params directory structure is like below:
+ /sys/kernel/boot_params
+ |__ data
+ |__ setup_data
+ |   |__ 0
+ |   |   |__ data
+ |   |   |__ type
+ |   |__ 1
+ |       |__ data
+ |       |__ type
+ |__ version
+
+Users: Kexec
diff --git a/Documentation/ABI/testing/sysfs-platform-tahvo-usb b/Documentation/ABI/testing/sysfs-platform-tahvo-usb
new file mode 100644
index 00000000000000..f6e20ce4b538ed
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-tahvo-usb
@@ -0,0 +1,16 @@
+What: /sys/bus/platform/devices/tahvo-usb/otg_mode
+Date: December 2013
+Contact: Aaro Koskinen
+Description:
+ Set or read the current OTG mode. Valid values are "host" and
+ "peripheral".
+
+ Reading: returns the current mode.
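As an illustrative sketch of using this attribute from the shell (assuming
the tahvo-usb device is present at the path given above), the mode might be
switched and read back like this:

 # echo peripheral > /sys/bus/platform/devices/tahvo-usb/otg_mode
 # cat /sys/bus/platform/devices/tahvo-usb/otg_mode
 peripheral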
+ +What: /sys/bus/platform/devices/tahvo-usb/vbus +Date: December 2013 +Contact: Aaro Koskinen +Description: + Read the current VBUS state. + + Reading: returns "on" or "off". diff --git a/Documentation/HOWTO b/Documentation/HOWTO index 27faae3e3846fa..57cf5efb044da7 100644 --- a/Documentation/HOWTO +++ b/Documentation/HOWTO @@ -112,7 +112,7 @@ required reading: Other excellent descriptions of how to create patches properly are: "The Perfect Patch" - http://kerneltrap.org/node/3737 + http://www.ozlabs.org/~akpm/stuff/tpp.txt "Linux kernel patch submission format" http://linux.yyz.us/patch-format.html @@ -579,7 +579,7 @@ all time. It should describe the patch completely, containing: For more details on what this should all look like, please see the ChangeLog section of the document: "The Perfect Patch" - http://userweb.kernel.org/~akpm/stuff/tpp.txt + http://www.ozlabs.org/~akpm/stuff/tpp.txt diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt index f3778f8952da1b..910870b15acd25 100644 --- a/Documentation/RCU/trace.txt +++ b/Documentation/RCU/trace.txt @@ -396,14 +396,14 @@ o Each element of the form "3/3 ..>. 0:7 ^0" represents one rcu_node The output of "cat rcu/rcu_sched/rcu_pending" looks as follows: - 0!np=26111 qsp=29 rpq=5386 cbr=1 cng=570 gpc=3674 gps=577 nn=15903 - 1!np=28913 qsp=35 rpq=6097 cbr=1 cng=448 gpc=3700 gps=554 nn=18113 - 2!np=32740 qsp=37 rpq=6202 cbr=0 cng=476 gpc=4627 gps=546 nn=20889 - 3 np=23679 qsp=22 rpq=5044 cbr=1 cng=415 gpc=3403 gps=347 nn=14469 - 4!np=30714 qsp=4 rpq=5574 cbr=0 cng=528 gpc=3931 gps=639 nn=20042 - 5 np=28910 qsp=2 rpq=5246 cbr=0 cng=428 gpc=4105 gps=709 nn=18422 - 6!np=38648 qsp=5 rpq=7076 cbr=0 cng=840 gpc=4072 gps=961 nn=25699 - 7 np=37275 qsp=2 rpq=6873 cbr=0 cng=868 gpc=3416 gps=971 nn=25147 + 0!np=26111 qsp=29 rpq=5386 cbr=1 cng=570 gpc=3674 gps=577 nn=15903 ndw=0 + 1!np=28913 qsp=35 rpq=6097 cbr=1 cng=448 gpc=3700 gps=554 nn=18113 ndw=0 + 2!np=32740 qsp=37 rpq=6202 cbr=0 cng=476 gpc=4627 gps=546 nn=20889 ndw=0 + 3 np=23679 qsp=22 rpq=5044 cbr=1 cng=415 gpc=3403 gps=347 nn=14469 ndw=0 + 4!np=30714 qsp=4 rpq=5574 cbr=0 cng=528 gpc=3931 gps=639 nn=20042 ndw=0 + 5 np=28910 qsp=2 rpq=5246 cbr=0 cng=428 gpc=4105 gps=709 nn=18422 ndw=0 + 6!np=38648 qsp=5 rpq=7076 cbr=0 cng=840 gpc=4072 gps=961 nn=25699 ndw=0 + 7 np=37275 qsp=2 rpq=6873 cbr=0 cng=868 gpc=3416 gps=971 nn=25147 ndw=0 The fields are as follows: @@ -432,6 +432,10 @@ o "gpc" is the number of times that an old grace period had o "gps" is the number of times that a new grace period had started, but this CPU was not yet aware of it. +o "ndw" is the number of times that a wakeup of an rcuo + callback-offload kthread had to be deferred in order to avoid + deadlock. + o "nn" is the number of times that this CPU needed nothing. @@ -443,7 +447,7 @@ The output of "cat rcu/rcuboost" looks as follows: balk: nt=0 egt=6541 bt=0 nb=0 ny=126 nos=0 This information is output only for rcu_preempt. Each two-line entry -corresponds to a leaf rcu_node strcuture. The fields are as follows: +corresponds to a leaf rcu_node structure. The fields are as follows: o "n:m" is the CPU-number range for the corresponding two-line entry. In the sample output above, the first entry covers diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt index a58b63da1a366e..f51861bcb07bbb 100644 --- a/Documentation/acpi/apei/einj.txt +++ b/Documentation/acpi/apei/einj.txt @@ -45,11 +45,22 @@ directory apei/einj. The following files are provided. injection. 
Before this, please specify all necessary error parameters. +- flags + Present for kernel version 3.13 and above. Used to specify which + of param{1..4} are valid and should be used by BIOS during injection. + Value is a bitmask as specified in ACPI5.0 spec for the + SET_ERROR_TYPE_WITH_ADDRESS data structure: + Bit 0 - Processor APIC field valid (see param3 below) + Bit 1 - Memory address and mask valid (param1 and param2) + Bit 2 - PCIe (seg,bus,dev,fn) valid (param4 below) + If set to zero, legacy behaviour is used where the type of injection + specifies just one bit set, and param1 is multiplexed. + - param1 This file is used to set the first error parameter value. Effect of parameter depends on error_type specified. For example, if error type is memory related type, the param1 should be a valid physical - memory address. + memory address. [Unless "flag" is set - see above] - param2 This file is used to set the second error parameter value. Effect of @@ -58,6 +69,12 @@ directory apei/einj. The following files are provided. address mask. Linux requires page or narrower granularity, say, 0xfffffffffffff000. +- param3 + Used when the 0x1 bit is set in "flag" to specify the APIC id + +- param4 + Used when the 0x4 bit is set in "flag" to specify target PCIe device + - notrigger The EINJ mechanism is a two step process. First inject the error, then perform some actions to trigger it. Setting "notrigger" to 1 skips the diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt new file mode 100644 index 00000000000000..b2830b4358956e --- /dev/null +++ b/Documentation/block/null_blk.txt @@ -0,0 +1,72 @@ +Null block device driver +================================================================================ + +I. Overview + +The null block device (/dev/nullb*) is used for benchmarking the various +block-layer implementations. It emulates a block device of X gigabytes in size. +The following instances are possible: + + Single-queue block-layer + - Request-based. + - Single submission queue per device. + - Implements IO scheduling algorithms (CFQ, Deadline, noop). + Multi-queue block-layer + - Request-based. + - Configurable submission queues per device. + No block-layer (Known as bio-based) + - Bio-based. IO requests are submitted directly to the device driver. + - Directly accepts bio data structure and returns them. + +All of them have a completion queue for each core in the system. + +II. Module parameters applicable for all instances: + +queue_mode=[0-2]: Default: 2-Multi-queue + Selects which block-layer the module should instantiate with. + + 0: Bio-based. + 1: Single-queue. + 2: Multi-queue. + +home_node=[0--nr_nodes]: Default: NUMA_NO_NODE + Selects what CPU node the data structures are allocated from. + +gb=[Size in GB]: Default: 250GB + The size of the device reported to the system. + +bs=[Block size (in bytes)]: Default: 512 bytes + The block size reported to the system. + +nr_devices=[Number of devices]: Default: 2 + Number of block devices instantiated. They are instantiated as /dev/nullb0, + etc. + +irq_mode=[0-2]: Default: 1-Soft-irq + The completion mode used for completing IOs to the block-layer. + + 0: None. + 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead + when IOs are issued from another CPU node than the home the device is + connected to. + 2: Timer: Waits a specific period (completion_nsec) for each IO before + completion. + +completion_nsec=[ns]: Default: 10.000ns + Combined with irq_mode=2 (timer). 
The time each completion event must wait. + +submit_queues=[0..nr_cpus]: + The number of submission queues attached to the device driver. If unset, it + defaults to 1 on single-queue and bio-based instances. For multi-queue, + it is ignored when use_per_node_hctx module parameter is 1. + +hw_queue_depth=[0..qdepth]: Default: 64 + The hardware queue depth of the device. + +III: Multi-queue specific parameters + +use_per_node_hctx=[0/1]: Default: 0 + 0: The number of submit queues are set to the value of the submit_queues + parameter. + 1: The multi-queue block layer is instantiated with a hardware dispatch + queue for each CPU node in the system. diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt index 8117e5bf606503..88951b179262a9 100644 --- a/Documentation/circular-buffers.txt +++ b/Documentation/circular-buffers.txt @@ -160,6 +160,7 @@ The producer will look something like this: spin_lock(&producer_lock); unsigned long head = buffer->head; + /* The spin_unlock() and next spin_lock() provide needed ordering. */ unsigned long tail = ACCESS_ONCE(buffer->tail); if (CIRC_SPACE(head, tail, buffer->size) >= 1) { @@ -168,9 +169,8 @@ The producer will look something like this: produce_item(item); - smp_wmb(); /* commit the item before incrementing the head */ - - buffer->head = (head + 1) & (buffer->size - 1); + smp_store_release(buffer->head, + (head + 1) & (buffer->size - 1)); /* wake_up() will make sure that the head is committed before * waking anyone up */ @@ -183,9 +183,14 @@ This will instruct the CPU that the contents of the new item must be written before the head index makes it available to the consumer and then instructs the CPU that the revised head index must be written before the consumer is woken. -Note that wake_up() doesn't have to be the exact mechanism used, but whatever -is used must guarantee a (write) memory barrier between the update of the head -index and the change of state of the consumer, if a change of state occurs. +Note that wake_up() does not guarantee any sort of barrier unless something +is actually awakened. We therefore cannot rely on it for ordering. However, +there is always one element of the array left empty. Therefore, the +producer must produce two elements before it could possibly corrupt the +element currently being read by the consumer. Therefore, the unlock-lock +pair between consecutive invocations of the consumer provides the necessary +ordering between the read of the index indicating that the consumer has +vacated a given element and the write by the producer to that same element. THE CONSUMER @@ -195,21 +200,20 @@ The consumer will look something like this: spin_lock(&consumer_lock); - unsigned long head = ACCESS_ONCE(buffer->head); + /* Read index before reading contents at that index. */ + unsigned long head = smp_load_acquire(buffer->head); unsigned long tail = buffer->tail; if (CIRC_CNT(head, tail, buffer->size) >= 1) { - /* read index before reading contents at that index */ - smp_read_barrier_depends(); /* extract one item from the buffer */ struct item *item = buffer[tail]; consume_item(item); - smp_mb(); /* finish reading descriptor before incrementing tail */ - - buffer->tail = (tail + 1) & (buffer->size - 1); + /* Finish reading descriptor before incrementing tail. 
*/ + smp_store_release(buffer->tail, + (tail + 1) & (buffer->size - 1)); } spin_unlock(&consumer_lock); @@ -218,12 +222,17 @@ This will instruct the CPU to make sure the index is up to date before reading the new item, and then it shall make sure the CPU has finished reading the item before it writes the new tail pointer, which will erase the item. - -Note the use of ACCESS_ONCE() in both algorithms to read the opposition index. -This prevents the compiler from discarding and reloading its cached value - -which some compilers will do across smp_read_barrier_depends(). This isn't -strictly needed if you can be sure that the opposition index will _only_ be -used the once. +Note the use of ACCESS_ONCE() and smp_load_acquire() to read the +opposition index. This prevents the compiler from discarding and +reloading its cached value - which some compilers will do across +smp_read_barrier_depends(). This isn't strictly needed if you can +be sure that the opposition index will _only_ be used the once. +The smp_load_acquire() additionally forces the CPU to order against +subsequent memory references. Similarly, smp_store_release() is used +in both algorithms to write the thread's index. This documents the +fact that we are writing to something that can be read concurrently, +prevents the compiler from tearing the store, and enforces ordering +against previous accesses. =============== diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt index 1196290082d1ed..78530e621a1ea8 100644 --- a/Documentation/devicetree/bindings/arm/atmel-at91.txt +++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt @@ -20,6 +20,10 @@ TC/TCLIB Timer required properties: - interrupts: Should contain all interrupts for the TC block Note that you can specify several interrupt cells if the TC block has one interrupt per channel. +- clock-names: tuple listing input clock names. + Required elements: "t0_clk" + Optional elements: "t1_clk", "t2_clk" +- clocks: phandles to input clocks. Examples: @@ -28,6 +32,8 @@ One interrupt per TC block: compatible = "atmel,at91rm9200-tcb"; reg = <0xfff7c000 0x100>; interrupts = <18 4>; + clocks = <&tcb0_clk>; + clock-names = "t0_clk"; }; One interrupt per TC channel in a TC block: @@ -35,6 +41,8 @@ One interrupt per TC channel in a TC block: compatible = "atmel,at91rm9200-tcb"; reg = <0xfffdc000 0x100>; interrupts = <26 4 27 4 28 4>; + clocks = <&tcb1_clk>; + clock-names = "t0_clk"; }; RSTC Reset Controller required properties: diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt index 46f5c791ea0df6..0f2f920e873485 100644 --- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt +++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt @@ -159,6 +159,8 @@ clock which they consume. mixer 343 hdmi 344 g2d 345 + mdma0 346 + smmu_mdma0 347 [Clock Muxes] diff --git a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt index 7dab6a8f4a0e46..45414bbcd9455b 100644 --- a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt +++ b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt @@ -2,7 +2,11 @@ EXTCON FOR PALMAS/TWL CHIPS PALMAS USB COMPARATOR Required Properties: - - compatible : Should be "ti,palmas-usb" or "ti,twl6035-usb" + - compatible: should contain one of: + * "ti,palmas-usb-vid". + * "ti,twl6035-usb-vid". 
+ * "ti,palmas-usb" (DEPRECATED - use "ti,palmas-usb-vid"). + * "ti,twl6035-usb" (DEPRECATED - use "ti,twl6035-usb-vid"). Optional Properties: - ti,wakeup : To enable the wakeup comparator in probe diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt index b1cb3415e6f1be..c65f71cfaa5c59 100644 --- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt +++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt @@ -16,6 +16,7 @@ adt7461 +/-1C TDM Extended Temp Range I.C at,24c08 i2c serial eeprom (24cxx) atmel,24c02 i2c serial eeprom (24cxx) atmel,at97sc3204t i2c trusted platform module (TPM) +capella,cm32181 CM32181: Ambient Light Sensor catalyst,24c32 i2c serial eeprom dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock dallas,ds1338 I2C RTC with 56-Byte NV RAM diff --git a/Documentation/devicetree/bindings/iio/humidity/dht11.txt b/Documentation/devicetree/bindings/iio/humidity/dht11.txt new file mode 100644 index 00000000000000..ecc24c199fd609 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/humidity/dht11.txt @@ -0,0 +1,14 @@ +* DHT11 humidity/temperature sensor (and compatibles like DHT22) + +Required properties: + - compatible: Should be "dht11" + - gpios: Should specify the GPIO connected to the sensor's data + line, see "gpios property" in + Documentation/devicetree/bindings/gpio/gpio.txt. + +Example: + +humidity_sensor { + compatible = "dht11"; + gpios = <&gpio0 6 0>; +} diff --git a/Documentation/devicetree/bindings/iio/light/tsl2563.txt b/Documentation/devicetree/bindings/iio/light/tsl2563.txt new file mode 100644 index 00000000000000..f91e809e736eae --- /dev/null +++ b/Documentation/devicetree/bindings/iio/light/tsl2563.txt @@ -0,0 +1,19 @@ +* AMS TAOS TSL2563 ambient light sensor + +Required properties: + + - compatible : should be "amstaos,tsl2563" + - reg : the I2C address of the sensor + +Optional properties: + + - amstaos,cover-comp-gain : integer used as multiplier for gain + compensation (default = 1) + +Example: + +tsl2563@29 { + compatible = "amstaos,tsl2563"; + reg = <0x29>; + amstaos,cover-comp-gain = <16>; +}; diff --git a/Documentation/devicetree/bindings/iio/magnetometer/hmc5843.txt b/Documentation/devicetree/bindings/iio/magnetometer/hmc5843.txt new file mode 100644 index 00000000000000..90d5f34db04e2a --- /dev/null +++ b/Documentation/devicetree/bindings/iio/magnetometer/hmc5843.txt @@ -0,0 +1,17 @@ +* Honeywell HMC5843 magnetometer sensor + +Required properties: + + - compatible : should be "honeywell,hmc5843" + - reg : the I2C address of the magnetometer - typically 0x1e + +Optional properties: + + - gpios : should be device tree identifier of the magnetometer DRDY pin + +Example: + +hmc5843@1e { + compatible = "honeywell,hmc5843" + reg = <0x1e>; +}; diff --git a/Documentation/devicetree/bindings/misc/atmel-ssc.txt b/Documentation/devicetree/bindings/misc/atmel-ssc.txt index a45ae08c8ed1d3..60960b2755f435 100644 --- a/Documentation/devicetree/bindings/misc/atmel-ssc.txt +++ b/Documentation/devicetree/bindings/misc/atmel-ssc.txt @@ -6,6 +6,9 @@ Required properties: - atmel,at91sam9g45-ssc: support dma transfer - reg: Should contain SSC registers location and length - interrupts: Should contain SSC interrupt +- clock-names: tuple listing input clock names. + Required elements: "pclk" +- clocks: phandles to input clocks. 
Required properties for devices compatible with "atmel,at91sam9g45-ssc": @@ -20,6 +23,8 @@ ssc0: ssc@fffbc000 { compatible = "atmel,at91rm9200-ssc"; reg = <0xfffbc000 0x4000>; interrupts = <14 4 5>; + clocks = <&ssc0_clk>; + clock-names = "pclk"; }; - DMA transfer: diff --git a/Documentation/devicetree/bindings/misc/bmp085.txt b/Documentation/devicetree/bindings/misc/bmp085.txt index 91dfda2e4e1190..d7a6deb6b21e3d 100644 --- a/Documentation/devicetree/bindings/misc/bmp085.txt +++ b/Documentation/devicetree/bindings/misc/bmp085.txt @@ -8,6 +8,8 @@ Optional properties: - temp-measurement-period: temperature measurement period (milliseconds) - default-oversampling: default oversampling value to be used at startup, value range is 0-3 with rising sensitivity. +- interrupt-parent: should be the phandle for the interrupt controller +- interrupts: interrupt mapping for IRQ Example: @@ -17,4 +19,6 @@ pressure@77 { chip-id = <10>; temp-measurement-period = <100>; default-oversampling = <2>; + interrupt-parent = <&gpio0>; + interrupts = <25 IRQ_TYPE_EDGE_RISING>; }; diff --git a/Documentation/devicetree/bindings/phy/bcm-phy.txt b/Documentation/devicetree/bindings/phy/bcm-phy.txt new file mode 100644 index 00000000000000..3dc8b3d2ffbbee --- /dev/null +++ b/Documentation/devicetree/bindings/phy/bcm-phy.txt @@ -0,0 +1,15 @@ +BROADCOM KONA USB2 PHY + +Required properties: + - compatible: brcm,kona-usb2-phy + - reg: offset and length of the PHY registers + - #phy-cells: must be 0 +Refer to phy/phy-bindings.txt for the generic PHY binding properties + +Example: + + usbphy: usb-phy@3f130000 { + compatible = "brcm,kona-usb2-phy"; + reg = <0x3f130000 0x28>; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/serial/atmel-usart.txt index 2191dcb9f1da98..9c5d19ac935c33 100644 --- a/Documentation/devicetree/bindings/serial/atmel-usart.txt +++ b/Documentation/devicetree/bindings/serial/atmel-usart.txt @@ -6,6 +6,9 @@ Required properties: additional mode or an USART new feature. - reg: Should contain registers location and length - interrupts: Should contain interrupt +- clock-names: tuple listing input clock names. + Required elements: "usart" +- clocks: phandles to input clocks. Optional properties: - atmel,use-dma-rx: use of PDC or DMA for receiving data @@ -26,6 +29,8 @@ Example: compatible = "atmel,at91sam9260-usart"; reg = <0xfff8c000 0x4000>; interrupts = <7>; + clocks = <&usart0_clk>; + clock-names = "usart"; atmel,use-dma-rx; atmel,use-dma-tx; }; @@ -35,6 +40,8 @@ Example: compatible = "atmel,at91sam9260-usart"; reg = <0xf001c000 0x100>; interrupts = <12 4 5>; + clocks = <&usart0_clk>; + clock-names = "usart"; atmel,use-dma-rx; atmel,use-dma-tx; dmas = <&dma0 2 0x3>, diff --git a/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt b/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt new file mode 100644 index 00000000000000..12f3cf834debaf --- /dev/null +++ b/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt @@ -0,0 +1,28 @@ +* Cirrus Logic CLPS711X Universal Asynchronous Receiver/Transmitter (UART) + +Required properties: +- compatible: Should be "cirrus,clps711x-uart". +- reg: Address and length of the register set for the device. +- interrupts: Should contain UART TX and RX interrupt. +- clocks: Should contain UART core clock number. +- syscon: Phandle to SYSCON node, which contain UART control bits. 
+ +Optional properties: +- uart-use-ms: Indicate the UART has modem signal (DCD, DSR, CTS). + +Note: Each UART port should have an alias correctly numbered +in "aliases" node. + +Example: + aliases { + serial0 = &uart1; + }; + + uart1: uart@80000480 { + compatible = "cirrus,clps711x-uart"; + reg = <0x80000480 0x80>; + interrupts = <12 13>; + clocks = <&clks 11>; + syscon = <&syscon1>; + uart-use-ms; + }; diff --git a/Documentation/devicetree/bindings/staging/dwc2.txt b/Documentation/devicetree/bindings/staging/dwc2.txt deleted file mode 100644 index 1a1b7cfa4845bb..00000000000000 --- a/Documentation/devicetree/bindings/staging/dwc2.txt +++ /dev/null @@ -1,15 +0,0 @@ -Platform DesignWare HS OTG USB 2.0 controller ------------------------------------------------------ - -Required properties: -- compatible : "snps,dwc2" -- reg : Should contain 1 register range (address and length) -- interrupts : Should contain 1 interrupt - -Example: - - usb@101c0000 { - compatible = "ralink,rt3050-usb, snps,dwc2"; - reg = <0x101c0000 40000>; - interrupts = <18>; - }; diff --git a/Documentation/devicetree/bindings/staging/xillybus.txt b/Documentation/devicetree/bindings/staging/xillybus.txt new file mode 100644 index 00000000000000..9e316dc2e40fdf --- /dev/null +++ b/Documentation/devicetree/bindings/staging/xillybus.txt @@ -0,0 +1,20 @@ +* Xillybus driver for generic FPGA interface + +Required properties: +- compatible: Should be "xillybus,xillybus-1.00.a" +- reg: Address and length of the register set for the device +- interrupts: Contains one interrupt node, typically consisting of three cells. +- interrupt-parent: the phandle for the interrupt controller that + services interrupts for this device. + +Optional properties: +- dma-coherent: Present if DMA operations are coherent + +Example: + + xillybus@ff200400 { + compatible = "xillybus,xillybus-1.00.a"; + reg = < 0xff200400 0x00000080 >; + interrupts = < 0 40 1 >; + interrupt-parent = <&intc>; + } ; diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt new file mode 100644 index 00000000000000..7c26154b8bbb6a --- /dev/null +++ b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt @@ -0,0 +1,22 @@ +Allwinner SoCs High Speed Timer Controller + +Required properties: + +- compatible : should be "allwinner,sun5i-a13-hstimer" or + "allwinner,sun7i-a20-hstimer" +- reg : Specifies base physical address and size of the registers. 
+- interrupts : The interrupts of these timers (2 for the sun5i IP, 4 for the sun7i + one) +- clocks: phandle to the source clock (usually the AHB clock) + +Example: + +timer@01c60000 { + compatible = "allwinner,sun7i-a20-hstimer"; + reg = <0x01c60000 0x1000>; + interrupts = <0 51 1>, + <0 52 1>, + <0 53 1>, + <0 54 1>; + clocks = <&ahb1_gates 19>; +}; diff --git a/Documentation/devicetree/bindings/usb/ci13xxx-imx.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-imx.txt similarity index 100% rename from Documentation/devicetree/bindings/usb/ci13xxx-imx.txt rename to Documentation/devicetree/bindings/usb/ci-hdrc-imx.txt diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt new file mode 100644 index 00000000000000..b8b6871f116f39 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/dwc2.txt @@ -0,0 +1,29 @@ +Platform DesignWare HS OTG USB 2.0 controller +----------------------------------------------------- + +Required properties: +- compatible : One of: + - brcm,bcm2835-usb: The DWC2 USB controller instance in the BCM2835 SoC. + - snps,dwc2: A generic DWC2 USB controller with default parameters. +- reg : Should contain 1 register range (address and length) +- interrupts : Should contain 1 interrupt +- clocks: clock provider specifier +- clock-names: shall be "otg" +Refer to clk/clock-bindings.txt for generic clock consumer properties + +Optional properties: +- phys: phy provider specifier +- phy-names: shall be "device" +Refer to phy/phy-bindings.txt for generic phy consumer properties + +Example: + + usb@101c0000 { + compatible = "ralink,rt3050-usb, snps,dwc2"; + reg = <0x101c0000 40000>; + interrupts = <18>; + clocks = <&usb_otg_ahb_clk>; + clock-names = "otg"; + phys = <&usbphy>; + phy-names = "usb2-phy"; + }; diff --git a/Documentation/devicetree/bindings/usb/gr-udc.txt b/Documentation/devicetree/bindings/usb/gr-udc.txt new file mode 100644 index 00000000000000..0c5118f7a916b7 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/gr-udc.txt @@ -0,0 +1,28 @@ +USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC. + +The GRUSBDC USB Device Controller core is available in the GRLIB VHDL +IP core library. + +Note: In the ordinary environment for the core, a Leon SPARC system, +these properties are built from information in the AMBA plug&play. + +Required properties: + +- name : Should be "GAISLER_USBDC" or "01_021" + +- reg : Address and length of the register set for the device + +- interrupts : Interrupt numbers for this device + +Optional properties: + +- epobufsizes : An array of buffer sizes for OUT endpoints. If the property is + not present, or for endpoints outside of the array, 1024 is assumed by + the driver. + +- epibufsizes : An array of buffer sizes for IN endpoints. If the property is + not present, or for endpoints outside of the array, 1024 is assumed by + the driver. + +For further information look in the documentation for the GLIB IP core library: +http://www.gaisler.com/products/grlib/grip.pdf diff --git a/Documentation/devicetree/bindings/usb/omap-usb.txt b/Documentation/devicetree/bindings/usb/omap-usb.txt index 090e5e22bd2b83..c495135115cb7a 100644 --- a/Documentation/devicetree/bindings/usb/omap-usb.txt +++ b/Documentation/devicetree/bindings/usb/omap-usb.txt @@ -87,6 +87,8 @@ Required properties: e.g. USB3 PHY and SATA PHY on OMAP5. "ti,control-phy-dra7usb2" - if it has power down register like USB2 PHY on DRA7 platform. 
+ "ti,control-phy-am437usb2" - if it has power down register like USB2 PHY on + AM437 platform. - reg : Address and length of the register set for the device. It contains the address of "otghs_control" for control-phy-otghs or "power" register for other types. diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index edbb8d88c85e90..f29cd78b6698ab 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -9,6 +9,7 @@ aeroflexgaisler Aeroflex Gaisler AB ak Asahi Kasei Corp. altr Altera Corp. amcc Applied Micro Circuits Corporation (APM, formally AMCC) +amstaos AMS-Taos Inc. apm Applied Micro Circuits Corporation (APM) arm ARM Ltd. atmel Atmel Corporation diff --git a/Documentation/driver-model/design-patterns.txt b/Documentation/driver-model/design-patterns.txt new file mode 100644 index 00000000000000..ba7b2df6490469 --- /dev/null +++ b/Documentation/driver-model/design-patterns.txt @@ -0,0 +1,116 @@ + +Device Driver Design Patterns +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This document describes a few common design patterns found in device drivers. +It is likely that subsystem maintainers will ask driver developers to +conform to these design patterns. + +1. State Container +2. container_of() + + +1. State Container +~~~~~~~~~~~~~~~~~~ + +While the kernel contains a few device drivers that assume that they will +only be probed() once on a certain system (singletons), it is custom to assume +that the device the driver binds to will appear in several instances. This +means that the probe() function and all callbacks need to be reentrant. + +The most common way to achieve this is to use the state container design +pattern. It usually has this form: + +struct foo { + spinlock_t lock; /* Example member */ + (...) +}; + +static int foo_probe(...) +{ + struct foo *foo; + + foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL); + if (!foo) + return -ENOMEM; + spin_lock_init(&foo->lock); + (...) +} + +This will create an instance of struct foo in memory every time probe() is +called. This is our state container for this instance of the device driver. +Of course it is then necessary to always pass this instance of the +state around to all functions that need access to the state and its members. + +For example, if the driver is registering an interrupt handler, you would +pass around a pointer to struct foo like this: + +static irqreturn_t foo_handler(int irq, void *arg) +{ + struct foo *foo = arg; + (...) +} + +static int foo_probe(...) +{ + struct foo *foo; + + (...) + ret = request_irq(irq, foo_handler, 0, "foo", foo); +} + +This way you always get a pointer back to the correct instance of foo in +your interrupt handler. + + +2. container_of() +~~~~~~~~~~~~~~~~~ + +Continuing on the above example we add an offloaded work: + +struct foo { + spinlock_t lock; + struct workqueue_struct *wq; + struct work_struct offload; + (...) +}; + +static void foo_work(struct work_struct *work) +{ + struct foo *foo = container_of(work, struct foo, offload); + + (...) +} + +static irqreturn_t foo_handler(int irq, void *arg) +{ + struct foo *foo = arg; + + queue_work(foo->wq, &foo->offload); + (...) +} + +static int foo_probe(...) +{ + struct foo *foo; + + foo->wq = create_singlethread_workqueue("foo-wq"); + INIT_WORK(&foo->offload, foo_work); + (...) 
+} + +The design pattern is the same for an hrtimer or something similar that will +return a single argument which is a pointer to a struct member in the +callback. + +container_of() is a macro defined in + +What container_of() does is to obtain a pointer to the containing struct from +a pointer to a member by a simple subtraction using the offsetof() macro from +standard C, which allows something similar to object oriented behaviours. +Notice that the contained member must not be a pointer, but an actual member +for this to work. + +We can see here that we avoid having global pointers to our struct foo * +instance this way, while still keeping the number of parameters passed to the +work function to a single pointer. diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 5bdc8cb5fc2855..4f7897e99cba8a 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -242,6 +242,8 @@ IIO devm_iio_device_free() devm_iio_trigger_alloc() devm_iio_trigger_free() + devm_iio_device_register() + devm_iio_device_unregister() IO region devm_request_region() diff --git a/Documentation/extcon/porting-android-switch-class b/Documentation/extcon/porting-android-switch-class index 5377f63179613e..49c81caef84dc4 100644 --- a/Documentation/extcon/porting-android-switch-class +++ b/Documentation/extcon/porting-android-switch-class @@ -50,7 +50,7 @@ so that they are still compatible with legacy userspace processes. Extcon's extended features for switch device drivers with complex features usually required magic numbers in state value of switch_dev. With extcon, such magic numbers that - support multiple cables ( + support multiple cables are no more required or supported. 1. Define cable names at edev->supported_cable. 2. (Recommended) remove print_state callback. @@ -114,11 +114,8 @@ exclusive, the two cables cannot be in ATTACHED state simulteneously. ****** ABI Location - If "CONFIG_ANDROID" is enabled and "CONFIG_ANDROID_SWITCH" is -disabled, /sys/class/switch/* are created as symbolic links to -/sys/class/extcon/*. Because CONFIG_ANDROID_SWITCH creates -/sys/class/switch directory, we disable symboling linking if -CONFIG_ANDROID_SWITCH is enabled. + If "CONFIG_ANDROID" is enabled, /sys/class/switch/* are created +as symbolic links to /sys/class/extcon/*. The two files of switch class, name and state, are provided with extcon, too. When the multistate support (STEP 2 of CHAPTER 1.) is diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO index 8148a47fc70e17..0091a8215ac150 100644 --- a/Documentation/ja_JP/HOWTO +++ b/Documentation/ja_JP/HOWTO @@ -149,7 +149,7 @@ linux-api@ver.kernel.org に送ることを勧めます。 この他にパッチを作る方法についてのよくできた記述は- "The Perfect Patch" - http://userweb.kernel.org/~akpm/stuff/tpp.txt + http://www.ozlabs.org/~akpm/stuff/tpp.txt "Linux kernel patch submission format" http://linux.yyz.us/patch-format.html @@ -622,7 +622,7 @@ Linux カーネルコミュニティは、一度に大量のコードの塊を これについて全てがどのようにあるべきかについての詳細は、以下のドキュメ ントの ChangeLog セクションを見てください- "The Perfect Patch" - http://userweb.kernel.org/~akpm/stuff/tpp.txt + http://www.ozlabs.org/~akpm/stuff/tpp.txt これらのどれもが、時にはとても困難です。これらの慣例を完璧に実施するに は数年かかるかもしれません。これは継続的な改善のプロセスであり、そのた diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 50680a59a2ff9a..4252af6ffda18d 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -774,6 +774,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted. 
disable= [IPV6] See Documentation/networking/ipv6.txt. + disable_cpu_apicid= [X86,APIC,SMP] + Format: + The number of initial APIC ID for the + corresponding CPU to be disabled at boot, + mostly used for the kdump 2nd kernel to + disable BSP to wake up multiple CPUs without + causing system reset or hang due to sending + INIT from AP to BSP. + disable_ddw [PPC/PSERIES] Disable Dynamic DMA Window support. Use this if to workaround buggy firmware. @@ -881,6 +890,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted. The xen output can only be used by Xen PV guests. + edac_report= [HW,EDAC] Control how to report EDAC event + Format: {"on" | "off" | "force"} + on: enable EDAC to report H/W event. May be overridden + by other higher priority error reporting module. + off: disable H/W event reporting through EDAC. + force: enforce the use of EDAC to report H/W event. + default: on. + ekgdboc= [X86,KGDB] Allow early kernel console debugging ekgdboc=kbd @@ -890,6 +907,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. edd= [EDD] Format: {"off" | "on" | "skip[mbr]"} + efi= [EFI] + Format: { "old_map" } + old_map [X86-64]: switch to the old ioremap-based EFI + runtime services mapping. 32-bit still uses this one by + default. + efi_no_storage_paranoia [EFI; X86] Using this parameter you can use more than 50% of your efi variable storage. Use this parameter only if @@ -1529,6 +1552,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. * atapi_dmadir: Enable ATAPI DMADIR bridge support + * disable: Disable this device. + If there are multiple matching configurations changing the same attribute, the last one is used. @@ -1992,6 +2017,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. noapic [SMP,APIC] Tells the kernel to not make use of any IOAPICs that may be present in the system. + nokaslr [X86] + Disable kernel base offset ASLR (Address Space + Layout Randomization) if built into the kernel. + noautogroup Disable scheduler automatic task group creation. nobats [PPC] Do not use BATs for mapping kernel lowmem @@ -2625,7 +2654,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. for RCU-preempt, and "s" for RCU-sched, and "N" is the CPU number. This reduces OS jitter on the offloaded CPUs, which can be useful for HPC and - real-time workloads. It can also improve energy efficiency for asymmetric multiprocessors. @@ -2641,8 +2669,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. periodically wake up to do the polling. rcutree.blimit= [KNL] - Set maximum number of finished RCU callbacks to process - in one batch. + Set maximum number of finished RCU callbacks to + process in one batch. rcutree.rcu_fanout_leaf= [KNL] Increase the number of CPUs assigned to each @@ -2661,8 +2689,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. value is one, and maximum value is HZ. rcutree.qhimark= [KNL] - Set threshold of queued - RCU callbacks over which batch limiting is disabled. + Set threshold of queued RCU callbacks beyond which + batch limiting is disabled. rcutree.qlowmark= [KNL] Set threshold of queued RCU callbacks below which diff --git a/Documentation/kmsg/s390/zcrypt b/Documentation/kmsg/s390/zcrypt new file mode 100644 index 00000000000000..7fb2087409d6d1 --- /dev/null +++ b/Documentation/kmsg/s390/zcrypt @@ -0,0 +1,20 @@ +/*? 
+ * Text: "Cryptographic device %x failed and was set offline\n" + * Severity: Error + * Parameter: + * @1: device index + * Description: + * A cryptographic device failed to process a cryptographic request. + * The cryptographic device driver could not correct the error and + * set the device offline. The application that issued the + * request received an indication that the request has failed. + * User action: + * Use the lszcrypt command to confirm that the cryptographic + * hardware is still configured to your LPAR or z/VM guest virtual + * machine. If the device is available to your Linux instance the + * command output contains a line that begins with 'card', + * where is the two-digit decimal number in the message text. + * After ensuring that the device is available, use the chzcrypt command to + * set it online again. + * If the error persists, contact your support organization. + */ diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO index 680e64635958a1..dc2ff8f611e021 100644 --- a/Documentation/ko_KR/HOWTO +++ b/Documentation/ko_KR/HOWTO @@ -122,7 +122,7 @@ mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다. 올바른 패치들을 만드는 법에 관한 훌륭한 다른 문서들이 있다. "The Perfect Patch" - http://userweb.kernel.org/~akpm/stuff/tpp.txt + http://www.ozlabs.org/~akpm/stuff/tpp.txt "Linux kernel patch submission format" http://linux.yyz.us/patch-format.html @@ -213,7 +213,7 @@ Documentation/DocBook/ 디렉토리 내에서 만들어지며 PDF, Postscript, H 것은 Linux Cross-Reference project이며 그것은 자기 참조 방식이며 소스코드를 인덱스된 웹 페이지들의 형태로 보여준다. 최신의 멋진 커널 코드 저장소는 다음을 통하여 참조할 수 있다. - http://users.sosdg.org/~qiyong/lxr/ + http://lxr.linux.no/+trees 개발 프로세스 @@ -222,20 +222,20 @@ Documentation/DocBook/ 디렉토리 내에서 만들어지며 PDF, Postscript, H 리눅스 커널 개발 프로세스는 현재 몇몇 다른 메인 커널 "브랜치들"과 서브시스템에 특화된 커널 브랜치들로 구성된다. 몇몇 다른 메인 브랜치들은 다음과 같다. - - main 2.6.x 커널 트리 - - 2.6.x.y - 안정된 커널 트리 - - 2.6.x -git 커널 패치들 - - 2.6.x -mm 커널 패치들 + - main 3.x 커널 트리 + - 3.x.y - 안정된 커널 트리 + - 3.x -git 커널 패치들 - 서브시스템을 위한 커널 트리들과 패치들 + - 3.x - 통합 테스트를 위한 next 커널 트리 -2.6.x 커널 트리 +3.x 커널 트리 --------------- -2.6.x 커널들은 Linux Torvalds가 관리하며 kernel.org의 pub/linux/kernel/v2.6/ +3.x 커널들은 Linux Torvalds가 관리하며 kernel.org의 pub/linux/kernel/v3.x/ 디렉토리에서 참조될 수 있다.개발 프로세스는 다음과 같다. - 새로운 커널이 배포되자마자 2주의 시간이 주어진다. 이 기간동은 메인테이너들은 큰 diff들을 Linus에게 제출할 수 있다. 대개 이 패치들은 - 몇 주 동안 -mm 커널내에 이미 있었던 것들이다. 큰 변경들을 제출하는 데 + 몇 주 동안 -next 커널내에 이미 있었던 것들이다. 큰 변경들을 제출하는 데 선호되는 방법은 git(커널의 소스 관리 툴, 더 많은 정보들은 http://git.or.cz/ 에서 참조할 수 있다)를 사용하는 것이지만 순수한 패치파일의 형식으로 보내는 것도 무관하다. @@ -262,20 +262,20 @@ Andrew Morton의 글이 있다. 버그의 상황에 따라 배포되는 것이지 미리정해 놓은 시간에 따라 배포되는 것은 아니기 때문이다." -2.6.x.y - 안정 커널 트리 +3.x.y - 안정 커널 트리 ------------------------ -4 자리 숫자로 이루어진 버젼의 커널들은 -stable 커널들이다. 그것들은 2.6.x +3 자리 숫자로 이루어진 버젼의 커널들은 -stable 커널들이다. 그것들은 3.x 커널에서 발견된 큰 회귀들이나 보안 문제들 중 비교적 작고 중요한 수정들을 포함한다. 이것은 가장 최근의 안정적인 커널을 원하는 사용자에게 추천되는 브랜치이며, 개발/실험적 버젼을 테스트하는 것을 돕고자 하는 사용자들과는 별로 관련이 없다. -어떤 2.6.x.y 커널도 사용할 수 없다면 그때는 가장 높은 숫자의 2.6.x +어떤 3.x.y 커널도 사용할 수 없다면 그때는 가장 높은 숫자의 3.x 커널이 현재의 안정 커널이다. -2.6.x.y는 "stable" 팀에 의해 관리되며 거의 매번 격주로 +3.x.y는 "stable" 팀에 의해 관리되며 거의 매번 격주로 배포된다. 커널 트리 문서들 내에 Documentation/stable_kernel_rules.txt 파일은 어떤 @@ -283,84 +283,46 @@ Andrew Morton의 글이 있다. 진행되는지를 설명한다. -2.6.x -git 패치들 +3.x -git 패치들 ------------------ git 저장소(그러므로 -git이라는 이름이 붙음)에는 날마다 관리되는 Linus의 커널 트리의 snapshot 들이 있다. 이 패치들은 일반적으로 날마다 배포되며 Linus의 트리의 현재 상태를 나타낸다. 이 패치들은 정상적인지 조금도 살펴보지 않고 자동적으로 생성된 것이므로 -rc 커널들 보다도 더 실험적이다. -2.6.x -mm 커널 패치들 ---------------------- -Andrew Morton에 의해 배포된 실험적인 커널 패치들이다. 
Andrew는 모든 다른 -서브시스템 커널 트리와 패치들을 가져와서 리눅스 커널 메일링 리스트로 -온 많은 패치들과 한데 묶는다. 이 트리는 새로운 기능들과 패치들을 위한 -장소를 제공하는 역할을 한다. 하나의 패치가 -mm에 한동안 있으면서 그 가치가 -증명되게 되면 Andrew나 서브시스템 메인테이너는 그것을 메인라인에 포함시키기 -위하여 Linus에게 보낸다. - -커널 트리에 포함하고 싶은 모든 새로운 패치들은 Linus에게 보내지기 전에 --mm 트리에서 테스트를 하는 것을 적극 추천한다. - -이 커널들은 안정되게 사용할 시스템에서에 실행하는 것은 적합하지 않으며 -다른 브랜치들의 어떤 것들보다 위험하다. - -여러분이 커널 개발 프로세스를 돕길 원한다면 이 커널 배포들을 사용하고 -테스트한 후 어떤 문제를 발견하거나 또는 모든 것이 잘 동작한다면 리눅스 -커널 메일링 리스트로 피드백을 해달라. - -이 커널들은 일반적으로 모든 다른 실험적인 패치들과 배포될 당시의 -사용가능한 메인라인 -git 커널들의 몇몇 변경을 포함한다. - --mm 커널들은 정해진 일정대로 배포되지 않는다. 하지만 대개 몇몇 -mm 커널들은 -각 -rc 커널(1부터 3이 흔함) 사이에서 배포된다. - 서브시스템 커널 트리들과 패치들 ------------------------------- -많은 다른 커널 서브시스템 개발자들은 커널의 다른 부분들에서 무슨 일이 -일어나고 있는지를 볼수 있도록 그들의 개발 트리를 공개한다. 이 트리들은 -위에서 설명하였던 것 처럼 -mm 커널 배포들로 합쳐진다. - -다음은 활용가능한 커널 트리들을 나열한다. - git trees: - - Kbuild development tree, Sam Ravnborg < sam@ravnborg.org> - git.kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git - - - ACPI development tree, Len Brown - git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git - - - Block development tree, Jens Axboe - git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git - - - DRM development tree, Dave Airlie - git.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git - - - ia64 development tree, Tony Luck < tony.luck@intel.com> - git.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git - - - infiniband, Roland Dreier - git.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git - - - libata, Jeff Garzik - git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git - - - network drivers, Jeff Garzik - git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git - - - pcmcia, Dominik Brodowski < linux@dominikbrodowski.net> - git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git - - - SCSI, James Bottomley < James.Bottomley@SteelEye.com> - git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git - - quilt trees: - - USB, PCI, Driver Core, and I2C, Greg Kroah-Hartman < gregkh@linuxfoundation.org> - kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ - - x86-64, partly i386, Andi Kleen < ak@suse.de> - ftp.firstfloor.org:/pub/ak/x86_64/quilt/ - - 다른 커널 트리들은 http://kernel.org/git와 MAINTAINERS 파일에서 참조할 수 - 있다. +다양한 커널 서브시스템의 메인테이너들 --- 그리고 많은 커널 서브시스템 개발자들 +--- 은 그들의 현재 개발 상태를 소스 저장소로 노출한다. 이를 통해 다른 사람들도 +커널의 다른 영역에 어떤 변화가 이루어지고 있는지 알 수 있다. 급속히 개발이 +진행되는 영역이 있고 그렇지 않은 영역이 있으므로, 개발자는 다른 개발자가 제출한 +수정 사항과 자신의 수정사항의 충돌이나 동일한 일을 동시에 두사람이 따로 +진행하는 사태를 방지하기 위해 급속히 개발이 진행되고 있는 영역에 작업의 +베이스를 맞춰줄 것이 요구된다. + +대부분의 이러한 저장소는 git 트리지만, git이 아닌 SCM으로 관리되거나, quilt +시리즈로 제공되는 패치들도 존재한다. 이러한 서브시스템 저장소들은 MAINTAINERS +파일에 나열되어 있다. 대부분은 http://git.kernel.org 에서 볼 수 있다. + +제안된 패치는 서브시스템 트리에 커밋되기 전에 메일링 리스트를 통해 +리뷰된다(아래의 관련 섹션을 참고하기 바란다). 일부 커널 서브시스템의 경우, 이 +리뷰 프로세스는 patchwork라는 도구를 통해 추적된다. patchwork은 등록된 패치와 +패치에 대한 코멘트, 패치의 버전을 볼 수 있는 웹 인터페이스를 제공하고, +메인테이너는 패치를 리뷰 중, 리뷰 통과, 또는 반려됨으로 표시할 수 있다. +대부분의 이러한 patchwork 사이트는 http://patchwork.kernel.org/ 또는 +http://patchwork.ozlabs.org/ 에 나열되어 있다. + +3.x - 통합 테스트를 위한 next 커널 트리 +----------------------------------------- +서브시스템 트리들의 변경사항들은 mainline 3.x 트리로 들어오기 전에 통합 +테스트를 거쳐야 한다. 이런 목적으로, 모든 서브시스템 트리의 변경사항을 거의 +매일 받아가는 특수한 테스트 저장소가 존재한다: + http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git + http://linux.f-seidel.de/linux-next/pmwiki/ + +이런 식으로, -next 커널을 통해 다음 머지 기간에 메인라인 커널에 어떤 변경이 +가해질 것인지 간략히 알 수 있다. 모험심 강한 테스터라면 -next 커널에서 테스트를 +수행하는 것도 좋을 것이다. 버그 보고 --------- @@ -597,7 +559,7 @@ Pat이라는 이름을 가진 여자가 있을 수도 있는 것이다. 
리눅 이것이 무엇인지 더 자세한 것을 알고 싶다면 다음 문서의 ChageLog 항을 봐라. "The Perfect Patch" - http://userweb.kernel.org/~akpm/stuff/tpp.txt + http://www.ozlabs.org/~akpm/stuff/tpp.txt diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt index c5182bb2c16c3c..f87241dfed8765 100644 --- a/Documentation/kobject.txt +++ b/Documentation/kobject.txt @@ -342,7 +342,10 @@ kset use: When you are finished with the kset, call: void kset_unregister(struct kset *kset); -to destroy it. +to destroy it. This removes the kset from sysfs and decrements its reference +count. When the reference count goes to zero, the kset will be released. +Because other references to the kset may still exist, the release may happen +after kset_unregister() returns. An example of using a kset can be seen in the samples/kobject/kset-example.c file in the kernel tree. diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index c8c42e64e953b4..102dc19c411980 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -194,18 +194,22 @@ There are some minimal guarantees that may be expected of a CPU: (*) On any given CPU, dependent memory accesses will be issued in order, with respect to itself. This means that for: - Q = P; D = *Q; + ACCESS_ONCE(Q) = P; smp_read_barrier_depends(); D = ACCESS_ONCE(*Q); the CPU will issue the following memory operations: Q = LOAD P, D = LOAD *Q - and always in that order. + and always in that order. On most systems, smp_read_barrier_depends() + does nothing, but it is required for DEC Alpha. The ACCESS_ONCE() + is required to prevent compiler mischief. Please note that you + should normally use something like rcu_dereference() instead of + open-coding smp_read_barrier_depends(). (*) Overlapping loads and stores within a particular CPU will appear to be ordered within that CPU. This means that for: - a = *X; *X = b; + a = ACCESS_ONCE(*X); ACCESS_ONCE(*X) = b; the CPU will only issue the following sequence of memory operations: @@ -213,7 +217,7 @@ There are some minimal guarantees that may be expected of a CPU: And for: - *X = c; d = *X; + ACCESS_ONCE(*X) = c; d = ACCESS_ONCE(*X); the CPU will only issue: @@ -224,6 +228,12 @@ There are some minimal guarantees that may be expected of a CPU: And there are a number of things that _must_ or _must_not_ be assumed: + (*) It _must_not_ be assumed that the compiler will do what you want with + memory references that are not protected by ACCESS_ONCE(). Without + ACCESS_ONCE(), the compiler is within its rights to do all sorts + of "creative" transformations, which are covered in the Compiler + Barrier section. + (*) It _must_not_ be assumed that independent loads and stores will be issued in the order given. This means that for: @@ -371,33 +381,44 @@ Memory barriers come in four basic varieties: And a couple of implicit varieties: - (5) LOCK operations. + (5) ACQUIRE operations. This acts as a one-way permeable barrier. It guarantees that all memory - operations after the LOCK operation will appear to happen after the LOCK - operation with respect to the other components of the system. + operations after the ACQUIRE operation will appear to happen after the + ACQUIRE operation with respect to the other components of the system. + ACQUIRE operations include LOCK operations and smp_load_acquire() + operations. - Memory operations that occur before a LOCK operation may appear to happen - after it completes. + Memory operations that occur before an ACQUIRE operation may appear to + happen after it completes. 
- A LOCK operation should almost always be paired with an UNLOCK operation. + An ACQUIRE operation should almost always be paired with a RELEASE + operation. - (6) UNLOCK operations. + (6) RELEASE operations. This also acts as a one-way permeable barrier. It guarantees that all - memory operations before the UNLOCK operation will appear to happen before - the UNLOCK operation with respect to the other components of the system. + memory operations before the RELEASE operation will appear to happen + before the RELEASE operation with respect to the other components of the + system. RELEASE operations include UNLOCK operations and + smp_store_release() operations. - Memory operations that occur after an UNLOCK operation may appear to + Memory operations that occur after a RELEASE operation may appear to happen before it completes. - LOCK and UNLOCK operations are guaranteed to appear with respect to each - other strictly in the order specified. + The use of ACQUIRE and RELEASE operations generally precludes the need + for other sorts of memory barrier (but note the exceptions mentioned in + the subsection "MMIO write barrier"). In addition, a RELEASE+ACQUIRE + pair is -not- guaranteed to act as a full memory barrier. However, after + an ACQUIRE on a given variable, all memory accesses preceding any prior + RELEASE on that same variable are guaranteed to be visible. In other + words, within a given variable's critical section, all accesses of all + previous critical sections for that variable are guaranteed to have + completed. - The use of LOCK and UNLOCK operations generally precludes the need for - other sorts of memory barrier (but note the exceptions mentioned in the - subsection "MMIO write barrier"). + This means that ACQUIRE acts as a minimal "acquire" operation and + RELEASE acts as a minimal "release" operation. Memory barriers are only required where there's a possibility of interaction @@ -450,14 +471,14 @@ The usage requirements of data dependency barriers are a little subtle, and it's not always obvious that they're needed. To illustrate, consider the following sequence of events: - CPU 1 CPU 2 - =============== =============== + CPU 1 CPU 2 + =============== =============== { A == 1, B == 2, C = 3, P == &A, Q == &C } B = 4; - P = &B - Q = P; - D = *Q; + ACCESS_ONCE(P) = &B + Q = ACCESS_ONCE(P); + D = *Q; There's a clear data dependency here, and it would seem that by the end of the sequence, Q must be either &A or &B, and that: @@ -477,15 +498,15 @@ Alpha). To deal with this, a data dependency barrier or better must be inserted between the address load and the data load: - CPU 1 CPU 2 - =============== =============== + CPU 1 CPU 2 + =============== =============== { A == 1, B == 2, C = 3, P == &A, Q == &C } B = 4; - P = &B - Q = P; - - D = *Q; + ACCESS_ONCE(P) = &B + Q = ACCESS_ONCE(P); + + D = *Q; This enforces the occurrence of one of the two implications, and prevents the third possibility from arising. @@ -500,25 +521,26 @@ odd-numbered bank is idle, one can see the new value of the pointer P (&B), but the old value of the variable B (2). 
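As a sketch only, the publish/subscribe sequence above can be written with the named kernel primitives; smp_wmb() stands in for the write barrier and smp_read_barrier_depends() for the data dependency barrier (in real code these are normally hidden inside helpers such as rcu_assign_pointer() and rcu_dereference()):

	/* CPU 1 */
	B = 4;
	smp_wmb();			/* order the store to B before the store to P */
	ACCESS_ONCE(P) = &B;

	/* CPU 2 */
	Q = ACCESS_ONCE(P);
	smp_read_barrier_depends();	/* required on DEC Alpha, a no-op elsewhere */
	D = *Q;				/* guaranteed to observe B == 4 if Q == &B */
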
-Another example of where data dependency barriers might by required is where a +Another example of where data dependency barriers might be required is where a number is read from memory and then used to calculate the index for an array access: - CPU 1 CPU 2 - =============== =============== + CPU 1 CPU 2 + =============== =============== { M[0] == 1, M[1] == 2, M[3] = 3, P == 0, Q == 3 } M[1] = 4; - P = 1 - Q = P; - - D = M[Q]; + ACCESS_ONCE(P) = 1 + Q = ACCESS_ONCE(P); + + D = M[Q]; -The data dependency barrier is very important to the RCU system, for example. -See rcu_dereference() in include/linux/rcupdate.h. This permits the current -target of an RCU'd pointer to be replaced with a new modified target, without -the replacement target appearing to be incompletely initialised. +The data dependency barrier is very important to the RCU system, +for example. See rcu_assign_pointer() and rcu_dereference() in +include/linux/rcupdate.h. This permits the current target of an RCU'd +pointer to be replaced with a new modified target, without the replacement +target appearing to be incompletely initialised. See also the subsection on "Cache Coherency" for a more thorough example. @@ -530,24 +552,190 @@ A control dependency requires a full read memory barrier, not simply a data dependency barrier to make it work correctly. Consider the following bit of code: - q = &a; - if (p) { - - q = &b; + q = ACCESS_ONCE(a); + if (q) { + /* BUG: No data dependency!!! */ + p = ACCESS_ONCE(b); } - x = *q; This will not have the desired effect because there is no actual data -dependency, but rather a control dependency that the CPU may short-circuit by -attempting to predict the outcome in advance. In such a case what's actually -required is: +dependency, but rather a control dependency that the CPU may short-circuit +by attempting to predict the outcome in advance, so that other CPUs see +the load from b as having happened before the load from a. In such a +case what's actually required is: - q = &a; - if (p) { + q = ACCESS_ONCE(a); + if (q) { - q = &b; + p = ACCESS_ONCE(b); + } + +However, stores are not speculated. This means that ordering -is- provided +in the following example: + + q = ACCESS_ONCE(a); + if (ACCESS_ONCE(q)) { + ACCESS_ONCE(b) = p; + } + +Please note that ACCESS_ONCE() is not optional! Without the ACCESS_ONCE(), +the compiler is within its rights to transform this example: + + q = a; + if (q) { + b = p; /* BUG: Compiler can reorder!!! */ + do_something(); + } else { + b = p; /* BUG: Compiler can reorder!!! */ + do_something_else(); + } + +into this, which of course defeats the ordering: + + b = p; + q = a; + if (q) + do_something(); + else + do_something_else(); + +Worse yet, if the compiler is able to prove (say) that the value of +variable 'a' is always non-zero, it would be well within its rights +to optimize the original example by eliminating the "if" statement +as follows: + + q = a; + b = p; /* BUG: Compiler can reorder!!! */ + do_something(); + +The solution is again ACCESS_ONCE(), which preserves the ordering between +the load from variable 'a' and the store to variable 'b': + + q = ACCESS_ONCE(a); + if (q) { + ACCESS_ONCE(b) = p; + do_something(); + } else { + ACCESS_ONCE(b) = p; + do_something_else(); + } + +You could also use barrier() to prevent the compiler from moving +the stores to variable 'b', but barrier() would not prevent the +compiler from proving to itself that a==1 always, so ACCESS_ONCE() +is also needed. 
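If the surrounding code allows it, an alternative sketch is to make the store a RELEASE operation, so that the ordering no longer depends on the compiler preserving the conditional; smp_store_release() is the RELEASE primitive mentioned earlier, and this is an illustration rather than a recommendation for every case:

	q = ACCESS_ONCE(a);
	if (q) {
		smp_store_release(&b, p);	/* load from 'a' is ordered before this store */
		do_something();
	} else {
		smp_store_release(&b, p);
		do_something_else();
	}
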
+ +It is important to note that control dependencies absolutely require a +a conditional. For example, the following "optimized" version of +the above example breaks ordering: + + q = ACCESS_ONCE(a); + ACCESS_ONCE(b) = p; /* BUG: No ordering vs. load from a!!! */ + if (q) { + /* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */ + do_something(); + } else { + /* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */ + do_something_else(); } - x = *q; + +It is of course legal for the prior load to be part of the conditional, +for example, as follows: + + if (ACCESS_ONCE(a) > 0) { + ACCESS_ONCE(b) = q / 2; + do_something(); + } else { + ACCESS_ONCE(b) = q / 3; + do_something_else(); + } + +This will again ensure that the load from variable 'a' is ordered before the +stores to variable 'b'. + +In addition, you need to be careful what you do with the local variable 'q', +otherwise the compiler might be able to guess the value and again remove +the needed conditional. For example: + + q = ACCESS_ONCE(a); + if (q % MAX) { + ACCESS_ONCE(b) = p; + do_something(); + } else { + ACCESS_ONCE(b) = p; + do_something_else(); + } + +If MAX is defined to be 1, then the compiler knows that (q % MAX) is +equal to zero, in which case the compiler is within its rights to +transform the above code into the following: + + q = ACCESS_ONCE(a); + ACCESS_ONCE(b) = p; + do_something_else(); + +This transformation loses the ordering between the load from variable 'a' +and the store to variable 'b'. If you are relying on this ordering, you +should do something like the following: + + q = ACCESS_ONCE(a); + BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */ + if (q % MAX) { + ACCESS_ONCE(b) = p; + do_something(); + } else { + ACCESS_ONCE(b) = p; + do_something_else(); + } + +Finally, control dependencies do -not- provide transitivity. This is +demonstrated by two related examples: + + CPU 0 CPU 1 + ===================== ===================== + r1 = ACCESS_ONCE(x); r2 = ACCESS_ONCE(y); + if (r1 >= 0) if (r2 >= 0) + ACCESS_ONCE(y) = 1; ACCESS_ONCE(x) = 1; + + assert(!(r1 == 1 && r2 == 1)); + +The above two-CPU example will never trigger the assert(). However, +if control dependencies guaranteed transitivity (which they do not), +then adding the following two CPUs would guarantee a related assertion: + + CPU 2 CPU 3 + ===================== ===================== + ACCESS_ONCE(x) = 2; ACCESS_ONCE(y) = 2; + + assert(!(r1 == 2 && r2 == 2 && x == 1 && y == 1)); /* FAILS!!! */ + +But because control dependencies do -not- provide transitivity, the +above assertion can fail after the combined four-CPU example completes. +If you need the four-CPU example to provide ordering, you will need +smp_mb() between the loads and stores in the CPU 0 and CPU 1 code fragments. + +In summary: + + (*) Control dependencies can order prior loads against later stores. + However, they do -not- guarantee any other sort of ordering: + Not prior loads against later loads, nor prior stores against + later anything. If you need these other forms of ordering, + use smb_rmb(), smp_wmb(), or, in the case of prior stores and + later loads, smp_mb(). + + (*) Control dependencies require at least one run-time conditional + between the prior load and the subsequent store. If the compiler + is able to optimize the conditional away, it will have also + optimized away the ordering. Careful use of ACCESS_ONCE() can + help to preserve the needed conditional. + + (*) Control dependencies require that the compiler avoid reordering the + dependency into nonexistence. 
Careful use of ACCESS_ONCE() or + barrier() can help to preserve your control dependency. Please + see the Compiler Barrier section for more information. + + (*) Control dependencies do -not- provide transitivity. If you + need transitivity, use smp_mb(). SMP BARRIER PAIRING @@ -561,23 +749,23 @@ barrier, though a general barrier would also be viable. Similarly a read barrier or a data dependency barrier should always be paired with at least an write barrier, though, again, a general barrier is viable: - CPU 1 CPU 2 - =============== =============== - a = 1; + CPU 1 CPU 2 + =============== =============== + ACCESS_ONCE(a) = 1; - b = 2; x = b; - - y = a; + ACCESS_ONCE(b) = 2; x = ACCESS_ONCE(b); + + y = ACCESS_ONCE(a); Or: - CPU 1 CPU 2 - =============== =============================== + CPU 1 CPU 2 + =============== =============================== a = 1; - b = &a; x = b; - - y = *x; + ACCESS_ONCE(b) = &a; x = ACCESS_ONCE(b); + + y = *x; Basically, the read barrier always has to be there, even though it can be of the "weaker" type. @@ -586,13 +774,13 @@ the "weaker" type. match the loads after the read barrier or the data dependency barrier, and vice versa: - CPU 1 CPU 2 - =============== =============== - a = 1; }---- --->{ v = c - b = 2; } \ / { w = d - \ - c = 3; } / \ { x = a; - d = 4; }---- --->{ y = b; + CPU 1 CPU 2 + =================== =================== + ACCESS_ONCE(a) = 1; }---- --->{ v = ACCESS_ONCE(c); + ACCESS_ONCE(b) = 2; } \ / { w = ACCESS_ONCE(d); + \ + ACCESS_ONCE(c) = 3; } / \ { x = ACCESS_ONCE(a); + ACCESS_ONCE(d) = 4; }---- --->{ y = ACCESS_ONCE(b); EXAMPLES OF MEMORY BARRIER SEQUENCES @@ -882,12 +1070,12 @@ cache it for later use. Consider: - CPU 1 CPU 2 + CPU 1 CPU 2 ======================= ======================= - LOAD B - DIVIDE } Divide instructions generally - DIVIDE } take a long time to perform - LOAD A + LOAD B + DIVIDE } Divide instructions generally + DIVIDE } take a long time to perform + LOAD A Which might appear as this: @@ -910,13 +1098,13 @@ Which might appear as this: Placing a read barrier or a data dependency barrier just before the second load: - CPU 1 CPU 2 + CPU 1 CPU 2 ======================= ======================= - LOAD B - DIVIDE - DIVIDE + LOAD B + DIVIDE + DIVIDE - LOAD A + LOAD A will force any value speculatively obtained to be reconsidered to an extent dependent on the type of barrier used. If there was no change made to the @@ -1042,10 +1230,277 @@ compiler from moving the memory accesses either side of it to the other side: barrier(); -This is a general barrier - lesser varieties of compiler barrier do not exist. +This is a general barrier -- there are no read-read or write-write variants +of barrier(). However, ACCESS_ONCE() can be thought of as a weak form +for barrier() that affects only the specific accesses flagged by the +ACCESS_ONCE(). + +The barrier() function has the following effects: + + (*) Prevents the compiler from reordering accesses following the + barrier() to precede any accesses preceding the barrier(). + One example use for this property is to ease communication between + interrupt-handler code and the code that was interrupted. + + (*) Within a loop, forces the compiler to load the variables used + in that loop's conditional on each pass through that loop. + +The ACCESS_ONCE() function can prevent any number of optimizations that, +while perfectly safe in single-threaded code, can be fatal in concurrent +code. 
Here are some examples of these sorts of optimizations: + + (*) The compiler is within its rights to merge successive loads from + the same variable. Such merging can cause the compiler to "optimize" + the following code: + + while (tmp = a) + do_something_with(tmp); + + into the following code, which, although in some sense legitimate + for single-threaded code, is almost certainly not what the developer + intended: + + if (tmp = a) + for (;;) + do_something_with(tmp); + + Use ACCESS_ONCE() to prevent the compiler from doing this to you: + + while (tmp = ACCESS_ONCE(a)) + do_something_with(tmp); + + (*) The compiler is within its rights to reload a variable, for example, + in cases where high register pressure prevents the compiler from + keeping all data of interest in registers. The compiler might + therefore optimize the variable 'tmp' out of our previous example: + + while (tmp = a) + do_something_with(tmp); + + This could result in the following code, which is perfectly safe in + single-threaded code, but can be fatal in concurrent code: + + while (a) + do_something_with(a); + + For example, the optimized version of this code could result in + passing a zero to do_something_with() in the case where the variable + a was modified by some other CPU between the "while" statement and + the call to do_something_with(). + + Again, use ACCESS_ONCE() to prevent the compiler from doing this: + + while (tmp = ACCESS_ONCE(a)) + do_something_with(tmp); + + Note that if the compiler runs short of registers, it might save + tmp onto the stack. The overhead of this saving and later restoring + is why compilers reload variables. Doing so is perfectly safe for + single-threaded code, so you need to tell the compiler about cases + where it is not safe. + + (*) The compiler is within its rights to omit a load entirely if it knows + what the value will be. For example, if the compiler can prove that + the value of variable 'a' is always zero, it can optimize this code: + + while (tmp = a) + do_something_with(tmp); -The compiler barrier has no direct effect on the CPU, which may then reorder -things however it wishes. + Into this: + + do { } while (0); + + This transformation is a win for single-threaded code because it gets + rid of a load and a branch. The problem is that the compiler will + carry out its proof assuming that the current CPU is the only one + updating variable 'a'. If variable 'a' is shared, then the compiler's + proof will be erroneous. Use ACCESS_ONCE() to tell the compiler + that it doesn't know as much as it thinks it does: + + while (tmp = ACCESS_ONCE(a)) + do_something_with(tmp); + + But please note that the compiler is also closely watching what you + do with the value after the ACCESS_ONCE(). For example, suppose you + do the following and MAX is a preprocessor macro with the value 1: + + while ((tmp = ACCESS_ONCE(a)) % MAX) + do_something_with(tmp); + + Then the compiler knows that the result of the "%" operator applied + to MAX will always be zero, again allowing the compiler to optimize + the code into near-nonexistence. (It will still load from the + variable 'a'.) + + (*) Similarly, the compiler is within its rights to omit a store entirely + if it knows that the variable already has the value being stored. + Again, the compiler assumes that the current CPU is the only one + storing into the variable, which can cause the compiler to do the + wrong thing for shared variables. 
For example, suppose you have + the following: + + a = 0; + /* Code that does not store to variable a. */ + a = 0; + + The compiler sees that the value of variable 'a' is already zero, so + it might well omit the second store. This would come as a fatal + surprise if some other CPU might have stored to variable 'a' in the + meantime. + + Use ACCESS_ONCE() to prevent the compiler from making this sort of + wrong guess: + + ACCESS_ONCE(a) = 0; + /* Code that does not store to variable a. */ + ACCESS_ONCE(a) = 0; + + (*) The compiler is within its rights to reorder memory accesses unless + you tell it not to. For example, consider the following interaction + between process-level code and an interrupt handler: + + void process_level(void) + { + msg = get_message(); + flag = true; + } + + void interrupt_handler(void) + { + if (flag) + process_message(msg); + } + + There is nothing to prevent the the compiler from transforming + process_level() to the following, in fact, this might well be a + win for single-threaded code: + + void process_level(void) + { + flag = true; + msg = get_message(); + } + + If the interrupt occurs between these two statement, then + interrupt_handler() might be passed a garbled msg. Use ACCESS_ONCE() + to prevent this as follows: + + void process_level(void) + { + ACCESS_ONCE(msg) = get_message(); + ACCESS_ONCE(flag) = true; + } + + void interrupt_handler(void) + { + if (ACCESS_ONCE(flag)) + process_message(ACCESS_ONCE(msg)); + } + + Note that the ACCESS_ONCE() wrappers in interrupt_handler() + are needed if this interrupt handler can itself be interrupted + by something that also accesses 'flag' and 'msg', for example, + a nested interrupt or an NMI. Otherwise, ACCESS_ONCE() is not + needed in interrupt_handler() other than for documentation purposes. + (Note also that nested interrupts do not typically occur in modern + Linux kernels, in fact, if an interrupt handler returns with + interrupts enabled, you will get a WARN_ONCE() splat.) + + You should assume that the compiler can move ACCESS_ONCE() past + code not containing ACCESS_ONCE(), barrier(), or similar primitives. + + This effect could also be achieved using barrier(), but ACCESS_ONCE() + is more selective: With ACCESS_ONCE(), the compiler need only forget + the contents of the indicated memory locations, while with barrier() + the compiler must discard the value of all memory locations that + it has currented cached in any machine registers. Of course, + the compiler must also respect the order in which the ACCESS_ONCE()s + occur, though the CPU of course need not do so. + + (*) The compiler is within its rights to invent stores to a variable, + as in the following example: + + if (a) + b = a; + else + b = 42; + + The compiler might save a branch by optimizing this as follows: + + b = 42; + if (a) + b = a; + + In single-threaded code, this is not only safe, but also saves + a branch. Unfortunately, in concurrent code, this optimization + could cause some other CPU to see a spurious value of 42 -- even + if variable 'a' was never zero -- when loading variable 'b'. + Use ACCESS_ONCE() to prevent this as follows: + + if (a) + ACCESS_ONCE(b) = a; + else + ACCESS_ONCE(b) = 42; + + The compiler can also invent loads. These are usually less + damaging, but they can result in cache-line bouncing and thus in + poor performance and scalability. Use ACCESS_ONCE() to prevent + invented loads. 
+ + (*) For aligned memory locations whose size allows them to be accessed + with a single memory-reference instruction, prevents "load tearing" + and "store tearing," in which a single large access is replaced by + multiple smaller accesses. For example, given an architecture having + 16-bit store instructions with 7-bit immediate fields, the compiler + might be tempted to use two 16-bit store-immediate instructions to + implement the following 32-bit store: + + p = 0x00010002; + + Please note that GCC really does use this sort of optimization, + which is not surprising given that it would likely take more + than two instructions to build the constant and then store it. + This optimization can therefore be a win in single-threaded code. + In fact, a recent bug (since fixed) caused GCC to incorrectly use + this optimization in a volatile store. In the absence of such bugs, + use of ACCESS_ONCE() prevents store tearing in the following example: + + ACCESS_ONCE(p) = 0x00010002; + + Use of packed structures can also result in load and store tearing, + as in this example: + + struct __attribute__((__packed__)) foo { + short a; + int b; + short c; + }; + struct foo foo1, foo2; + ... + + foo2.a = foo1.a; + foo2.b = foo1.b; + foo2.c = foo1.c; + + Because there are no ACCESS_ONCE() wrappers and no volatile markings, + the compiler would be well within its rights to implement these three + assignment statements as a pair of 32-bit loads followed by a pair + of 32-bit stores. This would result in load tearing on 'foo1.b' + and store tearing on 'foo2.b'. ACCESS_ONCE() again prevents tearing + in this example: + + foo2.a = foo1.a; + ACCESS_ONCE(foo2.b) = ACCESS_ONCE(foo1.b); + foo2.c = foo1.c; + +All that aside, it is never necessary to use ACCESS_ONCE() on a variable +that has been marked volatile. For example, because 'jiffies' is marked +volatile, it is never necessary to say ACCESS_ONCE(jiffies). The reason +for this is that ACCESS_ONCE() is implemented as a volatile cast, which +has no effect when its argument is already marked volatile. + +Please note that these compiler barriers have no direct effect on the CPU, +which may then reorder things however it wishes. CPU MEMORY BARRIERS @@ -1135,7 +1590,7 @@ There are some more advanced barrier functions: clear_bit( ... ); This prevents memory operations before the clear leaking to after it. See - the subsection on "Locking Functions" with reference to UNLOCK operation + the subsection on "Locking Functions" with reference to RELEASE operation implications. See Documentation/atomic_ops.txt for more information. See the "Atomic @@ -1169,8 +1624,8 @@ provide more substantial guarantees, but these may not be relied upon outside of arch specific code. -LOCKING FUNCTIONS ------------------ +ACQUIRING FUNCTIONS +------------------- The Linux kernel has a number of locking constructs: @@ -1181,65 +1636,107 @@ The Linux kernel has a number of locking constructs: (*) R/W semaphores (*) RCU -In all cases there are variants on "LOCK" operations and "UNLOCK" operations +In all cases there are variants on "ACQUIRE" operations and "RELEASE" operations for each construct. These operations all imply certain barriers: - (1) LOCK operation implication: + (1) ACQUIRE operation implication: - Memory operations issued after the LOCK will be completed after the LOCK - operation has completed. + Memory operations issued after the ACQUIRE will be completed after the + ACQUIRE operation has completed. 
- Memory operations issued before the LOCK may be completed after the LOCK - operation has completed. + Memory operations issued before the ACQUIRE may be completed after the + ACQUIRE operation has completed. An smp_mb__before_spinlock(), combined + with a following ACQUIRE, orders prior loads against subsequent stores and + stores and prior stores against subsequent stores. Note that this is + weaker than smp_mb()! The smp_mb__before_spinlock() primitive is free on + many architectures. - (2) UNLOCK operation implication: + (2) RELEASE operation implication: - Memory operations issued before the UNLOCK will be completed before the - UNLOCK operation has completed. + Memory operations issued before the RELEASE will be completed before the + RELEASE operation has completed. - Memory operations issued after the UNLOCK may be completed before the - UNLOCK operation has completed. + Memory operations issued after the RELEASE may be completed before the + RELEASE operation has completed. - (3) LOCK vs LOCK implication: + (3) ACQUIRE vs ACQUIRE implication: - All LOCK operations issued before another LOCK operation will be completed - before that LOCK operation. + All ACQUIRE operations issued before another ACQUIRE operation will be + completed before that ACQUIRE operation. - (4) LOCK vs UNLOCK implication: + (4) ACQUIRE vs RELEASE implication: - All LOCK operations issued before an UNLOCK operation will be completed - before the UNLOCK operation. + All ACQUIRE operations issued before a RELEASE operation will be + completed before the RELEASE operation. - All UNLOCK operations issued before a LOCK operation will be completed - before the LOCK operation. + (5) Failed conditional ACQUIRE implication: - (5) Failed conditional LOCK implication: - - Certain variants of the LOCK operation may fail, either due to being - unable to get the lock immediately, or due to receiving an unblocked + Certain locking variants of the ACQUIRE operation may fail, either due to + being unable to get the lock immediately, or due to receiving an unblocked signal whilst asleep waiting for the lock to become available. Failed locks do not imply any sort of barrier. -Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is -equivalent to a full barrier, but a LOCK followed by an UNLOCK is not. - -[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way - barriers is that the effects of instructions outside of a critical section - may seep into the inside of the critical section. +[!] Note: one of the consequences of lock ACQUIREs and RELEASEs being only +one-way barriers is that the effects of instructions outside of a critical +section may seep into the inside of the critical section. 
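In terms of concrete primitives, ACQUIRE and RELEASE correspond to operations such as spin_lock() and spin_unlock(); the following sketch, with a hypothetical lock 'mylock', shows the seepage described in the note:

	*A = a;				/* may be reordered into the critical section */
	spin_lock(&mylock);		/* ACQUIRE */
	*B = b;
	spin_unlock(&mylock);		/* RELEASE */
	*C = c;				/* may also be reordered into the critical section */
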
-A LOCK followed by an UNLOCK may not be assumed to be full memory barrier -because it is possible for an access preceding the LOCK to happen after the -LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the -two accesses can themselves then cross: +An ACQUIRE followed by a RELEASE may not be assumed to be full memory barrier +because it is possible for an access preceding the ACQUIRE to happen after the +ACQUIRE, and an access following the RELEASE to happen before the RELEASE, and +the two accesses can themselves then cross: *A = a; - LOCK - UNLOCK + ACQUIRE M + RELEASE M *B = b; may occur as: - LOCK, STORE *B, STORE *A, UNLOCK + ACQUIRE M, STORE *B, STORE *A, RELEASE M + +This same reordering can of course occur if the lock's ACQUIRE and RELEASE are +to the same lock variable, but only from the perspective of another CPU not +holding that lock. + +In short, a RELEASE followed by an ACQUIRE may -not- be assumed to be a full +memory barrier because it is possible for a preceding RELEASE to pass a +later ACQUIRE from the viewpoint of the CPU, but not from the viewpoint +of the compiler. Note that deadlocks cannot be introduced by this +interchange because if such a deadlock threatened, the RELEASE would +simply complete. + +If it is necessary for a RELEASE-ACQUIRE pair to produce a full barrier, the +ACQUIRE can be followed by an smp_mb__after_unlock_lock() invocation. This +will produce a full barrier if either (a) the RELEASE and the ACQUIRE are +executed by the same CPU or task, or (b) the RELEASE and ACQUIRE act on the +same variable. The smp_mb__after_unlock_lock() primitive is free on many +architectures. Without smp_mb__after_unlock_lock(), the critical sections +corresponding to the RELEASE and the ACQUIRE can cross: + + *A = a; + RELEASE M + ACQUIRE N + *B = b; + +could occur as: + + ACQUIRE N, STORE *B, STORE *A, RELEASE M + +With smp_mb__after_unlock_lock(), they cannot, so that: + + *A = a; + RELEASE M + ACQUIRE N + smp_mb__after_unlock_lock(); + *B = b; + +will always occur as either of the following: + + STORE *A, RELEASE, ACQUIRE, STORE *B + STORE *A, ACQUIRE, RELEASE, STORE *B + +If the RELEASE and ACQUIRE were instead both operating on the same lock +variable, only the first of these two alternatives can occur. Locks and semaphores may not provide any guarantee of ordering on UP compiled systems, and so cannot be counted on in such a situation to actually achieve @@ -1253,33 +1750,33 @@ As an example, consider the following: *A = a; *B = b; - LOCK + ACQUIRE *C = c; *D = d; - UNLOCK + RELEASE *E = e; *F = f; The following sequence of events is acceptable: - LOCK, {*F,*A}, *E, {*C,*D}, *B, UNLOCK + ACQUIRE, {*F,*A}, *E, {*C,*D}, *B, RELEASE [+] Note that {*F,*A} indicates a combined access. But none of the following are: - {*F,*A}, *B, LOCK, *C, *D, UNLOCK, *E - *A, *B, *C, LOCK, *D, UNLOCK, *E, *F - *A, *B, LOCK, *C, UNLOCK, *D, *E, *F - *B, LOCK, *C, *D, UNLOCK, {*F,*A}, *E + {*F,*A}, *B, ACQUIRE, *C, *D, RELEASE, *E + *A, *B, *C, ACQUIRE, *D, RELEASE, *E, *F + *A, *B, ACQUIRE, *C, RELEASE, *D, *E, *F + *B, ACQUIRE, *C, *D, RELEASE, {*F,*A}, *E INTERRUPT DISABLING FUNCTIONS ----------------------------- -Functions that disable interrupts (LOCK equivalent) and enable interrupts -(UNLOCK equivalent) will act as compiler barriers only. So if memory or I/O +Functions that disable interrupts (ACQUIRE equivalent) and enable interrupts +(RELEASE equivalent) will act as compiler barriers only. 
So if memory or I/O barriers are required in such a situation, they must be provided from some other means. @@ -1418,75 +1915,81 @@ Other functions that imply barriers: (*) schedule() and similar imply full memory barriers. -================================= -INTER-CPU LOCKING BARRIER EFFECTS -================================= +=================================== +INTER-CPU ACQUIRING BARRIER EFFECTS +=================================== On SMP systems locking primitives give a more substantial form of barrier: one that does affect memory access ordering on other CPUs, within the context of conflict on any particular lock. -LOCKS VS MEMORY ACCESSES ------------------------- +ACQUIRES VS MEMORY ACCESSES +--------------------------- Consider the following: the system has a pair of spinlocks (M) and (Q), and three CPUs; then should the following sequence of events occur: CPU 1 CPU 2 =============================== =============================== - *A = a; *E = e; - LOCK M LOCK Q - *B = b; *F = f; - *C = c; *G = g; - UNLOCK M UNLOCK Q - *D = d; *H = h; + ACCESS_ONCE(*A) = a; ACCESS_ONCE(*E) = e; + ACQUIRE M ACQUIRE Q + ACCESS_ONCE(*B) = b; ACCESS_ONCE(*F) = f; + ACCESS_ONCE(*C) = c; ACCESS_ONCE(*G) = g; + RELEASE M RELEASE Q + ACCESS_ONCE(*D) = d; ACCESS_ONCE(*H) = h; Then there is no guarantee as to what order CPU 3 will see the accesses to *A through *H occur in, other than the constraints imposed by the separate locks on the separate CPUs. It might, for example, see: - *E, LOCK M, LOCK Q, *G, *C, *F, *A, *B, UNLOCK Q, *D, *H, UNLOCK M + *E, ACQUIRE M, ACQUIRE Q, *G, *C, *F, *A, *B, RELEASE Q, *D, *H, RELEASE M But it won't see any of: - *B, *C or *D preceding LOCK M - *A, *B or *C following UNLOCK M - *F, *G or *H preceding LOCK Q - *E, *F or *G following UNLOCK Q + *B, *C or *D preceding ACQUIRE M + *A, *B or *C following RELEASE M + *F, *G or *H preceding ACQUIRE Q + *E, *F or *G following RELEASE Q However, if the following occurs: CPU 1 CPU 2 =============================== =============================== - *A = a; - LOCK M [1] - *B = b; - *C = c; - UNLOCK M [1] - *D = d; *E = e; - LOCK M [2] - *F = f; - *G = g; - UNLOCK M [2] - *H = h; + ACCESS_ONCE(*A) = a; + ACQUIRE M [1] + ACCESS_ONCE(*B) = b; + ACCESS_ONCE(*C) = c; + RELEASE M [1] + ACCESS_ONCE(*D) = d; ACCESS_ONCE(*E) = e; + ACQUIRE M [2] + smp_mb__after_unlock_lock(); + ACCESS_ONCE(*F) = f; + ACCESS_ONCE(*G) = g; + RELEASE M [2] + ACCESS_ONCE(*H) = h; CPU 3 might see: - *E, LOCK M [1], *C, *B, *A, UNLOCK M [1], - LOCK M [2], *H, *F, *G, UNLOCK M [2], *D + *E, ACQUIRE M [1], *C, *B, *A, RELEASE M [1], + ACQUIRE M [2], *H, *F, *G, RELEASE M [2], *D But assuming CPU 1 gets the lock first, CPU 3 won't see any of: - *B, *C, *D, *F, *G or *H preceding LOCK M [1] - *A, *B or *C following UNLOCK M [1] - *F, *G or *H preceding LOCK M [2] - *A, *B, *C, *E, *F or *G following UNLOCK M [2] + *B, *C, *D, *F, *G or *H preceding ACQUIRE M [1] + *A, *B or *C following RELEASE M [1] + *F, *G or *H preceding ACQUIRE M [2] + *A, *B, *C, *E, *F or *G following RELEASE M [2] +Note that the smp_mb__after_unlock_lock() is critically important +here: Without it CPU 3 might see some of the above orderings. +Without smp_mb__after_unlock_lock(), the accesses are not guaranteed +to be seen in order unless CPU 3 holds lock M. 
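As a sketch, the same CPU 1 / CPU 2 sequence expressed with a spinlock (a hypothetical lock 'm' standing in for M) might look like this:

	/* CPU 1 */
	ACCESS_ONCE(*A) = a;
	spin_lock(&m);			/* ACQUIRE M [1] */
	ACCESS_ONCE(*B) = b;
	ACCESS_ONCE(*C) = c;
	spin_unlock(&m);		/* RELEASE M [1] */
	ACCESS_ONCE(*D) = d;

	/* CPU 2 */
	ACCESS_ONCE(*E) = e;
	spin_lock(&m);			/* ACQUIRE M [2] */
	smp_mb__after_unlock_lock();	/* full barrier: RELEASE [1] and ACQUIRE [2] act on the same lock */
	ACCESS_ONCE(*F) = f;
	ACCESS_ONCE(*G) = g;
	spin_unlock(&m);		/* RELEASE M [2] */
	ACCESS_ONCE(*H) = h;
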
-LOCKS VS I/O ACCESSES ---------------------- + +ACQUIRES VS I/O ACCESSES +------------------------ Under certain circumstances (especially involving NUMA), I/O accesses within two spinlocked sections on two different CPUs may be seen as interleaved by the @@ -1687,28 +2190,30 @@ explicit lock operations, described later). These include: xchg(); cmpxchg(); - atomic_xchg(); - atomic_cmpxchg(); - atomic_inc_return(); - atomic_dec_return(); - atomic_add_return(); - atomic_sub_return(); - atomic_inc_and_test(); - atomic_dec_and_test(); - atomic_sub_and_test(); - atomic_add_negative(); - atomic_add_unless(); /* when succeeds (returns 1) */ + atomic_xchg(); atomic_long_xchg(); + atomic_cmpxchg(); atomic_long_cmpxchg(); + atomic_inc_return(); atomic_long_inc_return(); + atomic_dec_return(); atomic_long_dec_return(); + atomic_add_return(); atomic_long_add_return(); + atomic_sub_return(); atomic_long_sub_return(); + atomic_inc_and_test(); atomic_long_inc_and_test(); + atomic_dec_and_test(); atomic_long_dec_and_test(); + atomic_sub_and_test(); atomic_long_sub_and_test(); + atomic_add_negative(); atomic_long_add_negative(); test_and_set_bit(); test_and_clear_bit(); test_and_change_bit(); -These are used for such things as implementing LOCK-class and UNLOCK-class + /* when succeeds (returns 1) */ + atomic_add_unless(); atomic_long_add_unless(); + +These are used for such things as implementing ACQUIRE-class and RELEASE-class operations and adjusting reference counters towards object destruction, and as such the implicit memory barrier effects are necessary. The following operations are potential problems as they do _not_ imply memory -barriers, but might be used for implementing such things as UNLOCK-class +barriers, but might be used for implementing such things as RELEASE-class operations: atomic_set(); @@ -1750,7 +2255,7 @@ The following operations are special locking primitives: clear_bit_unlock(); __clear_bit_unlock(); -These implement LOCK-class and UNLOCK-class operations. These should be used in +These implement ACQUIRE-class and RELEASE-class operations. These should be used in preference to other operations when implementing locking primitives, because their implementations can be optimised on many architectures. @@ -1887,8 +2392,8 @@ functions: space should suffice for PCI. [*] NOTE! attempting to load from the same location as was written to may - cause a malfunction - consider the 16550 Rx/Tx serial registers for - example. + cause a malfunction - consider the 16550 Rx/Tx serial registers for + example. Used with prefetchable I/O memory, an mmiowb() barrier may be required to force stores to be ordered. 
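A sketch of the usual driver pattern for this, with a hypothetical device lock, register base and register offset:

	spin_lock_irqsave(&dev_lock, flags);
	writel(value, dev_regs + CTRL_REG);	/* MMIO store to the device */
	mmiowb();				/* order the MMIO store before the unlock is seen */
	spin_unlock_irqrestore(&dev_lock, flags);
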
@@ -1955,19 +2460,19 @@ barriers for the most part act at the interface between the CPU and its cache : +--------+ +--------+ : +--------+ +-----------+ | | | | : | | | | +--------+ - | CPU | | Memory | : | CPU | | | | | - | Core |--->| Access |----->| Cache |<-->| | | | + | CPU | | Memory | : | CPU | | | | | + | Core |--->| Access |----->| Cache |<-->| | | | | | | Queue | : | | | |--->| Memory | - | | | | : | | | | | | - +--------+ +--------+ : +--------+ | | | | + | | | | : | | | | | | + +--------+ +--------+ : +--------+ | | | | : | Cache | +--------+ : | Coherency | : | Mechanism | +--------+ +--------+ +--------+ : +--------+ | | | | | | | | : | | | | | | | CPU | | Memory | : | CPU | | |--->| Device | - | Core |--->| Access |----->| Cache |<-->| | | | - | | | Queue | : | | | | | | + | Core |--->| Access |----->| Cache |<-->| | | | + | | | Queue | : | | | | | | | | | | : | | | | +--------+ +--------+ +--------+ : +--------+ +-----------+ : @@ -2090,7 +2595,7 @@ CPU's caches by some other cache event: p = &v; q = p; - + x = *q; Reads from v before v updated in cache @@ -2115,7 +2620,7 @@ queue before processing any further requests: p = &v; q = p; - + smp_read_barrier_depends() @@ -2177,11 +2682,11 @@ A programmer might take it for granted that the CPU will perform memory operations in exactly the order specified, so that if the CPU is, for example, given the following piece of code to execute: - a = *A; - *B = b; - c = *C; - d = *D; - *E = e; + a = ACCESS_ONCE(*A); + ACCESS_ONCE(*B) = b; + c = ACCESS_ONCE(*C); + d = ACCESS_ONCE(*D); + ACCESS_ONCE(*E) = e; they would then expect that the CPU will complete the memory operation for each instruction before moving on to the next one, leading to a definite sequence of @@ -2228,12 +2733,12 @@ However, it is guaranteed that a CPU will be self-consistent: it will see its _own_ accesses appear to be correctly ordered, without the need for a memory barrier. For instance with the following code: - U = *A; - *A = V; - *A = W; - X = *A; - *A = Y; - Z = *A; + U = ACCESS_ONCE(*A); + ACCESS_ONCE(*A) = V; + ACCESS_ONCE(*A) = W; + X = ACCESS_ONCE(*A); + ACCESS_ONCE(*A) = Y; + Z = ACCESS_ONCE(*A); and assuming no intervention by an external influence, it can be assumed that the final result will appear to be: @@ -2250,7 +2755,12 @@ accesses: in that order, but, without intervention, the sequence may have almost any combination of elements combined or discarded, provided the program's view of -the world remains consistent. +the world remains consistent. Note that ACCESS_ONCE() is -not- optional +in the above example, as there are architectures where a given CPU might +interchange successive loads to the same location. On such architectures, +ACCESS_ONCE() does whatever is necessary to prevent this, for example, on +Itanium the volatile casts used by ACCESS_ONCE() cause GCC to emit the +special ld.acq and st.rel instructions that prevent such reordering. The compiler may also combine, discard or defer elements of the sequence before the CPU even sees them. @@ -2264,13 +2774,13 @@ may be reduced to: *A = W; -since, without a write barrier, it can be assumed that the effect of the -storage of V to *A is lost. Similarly: +since, without either a write barrier or an ACCESS_ONCE(), it can be +assumed that the effect of the storage of V to *A is lost. 
Similarly: *A = Y; Z = *A; -may, without a memory barrier, be reduced to: +may, without a memory barrier or an ACCESS_ONCE(), be reduced to: *A = Y; Z = Y; diff --git a/Documentation/misc-devices/mei/mei-amt-version.c b/Documentation/misc-devices/mei/mei-amt-version.c index 49e4f770864a5f..57d0d871dcf714 100644 --- a/Documentation/misc-devices/mei/mei-amt-version.c +++ b/Documentation/misc-devices/mei/mei-amt-version.c @@ -115,8 +115,6 @@ static bool mei_init(struct mei *me, const uuid_le *guid, struct mei_client *cl; struct mei_connect_client_data data; - mei_deinit(me); - me->verbose = verbose; me->fd = open("/dev/mei", O_RDWR); diff --git a/Documentation/robust-futex-ABI.txt b/Documentation/robust-futex-ABI.txt index fd1cd8aae4eb58..16eb314f56cc45 100644 --- a/Documentation/robust-futex-ABI.txt +++ b/Documentation/robust-futex-ABI.txt @@ -146,8 +146,8 @@ On removal: 1) set the 'list_op_pending' word to the address of the 'lock entry' to be removed, 2) remove the lock entry for this lock from the 'head' list, - 2) release the futex lock, and - 2) clear the 'lock_op_pending' word. + 3) release the futex lock, and + 4) clear the 'lock_op_pending' word. On exit, the kernel will consider the address stored in 'list_op_pending' and the address of each 'lock word' found by walking diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 26b7ee491df891..6d486404200e9b 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -428,11 +428,6 @@ rate for each task. numa_balancing_scan_size_mb is how many megabytes worth of pages are scanned for a given scan. -numa_balancing_settle_count is how many scan periods must complete before -the schedule balancer stops pushing the task towards a preferred node. This -gives the scheduler a chance to place the task on an alternative node if the -preferred node is overloaded. - numa_balancing_migrate_deferred is how many page migrations get skipped unconditionally, after a page migration is skipped because a page is shared with other tasks. This reduces page migration overhead, and determines diff --git a/Documentation/vme_api.txt b/Documentation/vme_api.txt index 856efa35f6e3c5..ffe6e22a2ccd99 100644 --- a/Documentation/vme_api.txt +++ b/Documentation/vme_api.txt @@ -393,4 +393,14 @@ Slot Detection This function returns the slot ID of the provided bridge. - int vme_slot_get(struct vme_dev *dev); + int vme_slot_num(struct vme_dev *dev); + + +Bus Detection +============= + +This function returns the bus ID of the provided bridge. + + int vme_bus_num(struct vme_dev *dev); + + diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt index f4f268c2b826de..cb81741d3b0bd9 100644 --- a/Documentation/x86/boot.txt +++ b/Documentation/x86/boot.txt @@ -608,6 +608,9 @@ Protocol: 2.12+ - If 1, the kernel supports the 64-bit EFI handoff entry point given at handover_offset + 0x200. + Bit 4 (read): XLF_EFI_KEXEC + - If 1, the kernel supports kexec EFI boot with EFI runtime support. + Field name: cmdline_size Type: read Offset/size: 0x238/4 diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 881582f75c9ceb..c584a51add15ad 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -28,4 +28,11 @@ reference. Current X86-64 implementations only support 40 bits of address space, but we support up to 46 bits. This expands into MBZ space in the page tables. 
+->trampoline_pgd: + +We map EFI runtime services in the aforementioned PGD in the virtual +range of 64Gb (arbitrarily set, can be raised if needed) + +0xffffffef00000000 - 0xffffffff00000000 + -Andi Kleen, Jul 2004 diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO index 7fba5aab9ef9b8..6c914aa87e7138 100644 --- a/Documentation/zh_CN/HOWTO +++ b/Documentation/zh_CN/HOWTO @@ -112,7 +112,7 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与 其他关于如何正确地生成补丁的优秀文档包括: "The Perfect Patch" - http://userweb.kernel.org/~akpm/stuff/tpp.txt + http://www.ozlabs.org/~akpm/stuff/tpp.txt "Linux kernel patch submission format" http://linux.yyz.us/patch-format.html @@ -515,7 +515,7 @@ Linux内核社区并不喜欢一下接收大段的代码。修改需要被恰当 想了解它具体应该看起来像什么,请查阅以下文档中的“ChangeLog”章节: “The Perfect Patch” - http://userweb.kernel.org/~akpm/stuff/tpp.txt + http://www.ozlabs.org/~akpm/stuff/tpp.txt 这些事情有时候做起来很难。要在任何方面都做到完美可能需要好几年时间。这是 diff --git a/Documentation/zorro.txt b/Documentation/zorro.txt index d5829d14774a67..90a64d52bea2f3 100644 --- a/Documentation/zorro.txt +++ b/Documentation/zorro.txt @@ -95,8 +95,9 @@ The treatment of these regions depends on the type of Zorro space: ------------- linux/include/linux/zorro.h -linux/include/asm-{m68k,ppc}/zorro.h -linux/include/linux/zorro_ids.h +linux/include/uapi/linux/zorro.h +linux/include/uapi/linux/zorro_ids.h +linux/arch/m68k/include/asm/zorro.h linux/drivers/zorro /proc/bus/zorro diff --git a/MAINTAINERS b/MAINTAINERS index d5e4ff328cc714..88ae0eca7cd838 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts F: arch/arm/boot/dts/sama*.dtsi ARM/CALXEDA HIGHBANK ARCHITECTURE -M: Rob Herring +M: Rob Herring L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-highbank/ @@ -1368,6 +1368,9 @@ T: git git://git.xilinx.com/linux-xlnx.git S: Supported F: arch/arm/mach-zynq/ F: drivers/cpuidle/cpuidle-zynq.c +N: zynq +N: xilinx +F: drivers/clocksource/cadence_ttc_timer.c ARM SMMU DRIVER M: Will Deacon @@ -2616,7 +2619,7 @@ S: Maintained F: drivers/platform/x86/dell-laptop.c DELL LAPTOP SMM DRIVER -S: Orphan +M: Guenter Roeck F: drivers/char/i8k.c F: include/uapi/linux/i8k.h @@ -2635,7 +2638,7 @@ DESIGNWARE USB2 DRD IP DRIVER M: Paul Zimmerman L: linux-usb@vger.kernel.org S: Maintained -F: drivers/staging/dwc2/ +F: drivers/usb/dwc2/ DESIGNWARE USB3 DRD IP DRIVER M: Felipe Balbi @@ -2825,8 +2828,10 @@ F: include/uapi/drm/ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) M: Daniel Vetter +M: Jani Nikula L: intel-gfx@lists.freedesktop.org L: dri-devel@lists.freedesktop.org +Q: http://patchwork.freedesktop.org/project/intel-gfx/ T: git git://people.freedesktop.org/~danvet/drm-intel S: Supported F: drivers/gpu/drm/i915/ @@ -3330,6 +3335,7 @@ EXTERNAL CONNECTOR SUBSYSTEM (EXTCON) M: MyungJoo Ham M: Chanwoo Choi L: linux-kernel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/extcon.git S: Maintained F: drivers/extcon/ F: Documentation/extcon/ @@ -5136,6 +5142,11 @@ F: drivers/lguest/ F: include/linux/lguest*.h F: tools/lguest/ +LIBLOCKDEP +M: Sasha Levin +S: Maintained +F: tools/lib/lockdep/ + LINUX FOR IBM pSERIES (RS/6000) M: Paul Mackerras W: http://www.ibm.com/linux/ltc/projects/ppc @@ -6256,7 +6267,7 @@ F: drivers/i2c/busses/i2c-ocores.c OPEN FIRMWARE AND FLATTENED DEVICE TREE M: Grant Likely -M: Rob Herring +M: Rob Herring L: devicetree@vger.kernel.org W: http://fdt.secretlab.ca T: git git://git.secretlab.ca/git/linux-2.6.git @@ -6268,7 +6279,7 @@ K: 
of_get_property K: of_match_table OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS -M: Rob Herring +M: Rob Herring M: Pawel Moll M: Mark Rutland M: Ian Campbell @@ -7094,6 +7105,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git F: Documentation/RCU/torture.txt F: kernel/rcu/torture.c +RCUTORTURE TEST FRAMEWORK +M: "Paul E. McKenney" +S: Supported +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +F: tools/testing/selftests/rcutorture + RDC R-321X SoC M: Florian Fainelli S: Maintained @@ -9226,6 +9243,7 @@ F: include/media/videobuf2-* VIRTIO CONSOLE DRIVER M: Amit Shah +L: virtio-dev@lists.oasis-open.org L: virtualization@lists.linux-foundation.org S: Maintained F: drivers/char/virtio_console.c @@ -9235,6 +9253,7 @@ F: include/uapi/linux/virtio_console.h VIRTIO CORE, NET AND BLOCK DRIVERS M: Rusty Russell M: "Michael S. Tsirkin" +L: virtio-dev@lists.oasis-open.org L: virtualization@lists.linux-foundation.org S: Maintained F: drivers/virtio/ @@ -9247,6 +9266,7 @@ F: include/uapi/linux/virtio_*.h VIRTIO HOST (VHOST) M: "Michael S. Tsirkin" L: kvm@vger.kernel.org +L: virtio-dev@lists.oasis-open.org L: virtualization@lists.linux-foundation.org L: netdev@vger.kernel.org S: Maintained diff --git a/Makefile b/Makefile index 14d592cbbc5f77..455fd484b20edb 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 13 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = NAME = One Giant Leap for Frogkind # *DOCUMENTATION* @@ -595,10 +595,24 @@ ifneq ($(CONFIG_FRAME_WARN),0) KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) endif -# Force gcc to behave correct even for buggy distributions -ifndef CONFIG_CC_STACKPROTECTOR -KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector) +# Handle stack protector mode. +ifdef CONFIG_CC_STACKPROTECTOR_REGULAR + stackp-flag := -fstack-protector + ifeq ($(call cc-option, $(stackp-flag)),) + $(warning Cannot use CONFIG_CC_STACKPROTECTOR: \ + -fstack-protector not supported by compiler)) + endif +else ifdef CONFIG_CC_STACKPROTECTOR_STRONG + stackp-flag := -fstack-protector-strong + ifeq ($(call cc-option, $(stackp-flag)),) + $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \ + -fstack-protector-strong not supported by compiler) + endif +else + # Force off for distro compilers that enable stack protector by default. + stackp-flag := $(call cc-option, -fno-stack-protector) endif +KBUILD_CFLAGS += $(stackp-flag) # This warning generated too much noise in a regular build. # Use make W=1 to enable this warning (see scripts/Makefile.build) diff --git a/arch/Kconfig b/arch/Kconfig index f1cf895c040fb6..80bbb8ccd0d10b 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -336,6 +336,73 @@ config SECCOMP_FILTER See Documentation/prctl/seccomp_filter.txt for details. +config HAVE_CC_STACKPROTECTOR + bool + help + An arch should select this symbol if: + - its compiler supports the -fstack-protector option + - it has implemented a stack canary (e.g. __stack_chk_guard) + +config CC_STACKPROTECTOR + def_bool n + help + Set when a stack-protector mode is enabled, so that the build + can enable kernel-side support for the GCC feature. + +choice + prompt "Stack Protector buffer overflow detection" + depends on HAVE_CC_STACKPROTECTOR + default CC_STACKPROTECTOR_NONE + help + This option turns on the "stack-protector" GCC feature. 
This + feature puts, at the beginning of functions, a canary value on + the stack just before the return address, and validates + the value just before actually returning. Stack based buffer + overflows (that need to overwrite this return address) now also + overwrite the canary, which gets detected and the attack is then + neutralized via a kernel panic. + +config CC_STACKPROTECTOR_NONE + bool "None" + help + Disable "stack-protector" GCC feature. + +config CC_STACKPROTECTOR_REGULAR + bool "Regular" + select CC_STACKPROTECTOR + help + Functions will have the stack-protector canary logic added if they + have an 8-byte or larger character array on the stack. + + This feature requires gcc version 4.2 or above, or a distribution + gcc with the feature backported ("-fstack-protector"). + + On an x86 "defconfig" build, this feature adds canary checks to + about 3% of all kernel functions, which increases kernel code size + by about 0.3%. + +config CC_STACKPROTECTOR_STRONG + bool "Strong" + select CC_STACKPROTECTOR + help + Functions will have the stack-protector canary logic added in any + of the following conditions: + + - local variable's address used as part of the right hand side of an + assignment or function argument + - local variable is an array (or union containing an array), + regardless of array type or length + - uses register local variables + + This feature requires gcc version 4.9 or above, or a distribution + gcc with the feature backported ("-fstack-protector-strong"). + + On an x86 "defconfig" build, this feature adds canary checks to + about 20% of all kernel functions, which increases the kernel code + size by about 2%. + +endchoice + config HAVE_CONTEXT_TRACKING bool help diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h index ce8860a0b32db6..3832bdb794fec8 100644 --- a/arch/alpha/include/asm/barrier.h +++ b/arch/alpha/include/asm/barrier.h @@ -3,33 +3,18 @@ #include -#define mb() \ -__asm__ __volatile__("mb": : :"memory") +#define mb() __asm__ __volatile__("mb": : :"memory") +#define rmb() __asm__ __volatile__("mb": : :"memory") +#define wmb() __asm__ __volatile__("wmb": : :"memory") -#define rmb() \ -__asm__ __volatile__("mb": : :"memory") - -#define wmb() \ -__asm__ __volatile__("wmb": : :"memory") - -#define read_barrier_depends() \ -__asm__ __volatile__("mb": : :"memory") +#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") #ifdef CONFIG_SMP #define __ASM_SMP_MB "\tmb\n" -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() #else #define __ASM_SMP_MB -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) #endif -#define set_mb(var, value) \ -do { var = value; mb(); } while (0) +#include #endif /* __BARRIER_H */ diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 5943f7f9d32550..9ae21c19800775 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += auxvec.h +generic-y += barrier.h generic-y += bugs.h generic-y += bitsperlong.h generic-y += clkdev.h diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 83f03ca6caf6c7..03e494f695d1d1 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #endif /* !CONFIG_ARC_HAS_LLSC */ +#define 
smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + /** * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h index f6cb7c4ffb3502..c32245c3d1e9b5 100644 --- a/arch/arc/include/asm/barrier.h +++ b/arch/arc/include/asm/barrier.h @@ -30,11 +30,6 @@ #define smp_wmb() barrier() #endif -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #define smp_read_barrier_depends() do { } while (0) #endif diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c1f1a7eee953de..9c909fc292720a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -30,6 +30,7 @@ config ARM select HAVE_BPF_JIT select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT + select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS @@ -1856,18 +1857,6 @@ config SECCOMP and the task is only allowed to execute a few safe syscalls defined by each seccomp mode. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - This feature requires gcc version 4.2 or above. 
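For illustration only, not part of the patches above: a minimal user-space sketch of the two instrumentation policies described in the new stack-protector Kconfig help text. copy_name() has an 8-byte-or-larger char array, so plain -fstack-protector (CC_STACKPROTECTOR_REGULAR) already gives it a canary; use_local_address() merely passes the address of a local to another function, so only -fstack-protector-strong (CC_STACKPROTECTOR_STRONG) instruments it. All names here are invented for the example.

#include <stdio.h>
#include <string.h>

/* >= 8-byte char array: canary under both -fstack-protector and -fstack-protector-strong */
static void copy_name(const char *src)
{
	char name[16];

	strcpy(name, src);	/* an overflow here clobbers the canary before the saved return address */
	printf("%s\n", name);
}

static void fill(int *p)
{
	*p = 42;
}

/* address of a local escapes as an argument: canary only under -fstack-protector-strong */
static int use_local_address(void)
{
	int v;

	fill(&v);
	return v;
}

int main(int argc, char **argv)
{
	copy_name(argc > 1 ? argv[1] : "demo");
	return use_local_address();
}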
- config SWIOTLB def_bool y diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c99b1086d83dfa..55b4255ad6ed9d 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -40,10 +40,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y) KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog endif -ifeq ($(CONFIG_CC_STACKPROTECTOR),y) -KBUILD_CFLAGS +=-fstack-protector -endif - ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) KBUILD_CPPFLAGS += -mbig-endian AS += -EB diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index 31bd43b8209585..d4f891f5699655 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -127,6 +127,18 @@ asmlinkage void __div0(void) error("Attempting division by 0!"); } +unsigned long __stack_chk_guard; + +void __stack_chk_guard_setup(void) +{ + __stack_chk_guard = 0x000a0dff; +} + +void __stack_chk_fail(void) +{ + error("stack-protector: Kernel stack is corrupted\n"); +} + extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)); @@ -137,6 +149,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p, { int ret; + __stack_chk_guard_setup(); + output_data = (unsigned char *)output_start; free_mem_ptr = free_mem_ptr_p; free_mem_end_ptr = free_mem_ptr_end_p; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9db5047812f3d6..177becde7a268b 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -559,7 +559,7 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0x10800000 0x1000>; interrupts = <0 33 0>; - clocks = <&clock 271>; + clocks = <&clock 346>; clock-names = "apb_pclk"; #dma-cells = <1>; #dma-channels = <8>; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index 46e1d7ef163f52..9987dd0e9c5999 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -241,7 +241,7 @@ sdhi0: sdhi@ee100000 { compatible = "renesas,sdhi-r8a7790"; - reg = <0 0xee100000 0 0x100>; + reg = <0 0xee100000 0 0x200>; interrupt-parent = <&gic>; interrupts = <0 165 4>; cap-sd-highspeed; @@ -250,7 +250,7 @@ sdhi1: sdhi@ee120000 { compatible = "renesas,sdhi-r8a7790"; - reg = <0 0xee120000 0 0x100>; + reg = <0 0xee120000 0 0x200>; interrupt-parent = <&gic>; interrupts = <0 166 4>; cap-sd-highspeed; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 52476742a1043e..e674c94c720698 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -332,5 +332,12 @@ clock-frequency = <100000>; status = "disabled"; }; + + timer@01c60000 { + compatible = "allwinner,sun5i-a13-hstimer"; + reg = <0x01c60000 0x1000>; + interrupts = <82>, <83>; + clocks = <&ahb_gates 28>; + }; }; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index ce8ef2a45be098..1ccd75d37f49d4 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -273,5 +273,12 @@ clock-frequency = <100000>; status = "disabled"; }; + + timer@01c60000 { + compatible = "allwinner,sun5i-a13-hstimer"; + reg = <0x01c60000 0x1000>; + interrupts = <82>, <83>; + clocks = <&ahb_gates 28>; + }; }; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 367611a0730bc0..0135039eff9644 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -395,6 +395,16 @@ status = "disabled"; }; + hstimer@01c60000 { + compatible = "allwinner,sun7i-a20-hstimer"; + reg = <0x01c60000 
0x1000>; + interrupts = <0 81 1>, + <0 82 1>, + <0 83 1>, + <0 84 1>; + clocks = <&ahb_gates 28>; + }; + gic: interrupt-controller@01c81000 { compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; reg = <0x01c81000 0x1000>, diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 64205d453260d1..71e5fc7cfb18f4 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped @@ -58,7 +58,7 @@ # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_ARCH__ 7 #endif #ifdef __thumb__ diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index f3d96d9325737f..be068db960ee00 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl @@ -701,7 +701,7 @@ sub bitslice { # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_ARCH__ 7 #endif #ifdef __thumb__ diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 60f15e274e6d46..2f59f744339640 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -59,6 +59,21 @@ #define smp_wmb() dmb(ishst) #endif +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 3c597c222ef278..fbeb39c869e9fd 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t); */ #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) +#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) #define iounmap __arm_iounmap diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 6976b03e521369..8756e4bcdba060 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) +#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ + && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) ) #endif diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 141baa3f9a72d1..acabef1a75df00 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -15,7 +15,7 @@ #include -#define __NR_syscalls (380) +#define __NR_syscalls (384) #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) #define __ARCH_WANT_STAT64 diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 75579a9d6f76cb..3759cacdd7f860 100644 --- 
a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) return __set_phys_to_machine(pfn, mfn); } -#define xen_remap(cookie, size) ioremap_cached((cookie), (size)); +#define xen_remap(cookie, size) ioremap_cache((cookie), (size)); #endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index af33b44990ed4a..fb5584d0cc050a 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -406,6 +406,8 @@ #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) #define __NR_kcmp (__NR_SYSCALL_BASE+378) #define __NR_finit_module (__NR_SYSCALL_BASE+379) +#define __NR_sched_setattr (__NR_SYSCALL_BASE+380) +#define __NR_sched_getattr (__NR_SYSCALL_BASE+381) /* * This may need to be greater than __NR_last_syscall+1 in order to diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index c6ca7e376773fc..166e945de832f2 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -389,6 +389,8 @@ CALL(sys_process_vm_writev) CALL(sys_kcmp) CALL(sys_finit_module) +/* 380 */ CALL(sys_sched_setattr) + CALL(sys_sched_getattr) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 739c3dfc1da25c..34d5fd585bbb1d 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void) bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { - return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu); + return phys_id == cpu_logical_map(cpu); } static const void * __init arch_get_next_mach(const char *const **match) diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index bc3f2efa0d86b4..789d846a918453 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -99,10 +99,6 @@ int armpmu_event_set_period(struct perf_event *event) s64 period = hwc->sample_period; int ret = 0; - /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ - if (unlikely(period != hwc->last_period)) - left = period - (hwc->last_period - left); - if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index d85055cd24bacc..20d553c9f5e292 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu) static int cpu_pmu_device_probe(struct platform_device *pdev) { const struct of_device_id *of_id; - int (*init_fn)(struct arm_pmu *); + const int (*init_fn)(struct arm_pmu *); struct device_node *node = pdev->dev.of_node; struct arm_pmu *pmu; int ret = -ENODEV; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 7940241f0576b0..4636d56af2db63 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -36,7 +36,13 @@ #include #include -static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; +static const char *handler[]= { + "prefetch abort", + "data abort", + "address exception", + "interrupt", + "undefined instruction", +}; void *vectors_page; @@ -425,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) instr2 = __mem_to_opcode_thumb16(instr2); instr = __opcode_thumb32_compose(instr, instr2); } - } 
else if (get_user(instr, (u32 __user *)pc)) { + } else { + if (get_user(instr, (u32 __user *)pc)) + goto die_sig; instr = __mem_to_opcode_arm(instr); - goto die_sig; } if (call_undef_hook(regs, instr) == 0) diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 2a700e00528d0a..b18165ca1d387b 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -17,6 +17,7 @@ */ #include +#include #include #include #include @@ -853,6 +854,33 @@ static struct notifier_block hyp_init_cpu_nb = { .notifier_call = hyp_init_cpu_notify, }; +#ifdef CONFIG_CPU_PM +static int hyp_init_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, + void *v) +{ + if (cmd == CPU_PM_EXIT) { + cpu_init_hyp_mode(NULL); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static struct notifier_block hyp_init_cpu_pm_nb = { + .notifier_call = hyp_init_cpu_pm_notifier, +}; + +static void __init hyp_cpu_pm_init(void) +{ + cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); +} +#else +static inline void hyp_cpu_pm_init(void) +{ +} +#endif + /** * Inits Hyp-mode on all online CPUs */ @@ -1013,6 +1041,8 @@ int kvm_arch_init(void *opaque) goto out_err; } + hyp_cpu_pm_init(); + kvm_coproc_table_init(); return 0; out_err: diff --git a/arch/arm/mach-clps711x/devices.c b/arch/arm/mach-clps711x/devices.c index fb77d1448fec88..2001488a5ef24b 100644 --- a/arch/arm/mach-clps711x/devices.c +++ b/arch/arm/mach-clps711x/devices.c @@ -61,8 +61,29 @@ static void __init clps711x_add_syscon(void) &clps711x_syscon_res[i], 1); } +static const struct resource clps711x_uart1_res[] __initconst = { + DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR1, SZ_128), + DEFINE_RES_IRQ(IRQ_UTXINT1), + DEFINE_RES_IRQ(IRQ_URXINT1), +}; + +static const struct resource clps711x_uart2_res[] __initconst = { + DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR2, SZ_128), + DEFINE_RES_IRQ(IRQ_UTXINT2), + DEFINE_RES_IRQ(IRQ_URXINT2), +}; + +static void __init clps711x_add_uart(void) +{ + platform_device_register_simple("clps711x-uart", 0, clps711x_uart1_res, + ARRAY_SIZE(clps711x_uart1_res)); + platform_device_register_simple("clps711x-uart", 1, clps711x_uart2_res, + ARRAY_SIZE(clps711x_uart2_res)); +}; + void __init clps711x_devices_init(void) { clps711x_add_gpio(); clps711x_add_syscon(); + clps711x_add_uart(); } diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index 9ee78f7b499075..782f6c71fa0a6c 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c @@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = { void __init footbridge_timer_init(void) { struct clock_event_device *ce = &ckevt_dc21285; + unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16); - clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16); + clocksource_register_hz(&cksrc_dc21285, rate); setup_irq(ce->irq, &footbridge_timer_irq); ce->cpumask = cpumask_of(smp_processor_id()); - clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff); + clockevents_config_and_register(ce, rate, 0x4, 0xffffff); } diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index bd3bf66ce3449a..c7de89b263dd3b 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void) static void highbank_l2x0_disable(void) { + outer_flush_all(); /* Disable PL310 L2 Cache controller */ highbank_smc1(0x102, 0x0); } diff --git a/arch/arm/mach-omap1/include/mach/usb.h 
b/arch/arm/mach-omap1/include/mach/usb.h index 45e5ac707cbb79..2c263051dc518b 100644 --- a/arch/arm/mach-omap1/include/mach/usb.h +++ b/arch/arm/mach-omap1/include/mach/usb.h @@ -8,43 +8,7 @@ #define is_usb0_device(config) 0 #endif -struct omap_usb_config { - /* Configure drivers according to the connectors on your board: - * - "A" connector (rectagular) - * ... for host/OHCI use, set "register_host". - * - "B" connector (squarish) or "Mini-B" - * ... for device/gadget use, set "register_dev". - * - "Mini-AB" connector (very similar to Mini-B) - * ... for OTG use as device OR host, initialize "otg" - */ - unsigned register_host:1; - unsigned register_dev:1; - u8 otg; /* port number, 1-based: usb1 == 2 */ - - u8 hmc_mode; - - /* implicitly true if otg: host supports remote wakeup? */ - u8 rwc; - - /* signaling pins used to talk to transceiver on usbN: - * 0 == usbN unused - * 2 == usb0-only, using internal transceiver - * 3 == 3 wire bidirectional - * 4 == 4 wire bidirectional - * 6 == 6 wire unidirectional (or TLL) - */ - u8 pins[3]; - - struct platform_device *udc_device; - struct platform_device *ohci_device; - struct platform_device *otg_device; - - u32 (*usb0_init)(unsigned nwires, unsigned is_device); - u32 (*usb1_init)(unsigned nwires); - u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup); - - int (*ocpi_enable)(void); -}; +#include void omap_otg_init(struct omap_usb_config *config); diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 4ec8d82b0492f3..44a59c3abfb053 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -242,12 +242,18 @@ static void __init ldp_display_init(void) static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int res; + /* LCD enable GPIO */ ldp_lcd_pdata.enable_gpio = gpio + 7; /* Backlight enable GPIO */ ldp_lcd_pdata.backlight_gpio = gpio + 15; + res = platform_device_register(&ldp_lcd_device); + if (res) + pr_err("Unable to register LCD: %d\n", res); + return 0; } @@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = { static struct platform_device *ldp_devices[] __initdata = { &ldp_gpio_keys_device, - &ldp_lcd_device, }; #ifdef CONFIG_OMAP_MUX diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index b39efd46abf991..c0ab9b26be3da9 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void) static void omap4_l2x0_disable(void) { + outer_flush_all(); /* Disable PL310 L2 Cache controller */ omap_smc1(0x102, 0x0); } diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 56cebb05509e15..d23c77fadb3103 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c @@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = { /* gpmc */ static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { - { .irq = 20 }, + { .irq = 20 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = { }; static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { - { .irq = 52 }, + { .irq = 52 + OMAP_INTC_START, }, { .irq = -1 } }; diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index d33742908f970a..4c3b1e6df50806 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ 
b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = { }; static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { - { .irq = 20 }, + { .irq = 20 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = { static struct omap_hwmod omap3xxx_mmu_isp_hwmod; static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { - { .irq = 24 }, + { .irq = 24 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = { static struct omap_hwmod omap3xxx_mmu_iva_hwmod; static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { - { .irq = 28 }, + { .irq = 28 + OMAP_INTC_START, }, { .irq = -1 } }; diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index db32d5380b118c..18f333c440db3b 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = { .class = &dra7xx_uart_hwmod_class, .clkdm_name = "l4per_clkdm", .main_clk = "uart1_gfclk_mux", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS, .prcm = { .omap4 = { .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h index 2a086e8373eb7b..958cd6af938425 100644 --- a/arch/arm/mach-pxa/include/mach/lubbock.h +++ b/arch/arm/mach-pxa/include/mach/lubbock.h @@ -10,6 +10,8 @@ * published by the Free Software Foundation. */ +#include + #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 958e3cbf0ac2f2..8ea87bd45c330a 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = { .id = 0, .dev = { .platform_data = &lcdc0_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = { .id = 1, .dev = { .platform_data = &hdmi_lcdc_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), }; +/* Fixed 3.3V regulator used by LCD backlight */ +static struct regulator_consumer_supply fixed5v0_power_consumers[] = { + REGULATOR_SUPPLY("power", "pwm-backlight.0"), +}; + /* Fixed 3.3V regulator to be used by SDHI0 */ static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), @@ -1196,6 +1201,8 @@ static void __init eva_init(void) regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers, + ARRAY_SIZE(fixed5v0_power_consumers), 5000000); pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index 38611526fe9a55..3c4995aebd2208 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c @@ -679,7 +679,7 @@ static void __init bockw_init(void) .id = 
i, .data = &rsnd_card_info[i], .size_data = sizeof(struct asoc_simple_card_info), - .dma_mask = ~0, + .dma_mask = DMA_BIT_MASK(32), }; platform_device_register_full(&cardinfo); diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index fe689b7fdc9e71..bc40b853ffd3cf 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c @@ -334,7 +334,7 @@ static struct platform_device lcdc_device = { .resource = lcdc_resources, .dev = { .platform_data = &lcdc_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index af06753eb80925..e721d2ccceaef8 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -409,7 +409,7 @@ static struct platform_device lcdc_device = { .resource = lcdc_resources, .dev = { .platform_data = &lcdc_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = { .id = 1, .dev = { .platform_data = &hdmi_lcdc_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index c9e72c89066ad7..bce0d4277f7197 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig @@ -12,3 +12,4 @@ config ARCH_SUNXI select PINCTRL_SUNXI select SPARSE_IRQ select SUN4I_TIMER + select SUN5I_HSTIMER diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6d5ba9afb16a44..3387e60e4ea381 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) unsigned long i; if (cache_is_vipt_nonaliasing()) { for (i = 0; i < (1 << compound_order(page)); i++) { - void *addr = kmap_atomic(page); + void *addr = kmap_atomic(page + i); __cpuc_flush_dcache_area(addr, PAGE_SIZE); kunmap_atomic(addr); } } else { for (i = 0; i < (1 << compound_order(page)); i++) { - void *addr = kmap_high_get(page); + void *addr = kmap_high_get(page + i); if (addr) { __cpuc_flush_dcache_area(addr, PAGE_SIZE); - kunmap_high(page); + kunmap_high(page + i); } } } diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 1f7b19a4706067..3e8f106ee5fe01 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc) #ifdef CONFIG_ZONE_DMA if (mdesc->dma_zone_size) { arm_dma_zone_size = mdesc->dma_zone_size; - arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1; + arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; } else arm_dma_limit = 0xffffffff; arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 9ed155ad0f97c4..271b5e9715682a 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -641,10 +641,10 @@ static int build_body(struct jit_ctx *ctx) emit(ARM_MUL(r_A, r_A, r_X), ctx); break; case BPF_S_ALU_DIV_K: - /* current k == reciprocal_value(userspace k) */ + if (k == 1) + break; emit_mov_i(r_scratch, k, ctx); - /* A = top 32 bits of the product */ - emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx); + emit_udiv(r_A, r_A, r_scratch, ctx); break; case BPF_S_ALU_DIV_X: update_on_xread(ctx); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6d4dd22ee4b7bc..dd4327f09ba45b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2,6 +2,7 @@ config 
ARM64 def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select ARCH_WANT_FRAME_POINTERS @@ -11,19 +12,27 @@ config ARM64 select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK + select CPU_PM if (SUSPEND || CPU_IDLE) + select DCACHE_WORD_ACCESS select GENERIC_CLOCKEVENTS + select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL select HARDIRQS_SW_RESEND + select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_TRACEHOOK select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS + select HAVE_DMA_CONTIGUOUS + select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_MEMBLOCK @@ -275,6 +284,24 @@ config SYSVIPC_COMPAT endmenu +menu "Power management options" + +source "kernel/power/Kconfig" + +config ARCH_SUSPEND_POSSIBLE + def_bool y + +config ARM64_CPU_SUSPEND + def_bool PM_SLEEP + +endmenu + +menu "CPU Power Management" + +source "drivers/cpuidle/Kconfig" + +endmenu + source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts index 519c4b2c06873d..4a060906809d5c 100644 --- a/arch/arm64/boot/dts/foundation-v8.dts +++ b/arch/arm64/boot/dts/foundation-v8.dts @@ -224,7 +224,7 @@ virtio_block@0130000 { compatible = "virtio,mmio"; - reg = <0x130000 0x1000>; + reg = <0x130000 0x200>; interrupts = <42>; }; }; diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi index b45e5f39f57794..2f2ecd217363d4 100644 --- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi @@ -183,6 +183,12 @@ clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>; clock-names = "clcdclk", "apb_pclk"; }; + + virtio_block@0130000 { + compatible = "virtio,mmio"; + reg = <0x130000 0x200>; + interrupts = <42>; + }; }; v2m_fixed_3v3: fixedregulator@0 { diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 519f89f5b6a3de..d0ff25de67cade 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -26,7 +26,6 @@ generic-y += mman.h generic-y += msgbuf.h generic-y += mutex.h generic-y += pci.h -generic-y += percpu.h generic-y += poll.h generic-y += posix_types.h generic-y += resource.h diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index d4a63338a53c49..78e20ba8806b8b 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -35,10 +35,60 @@ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #else + #define smp_mb() asm volatile("dmb ish" : : : "memory") #define smp_rmb() asm volatile("dmb ishld" : : : "memory") #define smp_wmb() asm volatile("dmb ishst" : : : "memory") + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + 
switch (sizeof(*p)) { \ + case 4: \ + asm volatile ("stlr %w1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ + case 8: \ + asm volatile ("stlr %1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ + } \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 4: \ + asm volatile ("ldar %w0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ + case 8: \ + asm volatile ("ldar %0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ + } \ + ___p1; \ +}) + #endif #define read_barrier_depends() do { } while(0) diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 3914c0dcd09cf0..56166d7f4a2588 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -158,17 +158,23 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, return ret; } -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -#define cmpxchg_local(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) +#define cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \ + sizeof(*(ptr))); \ + __ret; \ +}) + +#define cmpxchg_local(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr))); \ + __ret; \ +}) #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index c4cdb5e5b73d24..152413076503ba 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -39,6 +39,9 @@ struct device_node; * from the cpu to be killed. * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the * cpu being killed. + * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing + * to wrong parameters or error conditions. Called from the + * CPU being suspended. Must be called with IRQs disabled. 
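For illustration only, not part of the patches: a minimal sketch of how the smp_store_release()/smp_load_acquire() helpers introduced above (smp_mb()-based on 32-bit ARM and on non-SMP arm64, single STLR/LDAR instructions on SMP arm64) are meant to pair up for a flag/payload handoff. The handoff structure and function names are invented for the example.

#include <asm/barrier.h>
#include <asm/processor.h>

struct handoff {
	int payload;
	int ready;
};

static struct handoff shared;

static void producer(int value)
{
	shared.payload = value;
	/* release: the payload store above is ordered before ready is seen as 1 */
	smp_store_release(&shared.ready, 1);
}

static int consumer(void)
{
	/* acquire: once ready is observed as 1, the matching payload store is visible */
	while (!smp_load_acquire(&shared.ready))
		cpu_relax();
	return shared.payload;
}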
*/ struct cpu_operations { const char *name; @@ -50,6 +53,9 @@ struct cpu_operations { int (*cpu_disable)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); #endif +#ifdef CONFIG_ARM64_CPU_SUSPEND + int (*cpu_suspend)(unsigned long); +#endif }; extern const struct cpu_operations *cpu_ops[NR_CPUS]; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 5fe138e0b828f1..c404fb0df3a673 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -16,23 +16,23 @@ #ifndef __ASM_CPUTYPE_H #define __ASM_CPUTYPE_H -#define ID_MIDR_EL1 "midr_el1" -#define ID_MPIDR_EL1 "mpidr_el1" -#define ID_CTR_EL0 "ctr_el0" - -#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1" -#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1" -#define ID_AA64AFR0_EL1 "id_aa64afr0_el1" -#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1" -#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1" - #define INVALID_HWID ULONG_MAX #define MPIDR_HWID_BITMASK 0xff00ffffff +#define MPIDR_LEVEL_BITS_SHIFT 3 +#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT) +#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) + +#define MPIDR_LEVEL_SHIFT(level) \ + (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT) + +#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ + ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK) + #define read_cpuid(reg) ({ \ u64 __val; \ - asm("mrs %0, " reg : "=r" (__val)); \ + asm("mrs %0, " #reg : "=r" (__val)); \ __val; \ }) @@ -54,12 +54,12 @@ */ static inline u32 __attribute_const__ read_cpuid_id(void) { - return read_cpuid(ID_MIDR_EL1); + return read_cpuid(MIDR_EL1); } static inline u64 __attribute_const__ read_cpuid_mpidr(void) { - return read_cpuid(ID_MPIDR_EL1); + return read_cpuid(MPIDR_EL1); } static inline unsigned int __attribute_const__ read_cpuid_implementor(void) @@ -74,7 +74,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void) static inline u32 __attribute_const__ read_cpuid_cachetype(void) { - return read_cpuid(ID_CTR_EL0); + return read_cpuid(CTR_EL0); } #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index a2232d07be9d63..62314791570cbd 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -62,6 +62,27 @@ struct task_struct; #define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */ +#define DBG_HOOK_HANDLED 0 +#define DBG_HOOK_ERROR 1 + +struct step_hook { + struct list_head node; + int (*fn)(struct pt_regs *regs, unsigned int esr); +}; + +void register_step_hook(struct step_hook *hook); +void unregister_step_hook(struct step_hook *hook); + +struct break_hook { + struct list_head node; + u32 esr_val; + u32 esr_mask; + int (*fn)(struct pt_regs *regs, unsigned int esr); +}; + +void register_break_hook(struct break_hook *hook); +void unregister_break_hook(struct break_hook *hook); + u8 debug_monitors_arch(void); void enable_debug_monitors(enum debug_el el); diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h new file mode 100644 index 00000000000000..d6aacb61ff4ae1 --- /dev/null +++ b/arch/arm64/include/asm/dma-contiguous.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
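For illustration only, not part of the patches: with the reworked read_cpuid() above, the system-register name is now passed as a plain token and stringified, and the new MPIDR_AFFINITY_LEVEL() helper extracts the affinity fields that the suspend/resume code later hashes. A minimal sketch; the function name is invented.

#include <linux/kernel.h>
#include <asm/cputype.h>

static void show_cpu_affinity(void)
{
	u64 mpidr = read_cpuid(MPIDR_EL1) & MPIDR_HWID_BITMASK;

	pr_info("MPIDR 0x%llx: Aff0=%llu Aff1=%llu Aff2=%llu\n",
		mpidr,
		MPIDR_AFFINITY_LEVEL(mpidr, 0),
		MPIDR_AFFINITY_LEVEL(mpidr, 1),
		MPIDR_AFFINITY_LEVEL(mpidr, 2));
}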
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ASM_DMA_CONTIGUOUS_H +#define _ASM_DMA_CONTIGUOUS_H + +#ifdef __KERNEL__ +#ifdef CONFIG_DMA_CMA + +#include +#include + +static inline void +dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { } + +#endif +#endif + +#endif diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index c582fa31636680..78cc3aba5d69e9 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -30,6 +30,7 @@ " cbnz %w3, 1b\n" \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ +" .align 2\n" \ "4: mov %w0, %w5\n" \ " b 3b\n" \ " .popsection\n" \ diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 990c051e7829a6..ae4801d77514ed 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -20,7 +20,7 @@ #include #include -#define NR_IPI 4 +#define NR_IPI 5 typedef struct { unsigned int __softirq_pending; diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h new file mode 100644 index 00000000000000..c44ad39ed31078 --- /dev/null +++ b/arch/arm64/include/asm/insn.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_INSN_H +#define __ASM_INSN_H +#include + +/* A64 instructions are always 32 bits. 
*/ +#define AARCH64_INSN_SIZE 4 + +/* + * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a + * Section C3.1 "A64 instruction index by encoding": + * AArch64 main encoding table + * Bit position + * 28 27 26 25 Encoding Group + * 0 0 - - Unallocated + * 1 0 0 - Data processing, immediate + * 1 0 1 - Branch, exception generation and system instructions + * - 1 - 0 Loads and stores + * - 1 0 1 Data processing - register + * 0 1 1 1 Data processing - SIMD and floating point + * 1 1 1 1 Data processing - SIMD and floating point + * "-" means "don't care" + */ +enum aarch64_insn_encoding_class { + AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */ + AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */ + AARCH64_INSN_CLS_DP_REG, /* Data processing - register */ + AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */ + AARCH64_INSN_CLS_LDST, /* Loads and stores */ + AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and + * system instructions */ +}; + +enum aarch64_insn_hint_op { + AARCH64_INSN_HINT_NOP = 0x0 << 5, + AARCH64_INSN_HINT_YIELD = 0x1 << 5, + AARCH64_INSN_HINT_WFE = 0x2 << 5, + AARCH64_INSN_HINT_WFI = 0x3 << 5, + AARCH64_INSN_HINT_SEV = 0x4 << 5, + AARCH64_INSN_HINT_SEVL = 0x5 << 5, +}; + +enum aarch64_insn_imm_type { + AARCH64_INSN_IMM_ADR, + AARCH64_INSN_IMM_26, + AARCH64_INSN_IMM_19, + AARCH64_INSN_IMM_16, + AARCH64_INSN_IMM_14, + AARCH64_INSN_IMM_12, + AARCH64_INSN_IMM_9, + AARCH64_INSN_IMM_MAX +}; + +enum aarch64_insn_branch_type { + AARCH64_INSN_BRANCH_NOLINK, + AARCH64_INSN_BRANCH_LINK, +}; + +#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ +static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ +{ return (code & (mask)) == (val); } \ +static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ +{ return (val); } + +__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) +__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) +__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) +__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) +__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) +__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) +__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) + +#undef __AARCH64_INSN_FUNCS + +bool aarch64_insn_is_nop(u32 insn); + +int aarch64_insn_read(void *addr, u32 *insnp); +int aarch64_insn_write(void *addr, u32 insn); +enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); +u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm); +u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type); +u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); +u32 aarch64_insn_gen_nop(void); + +bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); + +int aarch64_insn_patch_text_nosync(void *addr, u32 insn); +int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt); +int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); + +#endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 57276972722768..4cc813eddacbeb 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot extern void __iounmap(volatile void __iomem *addr); extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); -#define PROT_DEFAULT (pgprot_default | PTE_DIRTY) +#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN 
| PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) #define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h new file mode 100644 index 00000000000000..076a1c714049ff --- /dev/null +++ b/arch/arm64/include/asm/jump_label.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu + * + * Based on arch/arm/include/asm/jump_label.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_JUMP_LABEL_H +#define __ASM_JUMP_LABEL_H +#include +#include + +#ifdef __KERNEL__ + +#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE + +static __always_inline bool arch_static_branch(struct static_key *key) +{ + asm goto("1: nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %c0\n\t" + ".popsection\n\t" + : : "i"(key) : : l_yes); + + return false; +l_yes: + return true; +} + +#endif /* __KERNEL__ */ + +typedef u64 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 37762175896fe6..9dc5dc39fded63 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -146,8 +146,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ - ((void *)(kaddr) < (void *)high_memory)) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #endif diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h new file mode 100644 index 00000000000000..13fb0b3efc5f72 --- /dev/null +++ b/arch/arm64/include/asm/percpu.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_PERCPU_H +#define __ASM_PERCPU_H + +static inline void set_my_cpu_offset(unsigned long off) +{ + asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); +} + +static inline unsigned long __my_cpu_offset(void) +{ + unsigned long off; + register unsigned long *sp asm ("sp"); + + /* + * We want to allow caching the value, so avoid using volatile and + * instead use a fake stack read to hazard against barrier(). 
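For illustration only, not part of the patches: the generic static-key interface that the new arm64 arch_static_branch() above plugs into. The nop emitted at the "1:" label stays in place while the key is off; enabling the key lets the jump-label core patch it into a branch via the insn helpers. Key and function names are invented.

#include <linux/jump_label.h>
#include <linux/printk.h>

static struct static_key trace_key = STATIC_KEY_INIT_FALSE;

static void maybe_trace(int event)
{
	/* compiles down to a single nop while the key is off */
	if (static_key_false(&trace_key))
		pr_debug("event %d\n", event);
}

static void trace_enable(void)
{
	static_key_slow_inc(&trace_key);	/* patches the nop into a branch */
}

static void trace_disable(void)
{
	static_key_slow_dec(&trace_key);	/* restores the nop */
}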
+ */ + asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp)); + + return off; +} +#define __my_cpu_offset __my_cpu_offset() + +#include + +#endif /* __ASM_PERCPU_H */ diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 7cdf466fd0c5c8..0c657bb54597e4 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -26,11 +26,14 @@ #include struct mm_struct; +struct cpu_suspend_ctx; extern void cpu_cache_off(void); extern void cpu_do_idle(void); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); +extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); +extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index ed43a0d2b1b2c1..59e282311b582b 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -21,6 +21,19 @@ #include +struct mpidr_hash { + u64 mask; + u32 shift_aff[4]; + u32 bits; +}; + +extern struct mpidr_hash mpidr_hash; + +static inline u32 mpidr_hash_size(void) +{ + return 1 << mpidr_hash.bits; +} + /* * Logical CPU mapping. */ diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h new file mode 100644 index 00000000000000..e9c149c042e097 --- /dev/null +++ b/arch/arm64/include/asm/suspend.h @@ -0,0 +1,27 @@ +#ifndef __ASM_SUSPEND_H +#define __ASM_SUSPEND_H + +#define NR_CTX_REGS 11 + +/* + * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on + * the stack, which must be 16-byte aligned on v8 + */ +struct cpu_suspend_ctx { + /* + * This struct must be kept in sync with + * cpu_do_{suspend/resume} in mm/proc.S + */ + u64 ctx_regs[NR_CTX_REGS]; + u64 sp; +} __aligned(16); + +struct sleep_save_sp { + phys_addr_t *save_ptr_stash; + phys_addr_t save_ptr_stash_phys; +}; + +extern void cpu_resume(void); +extern int cpu_suspend(unsigned long); + +#endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 7ecc2b23882e44..6c0f684aca81ce 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -100,6 +100,7 @@ static inline void set_fs(mm_segment_t fs) }) #define access_ok(type, addr, size) __range_ok(addr, size) +#define user_addr_max get_fs /* * The "__xxx" versions of the user access functions do not verify the address @@ -240,9 +241,6 @@ extern unsigned long __must_check __copy_to_user(void __user *to, const void *fr extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n); extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); -extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); -extern unsigned long __must_check __strnlen_user(const char __user *s, long n); - static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) @@ -276,24 +274,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo return n; } -static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res = -EFAULT; - if (access_ok(VERIFY_READ, src, 1)) - res = __strncpy_from_user(dst, src, count); - return res; -} - -#define strlen_user(s) strnlen_user(s, ~0UL >> 1) +extern long strncpy_from_user(char *dest, const char __user *src, long 
count); -static inline long __must_check strnlen_user(const char __user *s, long n) -{ - unsigned long res = 0; - - if (__addr_ok(s)) - res = __strnlen_user(s, n); - - return res; -} +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); #endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h new file mode 100644 index 00000000000000..aab5bf09e9d902 --- /dev/null +++ b/arch/arm64/include/asm/word-at-a-time.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_WORD_AT_A_TIME_H +#define __ASM_WORD_AT_A_TIME_H + +#ifndef __AARCH64EB__ + +#include + +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, + const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +#define prep_zero_mask(a, bits, c) (bits) + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ + return fls64(mask) >> 3; +} + +#define zero_bytemask(mask) (mask) + +#else /* __AARCH64EB__ */ +#include +#endif + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. 
+ */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret, offset; + + /* Load word from unaligned pointer addr */ + asm( + "1: ldr %0, %3\n" + "2:\n" + " .pushsection .fixup,\"ax\"\n" + " .align 2\n" + "3: and %1, %2, #0x7\n" + " bic %2, %2, #0x7\n" + " ldr %0, [%2]\n" + " lsl %1, %1, #0x3\n" +#ifndef __AARCH64EB__ + " lsr %0, %0, %1\n" +#else + " lsl %0, %0, %1\n" +#endif + " b 2b\n" + " .popsection\n" + " .pushsection __ex_table,\"a\"\n" + " .align 3\n" + " .quad 1b, 3b\n" + " .popsection" + : "=&r" (ret), "=&r" (offset) + : "r" (addr), "Q" (*(unsigned long *)addr)); + + return ret; +} + +#endif /* __ASM_WORD_AT_A_TIME_H */ diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 9b12476e9c8567..73cf0f54d57cc2 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -22,6 +22,10 @@ #define HWCAP_FP (1 << 0) #define HWCAP_ASIMD (1 << 1) #define HWCAP_EVTSTRM (1 << 2) - +#define HWCAP_AES (1 << 3) +#define HWCAP_PMULL (1 << 4) +#define HWCAP_SHA1 (1 << 5) +#define HWCAP_SHA2 (1 << 6) +#define HWCAP_CRC32 (1 << 7) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 5ba2fd43a75bd2..2d4554b134100a 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -9,7 +9,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o + hyp-stub.o psci.o cpu_ops.o insn.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o @@ -18,6 +18,8 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o +arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index e7ee770c06978d..338b568cd8ae4a 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -29,13 +29,10 @@ #include - /* user mem (segment) */ -EXPORT_SYMBOL(__strnlen_user); -EXPORT_SYMBOL(__strncpy_from_user); - EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); + /* user mem (segment) */ EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(__clear_user); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 666e231d410b44..646f888387cd10 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include @@ -137,6 +139,15 @@ int main(void) DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); +#endif +#ifdef CONFIG_ARM64_CPU_SUSPEND + DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); + DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); + DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); + DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); + DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); + DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); + 
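For illustration only, not part of the patches: how the word-at-a-time helpers added in asm/word-at-a-time.h above are typically combined, in the spirit of the generic strnlen_user()/strncpy_from_user() implementations that arm64 now selects. Assumes a little-endian kernel and a buffer that may safely be read in whole-word chunks past the NUL; the function name is invented.

#include <asm/word-at-a-time.h>

static unsigned long wordwise_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *p = (const unsigned long *)s;
	unsigned long len = 0, data, bits;

	for (;;) {
		data = *p++;		/* scan eight bytes per iteration */
		if (has_zero(data, &bits, &constants)) {
			bits = prep_zero_mask(data, bits, &constants);
			return len + find_zero(create_zero_mask(bits));
		}
		len += sizeof(unsigned long);
	}
}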
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif return 0; } diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 4ae68579031db9..636ba8b6240b80 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -187,6 +187,48 @@ static void clear_regs_spsr_ss(struct pt_regs *regs) regs->pstate = spsr; } +/* EL1 Single Step Handler hooks */ +static LIST_HEAD(step_hook); +DEFINE_RWLOCK(step_hook_lock); + +void register_step_hook(struct step_hook *hook) +{ + write_lock(&step_hook_lock); + list_add(&hook->node, &step_hook); + write_unlock(&step_hook_lock); +} + +void unregister_step_hook(struct step_hook *hook) +{ + write_lock(&step_hook_lock); + list_del(&hook->node); + write_unlock(&step_hook_lock); +} + +/* + * Call registered single step handlers + * There is no Syndrome info to check for determining the handler. + * So we call all the registered handlers, until the right handler is + * found which returns zero. + */ +static int call_step_hook(struct pt_regs *regs, unsigned int esr) +{ + struct step_hook *hook; + int retval = DBG_HOOK_ERROR; + + read_lock(&step_hook_lock); + + list_for_each_entry(hook, &step_hook, node) { + retval = hook->fn(regs, esr); + if (retval == DBG_HOOK_HANDLED) + break; + } + + read_unlock(&step_hook_lock); + + return retval; +} + static int single_step_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -214,7 +256,9 @@ static int single_step_handler(unsigned long addr, unsigned int esr, */ user_rewind_single_step(current); } else { - /* TODO: route to KGDB */ + if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED) + return 0; + pr_warning("Unexpected kernel single-step exception at EL1\n"); /* * Re-enable stepping since we know that we will be @@ -226,11 +270,53 @@ static int single_step_handler(unsigned long addr, unsigned int esr, return 0; } +/* + * Breakpoint handler is re-entrant as another breakpoint can + * hit within breakpoint handler, especially in kprobes. + * Use reader/writer locks instead of plain spinlock. + */ +static LIST_HEAD(break_hook); +DEFINE_RWLOCK(break_hook_lock); + +void register_break_hook(struct break_hook *hook) +{ + write_lock(&break_hook_lock); + list_add(&hook->node, &break_hook); + write_unlock(&break_hook_lock); +} + +void unregister_break_hook(struct break_hook *hook) +{ + write_lock(&break_hook_lock); + list_del(&hook->node); + write_unlock(&break_hook_lock); +} + +static int call_break_hook(struct pt_regs *regs, unsigned int esr) +{ + struct break_hook *hook; + int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; + + read_lock(&break_hook_lock); + list_for_each_entry(hook, &break_hook, node) + if ((esr & hook->esr_mask) == hook->esr_val) + fn = hook->fn; + read_unlock(&break_hook_lock); + + return fn ?
fn(regs, esr) : DBG_HOOK_ERROR; +} + static int brk_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { siginfo_t info; + if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) + return 0; + + pr_warn("unexpected brk exception at %lx, esr=0x%x\n", + (long)instruction_pointer(regs), esr); + if (!user_mode(regs)) return -EFAULT; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 4d2c6f3f0c4186..39ac630d83de77 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -288,6 +288,8 @@ el1_dbg: /* * Debug exception handling */ + cmp x24, #ESR_EL1_EC_BRK64 // if BRK64 + cinc x24, x24, eq // set bit '0' tbz x24, #0, el1_inv // EL1 only mrs x0, far_el1 mov x2, sp // struct pt_regs @@ -314,7 +316,7 @@ el1_irq: #ifdef CONFIG_PREEMPT get_thread_info tsk - ldr w24, [tsk, #TI_PREEMPT] // restore preempt count + ldr w24, [tsk, #TI_PREEMPT] // get preempt count cbnz w24, 1f // preempt count != 0 ldr x0, [tsk, #TI_FLAGS] // get flags tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index bb785d23dbde87..4aef42a04bdcc3 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -17,6 +17,7 @@ * along with this program. If not, see . */ +#include #include #include #include @@ -113,6 +114,39 @@ EXPORT_SYMBOL(kernel_neon_end); #endif /* CONFIG_KERNEL_MODE_NEON */ +#ifdef CONFIG_CPU_PM +static int fpsimd_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + switch (cmd) { + case CPU_PM_ENTER: + if (current->mm) + fpsimd_save_state(¤t->thread.fpsimd_state); + break; + case CPU_PM_EXIT: + if (current->mm) + fpsimd_load_state(¤t->thread.fpsimd_state); + break; + case CPU_PM_ENTER_FAILED: + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static struct notifier_block fpsimd_cpu_pm_notifier_block = { + .notifier_call = fpsimd_cpu_pm_notifier, +}; + +static void fpsimd_pm_init(void) +{ + cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block); +} + +#else +static inline void fpsimd_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + /* * FP/SIMD support code initialisation. */ @@ -131,6 +165,8 @@ static int __init fpsimd_init(void) else elf_hwcap |= HWCAP_ASIMD; + fpsimd_pm_init(); + return 0; } late_initcall(fpsimd_init); diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c68cca5c352322..0b281fffda5199 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -482,8 +482,6 @@ ENDPROC(__create_page_tables) .type __switch_data, %object __switch_data: .quad __mmap_switched - .quad __data_loc // x4 - .quad _data // x5 .quad __bss_start // x6 .quad _end // x7 .quad processor_id // x4 @@ -498,15 +496,7 @@ __switch_data: __mmap_switched: adr x3, __switch_data + 8 - ldp x4, x5, [x3], #16 ldp x6, x7, [x3], #16 - cmp x4, x5 // Copy data segment if needed -1: ccmp x5, x6, #4, ne - b.eq 2f - ldr x16, [x4], #8 - str x16, [x5], #8 - b 1b -2: 1: cmp x6, x7 b.hs 2f str xzr, [x6], #8 // Clear BSS diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index ff516f6691e48b..f17f581116fc15 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -20,6 +20,7 @@ #define pr_fmt(fmt) "hw-breakpoint: " fmt +#include #include #include #include @@ -169,15 +170,68 @@ static enum debug_el debug_exception_level(int privilege) } } -/* - * Install a perf counter breakpoint. 
+enum hw_breakpoint_ops { + HW_BREAKPOINT_INSTALL, + HW_BREAKPOINT_UNINSTALL, + HW_BREAKPOINT_RESTORE +}; + +/** + * hw_breakpoint_slot_setup - Find and setup a perf slot according to + * operations + * + * @slots: pointer to array of slots + * @max_slots: max number of slots + * @bp: perf_event to setup + * @ops: operation to be carried out on the slot + * + * Return: + * slot index on success + * -ENOSPC if no slot is available/matches + * -EINVAL on wrong operations parameter */ -int arch_install_hw_breakpoint(struct perf_event *bp) +static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots, + struct perf_event *bp, + enum hw_breakpoint_ops ops) +{ + int i; + struct perf_event **slot; + + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + switch (ops) { + case HW_BREAKPOINT_INSTALL: + if (!*slot) { + *slot = bp; + return i; + } + break; + case HW_BREAKPOINT_UNINSTALL: + if (*slot == bp) { + *slot = NULL; + return i; + } + break; + case HW_BREAKPOINT_RESTORE: + if (*slot == bp) + return i; + break; + default: + pr_warn_once("Unhandled hw breakpoint ops %d\n", ops); + return -EINVAL; + } + } + return -ENOSPC; +} + +static int hw_breakpoint_control(struct perf_event *bp, + enum hw_breakpoint_ops ops) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); - struct perf_event **slot, **slots; + struct perf_event **slots; struct debug_info *debug_info = ¤t->thread.debug; int i, max_slots, ctrl_reg, val_reg, reg_enable; + enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege); u32 ctrl; if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { @@ -196,67 +250,54 @@ int arch_install_hw_breakpoint(struct perf_event *bp) reg_enable = !debug_info->wps_disabled; } - for (i = 0; i < max_slots; ++i) { - slot = &slots[i]; - - if (!*slot) { - *slot = bp; - break; - } - } - - if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) - return -ENOSPC; + i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops); - /* Ensure debug monitors are enabled at the correct exception level. */ - enable_debug_monitors(debug_exception_level(info->ctrl.privilege)); + if (WARN_ONCE(i < 0, "Can't find any breakpoint slot")) + return i; - /* Setup the address register. */ - write_wb_reg(val_reg, i, info->address); + switch (ops) { + case HW_BREAKPOINT_INSTALL: + /* + * Ensure debug monitors are enabled at the correct exception + * level. + */ + enable_debug_monitors(dbg_el); + /* Fall through */ + case HW_BREAKPOINT_RESTORE: + /* Setup the address register. */ + write_wb_reg(val_reg, i, info->address); + + /* Setup the control register. */ + ctrl = encode_ctrl_reg(info->ctrl); + write_wb_reg(ctrl_reg, i, + reg_enable ? ctrl | 0x1 : ctrl & ~0x1); + break; + case HW_BREAKPOINT_UNINSTALL: + /* Reset the control register. */ + write_wb_reg(ctrl_reg, i, 0); - /* Setup the control register. */ - ctrl = encode_ctrl_reg(info->ctrl); - write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1); + /* + * Release the debug monitors for the correct exception + * level. + */ + disable_debug_monitors(dbg_el); + break; + } return 0; } -void arch_uninstall_hw_breakpoint(struct perf_event *bp) +/* + * Install a perf counter breakpoint. 
+ */ +int arch_install_hw_breakpoint(struct perf_event *bp) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - struct perf_event **slot, **slots; - int i, max_slots, base; - - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { - /* Breakpoint */ - base = AARCH64_DBG_REG_BCR; - slots = this_cpu_ptr(bp_on_reg); - max_slots = core_num_brps; - } else { - /* Watchpoint */ - base = AARCH64_DBG_REG_WCR; - slots = this_cpu_ptr(wp_on_reg); - max_slots = core_num_wrps; - } - - /* Remove the breakpoint. */ - for (i = 0; i < max_slots; ++i) { - slot = &slots[i]; - - if (*slot == bp) { - *slot = NULL; - break; - } - } - - if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) - return; - - /* Reset the control register. */ - write_wb_reg(base, i, 0); + return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL); +} - /* Release the debug monitors for the correct exception level. */ - disable_debug_monitors(debug_exception_level(info->ctrl.privilege)); +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL); } static int get_hbp_len(u8 hbp_len) @@ -806,18 +847,36 @@ void hw_breakpoint_thread_switch(struct task_struct *next) /* * CPU initialisation. */ -static void reset_ctrl_regs(void *unused) +static void hw_breakpoint_reset(void *unused) { int i; - - for (i = 0; i < core_num_brps; ++i) { - write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL); - write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL); + struct perf_event **slots; + /* + * When a CPU goes through cold-boot, it does not have any installed + * slot, so it is safe to share the same function for restoring and + * resetting breakpoints; when a CPU is hotplugged in, it goes + * through the slots, which are all empty, hence it just resets control + * and value for debug registers. + * When this function is triggered on warm-boot through a CPU PM + * notifier some slots might be initialized; if so they are + * reprogrammed according to the debug slots content. + */ + for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) { + if (slots[i]) { + hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); + } else { + write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL); + } } - for (i = 0; i < core_num_wrps; ++i) { - write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL); - write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); + for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) { + if (slots[i]) { + hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); + } else { + write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); + } } } @@ -827,7 +886,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self, { int cpu = (long)hcpu; if (action == CPU_ONLINE) - smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1); + smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1); return NOTIFY_OK; } @@ -835,6 +894,14 @@ static struct notifier_block hw_breakpoint_reset_nb = { .notifier_call = hw_breakpoint_reset_notify, }; +#ifdef CONFIG_ARM64_CPU_SUSPEND +extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)); +#else +static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) +{ +} +#endif + /* * One-time initialisation. */ @@ -850,8 +917,8 @@ static int __init arch_hw_breakpoint_init(void) * Reset the breakpoint resources. We assume that a halting * debugger will leave the world in a nice state for us. 
*/ - smp_call_function(reset_ctrl_regs, NULL, 1); - reset_ctrl_regs(NULL); + smp_call_function(hw_breakpoint_reset, NULL, 1); + hw_breakpoint_reset(NULL); /* Register debug fault handlers. */ hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP, @@ -861,6 +928,8 @@ static int __init arch_hw_breakpoint_init(void) /* Register hotplug notifier. */ register_cpu_notifier(&hw_breakpoint_reset_nb); + /* Register cpu_suspend hw breakpoint restore hook */ + cpu_suspend_set_dbg_restorer(hw_breakpoint_reset); return 0; } diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c new file mode 100644 index 00000000000000..92f36835486b36 --- /dev/null +++ b/arch/arm64/kernel/insn.c @@ -0,0 +1,304 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include +#include +#include +#include +#include + +static int aarch64_insn_encoding_class[] = { + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_REG, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_FPSIMD, + AARCH64_INSN_CLS_DP_IMM, + AARCH64_INSN_CLS_DP_IMM, + AARCH64_INSN_CLS_BR_SYS, + AARCH64_INSN_CLS_BR_SYS, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_REG, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_FPSIMD, +}; + +enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn) +{ + return aarch64_insn_encoding_class[(insn >> 25) & 0xf]; +} + +/* NOP is an alias of HINT */ +bool __kprobes aarch64_insn_is_nop(u32 insn) +{ + if (!aarch64_insn_is_hint(insn)) + return false; + + switch (insn & 0xFE0) { + case AARCH64_INSN_HINT_YIELD: + case AARCH64_INSN_HINT_WFE: + case AARCH64_INSN_HINT_WFI: + case AARCH64_INSN_HINT_SEV: + case AARCH64_INSN_HINT_SEVL: + return false; + default: + return true; + } +} + +/* + * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always + * little-endian. 
+ */ +int __kprobes aarch64_insn_read(void *addr, u32 *insnp) +{ + int ret; + u32 val; + + ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +int __kprobes aarch64_insn_write(void *addr, u32 insn) +{ + insn = cpu_to_le32(insn); + return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE); +} + +static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) +{ + if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS) + return false; + + return aarch64_insn_is_b(insn) || + aarch64_insn_is_bl(insn) || + aarch64_insn_is_svc(insn) || + aarch64_insn_is_hvc(insn) || + aarch64_insn_is_smc(insn) || + aarch64_insn_is_brk(insn) || + aarch64_insn_is_nop(insn); +} + +/* + * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a + * Section B2.6.5 "Concurrent modification and execution of instructions": + * Concurrent modification and execution of instructions can lead to the + * resulting instruction performing any behavior that can be achieved by + * executing any sequence of instructions that can be executed from the + * same Exception level, except where the instruction before modification + * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC, + * or SMC instruction. + */ +bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn) +{ + return __aarch64_insn_hotpatch_safe(old_insn) && + __aarch64_insn_hotpatch_safe(new_insn); +} + +int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) +{ + u32 *tp = addr; + int ret; + + /* A64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + + ret = aarch64_insn_write(tp, insn); + if (ret == 0) + flush_icache_range((uintptr_t)tp, + (uintptr_t)tp + AARCH64_INSN_SIZE); + + return ret; +} + +struct aarch64_insn_patch { + void **text_addrs; + u32 *new_insns; + int insn_cnt; + atomic_t cpu_count; +}; + +static int __kprobes aarch64_insn_patch_text_cb(void *arg) +{ + int i, ret = 0; + struct aarch64_insn_patch *pp = arg; + + /* The first CPU becomes master */ + if (atomic_inc_return(&pp->cpu_count) == 1) { + for (i = 0; ret == 0 && i < pp->insn_cnt; i++) + ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], + pp->new_insns[i]); + /* + * aarch64_insn_patch_text_nosync() calls flush_icache_range(), + * which ends with "dsb; isb" pair guaranteeing global + * visibility. + */ + atomic_set(&pp->cpu_count, -1); + } else { + while (atomic_read(&pp->cpu_count) != -1) + cpu_relax(); + isb(); + } + + return ret; +} + +int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) +{ + struct aarch64_insn_patch patch = { + .text_addrs = addrs, + .new_insns = insns, + .insn_cnt = cnt, + .cpu_count = ATOMIC_INIT(0), + }; + + if (cnt <= 0) + return -EINVAL; + + return stop_machine(aarch64_insn_patch_text_cb, &patch, + cpu_online_mask); +} + +int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) +{ + int ret; + u32 insn; + + /* Unsafe to patch multiple instructions without synchronizaiton */ + if (cnt == 1) { + ret = aarch64_insn_read(addrs[0], &insn); + if (ret) + return ret; + + if (aarch64_insn_hotpatch_safe(insn, insns[0])) { + /* + * ARMv8 architecture doesn't guarantee all CPUs see + * the new instruction after returning from function + * aarch64_insn_patch_text_nosync(). So send IPIs to + * all other CPUs to achieve instruction + * synchronization. 
+ */ + ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]); + kick_all_cpus_sync(); + return ret; + } + } + + return aarch64_insn_patch_text_sync(addrs, insns, cnt); +} + +u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm) +{ + u32 immlo, immhi, lomask, himask, mask; + int shift; + + switch (type) { + case AARCH64_INSN_IMM_ADR: + lomask = 0x3; + himask = 0x7ffff; + immlo = imm & lomask; + imm >>= 2; + immhi = imm & himask; + imm = (immlo << 24) | (immhi); + mask = (lomask << 24) | (himask); + shift = 5; + break; + case AARCH64_INSN_IMM_26: + mask = BIT(26) - 1; + shift = 0; + break; + case AARCH64_INSN_IMM_19: + mask = BIT(19) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_16: + mask = BIT(16) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_14: + mask = BIT(14) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_12: + mask = BIT(12) - 1; + shift = 10; + break; + case AARCH64_INSN_IMM_9: + mask = BIT(9) - 1; + shift = 12; + break; + default: + pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", + type); + return 0; + } + + /* Update the immediate field. */ + insn &= ~(mask << shift); + insn |= (imm & mask) << shift; + + return insn; +} + +u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type) +{ + u32 insn; + long offset; + + /* + * PC: A 64-bit Program Counter holding the address of the current + * instruction. A64 instructions must be word-aligned. + */ + BUG_ON((pc & 0x3) || (addr & 0x3)); + + /* + * B/BL support [-128M, 128M) offset + * ARM64 virtual address arrangement guarantees all kernel and module + * texts are within +/-128M. + */ + offset = ((long)addr - (long)pc); + BUG_ON(offset < -SZ_128M || offset >= SZ_128M); + + if (type == AARCH64_INSN_BRANCH_LINK) + insn = aarch64_insn_get_bl_value(); + else + insn = aarch64_insn_get_b_value(); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, + offset >> 2); +} + +u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) +{ + return aarch64_insn_get_hint_value() | op; +} + +u32 __kprobes aarch64_insn_gen_nop(void) +{ + return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); +} diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c new file mode 100644 index 00000000000000..263a166291fbd9 --- /dev/null +++ b/arch/arm64/kernel/jump_label.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu + * + * Based on arch/arm/kernel/jump_label.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include +#include +#include + +#ifdef HAVE_JUMP_LABEL + +static void __arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type, + bool is_static) +{ + void *addr = (void *)entry->code; + u32 insn; + + if (type == JUMP_LABEL_ENABLE) { + insn = aarch64_insn_gen_branch_imm(entry->code, + entry->target, + AARCH64_INSN_BRANCH_NOLINK); + } else { + insn = aarch64_insn_gen_nop(); + } + + if (is_static) + aarch64_insn_patch_text_nosync(addr, insn); + else + aarch64_insn_patch_text(&addr, &insn, 1); +} + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + __arch_jump_label_transform(entry, type, false); +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + __arch_jump_label_transform(entry, type, true); +} + +#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index e2ad0d87721f74..1eb1cc95513953 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -25,6 +25,10 @@ #include #include #include +#include + +#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX +#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 void *module_alloc(unsigned long size) { @@ -94,28 +98,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) return 0; } -enum aarch64_imm_type { - INSN_IMM_MOVNZ, - INSN_IMM_MOVK, - INSN_IMM_ADR, - INSN_IMM_26, - INSN_IMM_19, - INSN_IMM_16, - INSN_IMM_14, - INSN_IMM_12, - INSN_IMM_9, -}; - -static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm) +static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, + int lsb, enum aarch64_insn_imm_type imm_type) { - u32 immlo, immhi, lomask, himask, mask; - int shift; + u64 imm, limit = 0; + s64 sval; + u32 insn = le32_to_cpu(*(u32 *)place); - /* The instruction stream is always little endian. */ - insn = le32_to_cpu(insn); + sval = do_reloc(op, place, val); + sval >>= lsb; + imm = sval & 0xffff; - switch (type) { - case INSN_IMM_MOVNZ: + if (imm_type == AARCH64_INSN_IMM_MOVNZ) { /* * For signed MOVW relocations, we have to manipulate the * instruction encoding depending on whether or not the @@ -134,70 +128,12 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm) */ imm = ~imm; } - case INSN_IMM_MOVK: - mask = BIT(16) - 1; - shift = 5; - break; - case INSN_IMM_ADR: - lomask = 0x3; - himask = 0x7ffff; - immlo = imm & lomask; - imm >>= 2; - immhi = imm & himask; - imm = (immlo << 24) | (immhi); - mask = (lomask << 24) | (himask); - shift = 5; - break; - case INSN_IMM_26: - mask = BIT(26) - 1; - shift = 0; - break; - case INSN_IMM_19: - mask = BIT(19) - 1; - shift = 5; - break; - case INSN_IMM_16: - mask = BIT(16) - 1; - shift = 5; - break; - case INSN_IMM_14: - mask = BIT(14) - 1; - shift = 5; - break; - case INSN_IMM_12: - mask = BIT(12) - 1; - shift = 10; - break; - case INSN_IMM_9: - mask = BIT(9) - 1; - shift = 12; - break; - default: - pr_err("encode_insn_immediate: unknown immediate encoding %d\n", - type); - return 0; + imm_type = AARCH64_INSN_IMM_MOVK; } - /* Update the immediate field. 
*/ - insn &= ~(mask << shift); - insn |= (imm & mask) << shift; - - return cpu_to_le32(insn); -} - -static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, - int lsb, enum aarch64_imm_type imm_type) -{ - u64 imm, limit = 0; - s64 sval; - u32 insn = *(u32 *)place; - - sval = do_reloc(op, place, val); - sval >>= lsb; - imm = sval & 0xffff; - /* Update the instruction with the new encoding. */ - *(u32 *)place = encode_insn_immediate(imm_type, insn, imm); + insn = aarch64_insn_encode_immediate(imm_type, insn, imm); + *(u32 *)place = cpu_to_le32(insn); /* Shift out the immediate field. */ sval >>= 16; @@ -206,9 +142,9 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, * For unsigned immediates, the overflow check is straightforward. * For signed immediates, the sign bit is actually the bit past the * most significant bit of the field. - * The INSN_IMM_16 immediate type is unsigned. + * The AARCH64_INSN_IMM_16 immediate type is unsigned. */ - if (imm_type != INSN_IMM_16) { + if (imm_type != AARCH64_INSN_IMM_16) { sval++; limit++; } @@ -221,11 +157,11 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, } static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val, - int lsb, int len, enum aarch64_imm_type imm_type) + int lsb, int len, enum aarch64_insn_imm_type imm_type) { u64 imm, imm_mask; s64 sval; - u32 insn = *(u32 *)place; + u32 insn = le32_to_cpu(*(u32 *)place); /* Calculate the relocation value. */ sval = do_reloc(op, place, val); @@ -236,7 +172,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val, imm = sval & imm_mask; /* Update the instruction's immediate field. */ - *(u32 *)place = encode_insn_immediate(imm_type, insn, imm); + insn = aarch64_insn_encode_immediate(imm_type, insn, imm); + *(u32 *)place = cpu_to_le32(insn); /* * Extract the upper value bits (including the sign bit) and @@ -318,125 +255,125 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, overflow_check = false; case R_AARCH64_MOVW_UABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G1_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G2_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G3: /* We're using the top bits so we can't overflow. 
*/ overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_SABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_SABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_SABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G0_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, - INSN_IMM_MOVK); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G0: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G1_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, - INSN_IMM_MOVK); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G1: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G2_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, - INSN_IMM_MOVK); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G2: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G3: /* We're using the top bits so we can't overflow. */ overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; /* Immediate instruction relocations. */ case R_AARCH64_LD_PREL_LO19: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, - INSN_IMM_19); + AARCH64_INSN_IMM_19); break; case R_AARCH64_ADR_PREL_LO21: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, - INSN_IMM_ADR); + AARCH64_INSN_IMM_ADR); break; case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, - INSN_IMM_ADR); + AARCH64_INSN_IMM_ADR); break; case R_AARCH64_ADD_ABS_LO12_NC: case R_AARCH64_LDST8_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST16_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST32_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST64_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST128_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_TSTBR14: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, - INSN_IMM_14); + AARCH64_INSN_IMM_14); break; case R_AARCH64_CONDBR19: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, - INSN_IMM_19); + AARCH64_INSN_IMM_19); break; case R_AARCH64_JUMP26: case R_AARCH64_CALL26: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, - INSN_IMM_26); + AARCH64_INSN_IMM_26); break; default: diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 0e63c98d224c57..5b1cd792274a34 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -22,6 +22,7 @@ #include #include 
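/*
 * Illustration (not part of the patch): the relocation helpers and
 * aarch64_insn_encode_immediate() above share one mask-and-shift pattern for
 * updating an instruction's immediate field. The user-space sketch below
 * applies it to the 26-bit branch immediate handled by R_AARCH64_JUMP26/CALL26
 * and aarch64_insn_gen_branch_imm(). The base opcodes (B = 0x14000000,
 * BL = 0x94000000) come from the A64 encoding; the addresses are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* Clear the immediate field, then OR in the masked new value. */
static uint32_t encode_imm(uint32_t insn, uint64_t imm, int width, int shift)
{
	uint32_t mask = (1u << width) - 1;

	insn &= ~(mask << shift);
	insn |= ((uint32_t)imm & mask) << shift;
	return insn;
}

int main(void)
{
	uint64_t pc   = 0xffffffc000080000ULL;	/* hypothetical branch site */
	uint64_t addr = 0xffffffc000080008ULL;	/* hypothetical target      */
	int64_t offset = (int64_t)(addr - pc);	/* must stay within +/-128M */

	/* B and BL take a signed 26-bit *word* offset, hence the >> 2. */
	uint32_t b  = encode_imm(0x14000000u, (uint64_t)(offset >> 2), 26, 0);
	uint32_t bl = encode_imm(0x94000000u, (uint64_t)(offset >> 2), 26, 0);

	printf("b  +8: 0x%08x\n", (unsigned)b);		/* prints 0x14000002 */
	printf("bl +8: 0x%08x\n", (unsigned)bl);	/* prints 0x94000002 */
	return 0;
}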
+#include #include #include #include @@ -362,27 +363,54 @@ validate_group(struct perf_event *event) return 0; } +static void +armpmu_disable_percpu_irq(void *data) +{ + unsigned int irq = *(unsigned int *)data; + disable_percpu_irq(irq); +} + static void armpmu_release_hardware(struct arm_pmu *armpmu) { - int i, irq, irqs; + int irq; + unsigned int i, irqs; struct platform_device *pmu_device = armpmu->plat_device; irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (!irqs) + return; - for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq >= 0) - free_irq(irq, armpmu); + irq = platform_get_irq(pmu_device, 0); + if (irq <= 0) + return; + + if (irq_is_percpu(irq)) { + on_each_cpu(armpmu_disable_percpu_irq, &irq, 1); + free_percpu_irq(irq, &cpu_hw_events); + } else { + for (i = 0; i < irqs; ++i) { + if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) + continue; + irq = platform_get_irq(pmu_device, i); + if (irq > 0) + free_irq(irq, armpmu); + } } } +static void +armpmu_enable_percpu_irq(void *data) +{ + unsigned int irq = *(unsigned int *)data; + enable_percpu_irq(irq, IRQ_TYPE_NONE); +} + static int armpmu_reserve_hardware(struct arm_pmu *armpmu) { - int i, err, irq, irqs; + int err, irq; + unsigned int i, irqs; struct platform_device *pmu_device = armpmu->plat_device; if (!pmu_device) { @@ -391,39 +419,59 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) } irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (irqs < 1) { + if (!irqs) { pr_err("no irqs for PMUs defined\n"); return -ENODEV; } - for (i = 0; i < irqs; ++i) { - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq < 0) - continue; + irq = platform_get_irq(pmu_device, 0); + if (irq <= 0) { + pr_err("failed to get valid irq for PMU device\n"); + return -ENODEV; + } - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. - */ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); - continue; - } + if (irq_is_percpu(irq)) { + err = request_percpu_irq(irq, armpmu->handle_irq, + "arm-pmu", &cpu_hw_events); - err = request_irq(irq, armpmu->handle_irq, - IRQF_NOBALANCING, - "arm-pmu", armpmu); if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); + pr_err("unable to request percpu IRQ%d for ARM PMU counters\n", + irq); armpmu_release_hardware(armpmu); return err; } - cpumask_set_cpu(i, &armpmu->active_irqs); + on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); + } else { + for (i = 0; i < irqs; ++i) { + err = 0; + irq = platform_get_irq(pmu_device, i); + if (irq <= 0) + continue; + + /* + * If we have a single PMU interrupt that we can't shift, + * assume that we're running on a uniprocessor machine and + * continue. Otherwise, continue without this interrupt. 
+ */ + if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); + continue; + } + + err = request_irq(irq, armpmu->handle_irq, + IRQF_NOBALANCING, + "arm-pmu", armpmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + armpmu_release_hardware(armpmu); + return err; + } + + cpumask_set_cpu(i, &armpmu->active_irqs); + } } return 0; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index de17c89985dbe6..248a15db37f2c0 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -98,8 +99,10 @@ void arch_cpu_idle(void) * This should do all the clock switching and wait for interrupt * tricks */ - cpu_do_idle(); - local_irq_enable(); + if (cpuidle_idle_call()) { + cpu_do_idle(); + local_irq_enable(); + } } #ifdef CONFIG_HOTPLUG_CPU @@ -308,6 +311,7 @@ struct task_struct *__switch_to(struct task_struct *prev, unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; + unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; @@ -315,9 +319,11 @@ unsigned long get_wchan(struct task_struct *p) frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); frame.pc = thread_saved_pc(p); + stack_page = (unsigned long)task_stack_page(p); do { - int ret = unwind_frame(&frame); - if (ret < 0) + if (frame.sp < stack_page || + frame.sp >= stack_page + THREAD_SIZE || + unwind_frame(&frame)) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index bd9bbd0e44edf1..c8e9effe52e10f 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -108,20 +108,95 @@ void __init early_print(const char *str, ...) printk("%s", buf); } +void __init smp_setup_processor_id(void) +{ + /* + * clear __my_cpu_offset on boot CPU to avoid hang caused by + * using percpu variable early, for example, lockdep will + * access percpu variable inside lock_release + */ + set_my_cpu_offset(0); +} + bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { return phys_id == cpu_logical_map(cpu); } +struct mpidr_hash mpidr_hash; +#ifdef CONFIG_SMP +/** + * smp_build_mpidr_hash - Pre-compute shifts required at each affinity + * level in order to build a linear index from an + * MPIDR value. Resulting algorithm is a collision + * free hash carried out through shifting and ORing + */ +static void __init smp_build_mpidr_hash(void) +{ + u32 i, affinity, fs[4], bits[4], ls; + u64 mask = 0; + /* + * Pre-scan the list of MPIDRS and filter out bits that do + * not contribute to affinity levels, ie they never toggle. + */ + for_each_possible_cpu(i) + mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); + pr_debug("mask of set bits %#llx\n", mask); + /* + * Find and stash the last and first bit set at all affinity levels to + * check how many bits are required to represent them. + */ + for (i = 0; i < 4; i++) { + affinity = MPIDR_AFFINITY_LEVEL(mask, i); + /* + * Find the MSB bit and LSB bits position + * to determine how many bits are required + * to express the affinity level. + */ + ls = fls(affinity); + fs[i] = affinity ? 
ffs(affinity) - 1 : 0; + bits[i] = ls - fs[i]; + } + /* + * An index can be created from the MPIDR_EL1 by isolating the + * significant bits at each affinity level and by shifting + * them in order to compress the 32 bits values space to a + * compressed set of values. This is equivalent to hashing + * the MPIDR_EL1 through shifting and ORing. It is a collision free + * hash though not minimal since some levels might contain a number + * of CPUs that is not an exact power of 2 and their bit + * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}. + */ + mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0]; + mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0]; + mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] - + (bits[1] + bits[0]); + mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + + fs[3] - (bits[2] + bits[1] + bits[0]); + mpidr_hash.mask = mask; + mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0]; + pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n", + mpidr_hash.shift_aff[0], + mpidr_hash.shift_aff[1], + mpidr_hash.shift_aff[2], + mpidr_hash.shift_aff[3], + mpidr_hash.mask, + mpidr_hash.bits); + /* + * 4x is an arbitrary value used to warn on a hash table much bigger + * than expected on most systems. + */ + if (mpidr_hash_size() > 4 * num_possible_cpus()) + pr_warn("Large number of MPIDR hash buckets detected\n"); + __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash)); +} +#endif + static void __init setup_processor(void) { struct cpu_info *cpu_info; + u64 features, block; - /* - * locate processor in the list of supported processor - * types. The linker builds this table for us from the - * entries in arch/arm/mm/proc.S - */ cpu_info = lookup_processor_type(read_cpuid_id()); if (!cpu_info) { printk("CPU configuration botched (ID %08x), unable to continue.\n", @@ -136,6 +211,37 @@ static void __init setup_processor(void) sprintf(init_utsname()->machine, ELF_PLATFORM); elf_hwcap = 0; + + /* + * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks. + * The blocks we test below represent incremental functionality + * for non-negative values. Negative values are reserved. + */ + features = read_cpuid(ID_AA64ISAR0_EL1); + block = (features >> 4) & 0xf; + if (!(block & 0x8)) { + switch (block) { + default: + case 2: + elf_hwcap |= HWCAP_PMULL; + case 1: + elf_hwcap |= HWCAP_AES; + case 0: + break; + } + } + + block = (features >> 8) & 0xf; + if (block && !(block & 0x8)) + elf_hwcap |= HWCAP_SHA1; + + block = (features >> 12) & 0xf; + if (block && !(block & 0x8)) + elf_hwcap |= HWCAP_SHA2; + + block = (features >> 16) & 0xf; + if (block && !(block & 0x8)) + elf_hwcap |= HWCAP_CRC32; } static void __init setup_machine_fdt(phys_addr_t dt_phys) @@ -236,6 +342,7 @@ void __init setup_arch(char **cmdline_p) cpu_read_bootcpu_ops(); #ifdef CONFIG_SMP smp_init_cpus(); + smp_build_mpidr_hash(); #endif #ifdef CONFIG_VT @@ -275,6 +382,11 @@ static const char *hwcap_str[] = { "fp", "asimd", "evtstrm", + "aes", + "pmull", + "sha1", + "sha2", + "crc32", NULL }; diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S new file mode 100644 index 00000000000000..b1925729c692c7 --- /dev/null +++ b/arch/arm64/kernel/sleep.S @@ -0,0 +1,184 @@ +#include +#include +#include +#include + + .text +/* + * Implementation of MPIDR_EL1 hash algorithm through shifting + * and OR'ing. 
+ * + * @dst: register containing hash result + * @rs0: register containing affinity level 0 bit shift + * @rs1: register containing affinity level 1 bit shift + * @rs2: register containing affinity level 2 bit shift + * @rs3: register containing affinity level 3 bit shift + * @mpidr: register containing MPIDR_EL1 value + * @mask: register containing MPIDR mask + * + * Pseudo C-code: + * + *u32 dst; + * + *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) { + * u32 aff0, aff1, aff2, aff3; + * u64 mpidr_masked = mpidr & mask; + * aff0 = mpidr_masked & 0xff; + * aff1 = mpidr_masked & 0xff00; + * aff2 = mpidr_masked & 0xff0000; + * aff2 = mpidr_masked & 0xff00000000; + * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3); + *} + * Input registers: rs0, rs1, rs2, rs3, mpidr, mask + * Output register: dst + * Note: input and output registers must be disjoint register sets + (eg: a macro instance with mpidr = x1 and dst = x1 is invalid) + */ + .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask + and \mpidr, \mpidr, \mask // mask out MPIDR bits + and \dst, \mpidr, #0xff // mask=aff0 + lsr \dst ,\dst, \rs0 // dst=aff0>>rs0 + and \mask, \mpidr, #0xff00 // mask = aff1 + lsr \mask ,\mask, \rs1 + orr \dst, \dst, \mask // dst|=(aff1>>rs1) + and \mask, \mpidr, #0xff0000 // mask = aff2 + lsr \mask ,\mask, \rs2 + orr \dst, \dst, \mask // dst|=(aff2>>rs2) + and \mask, \mpidr, #0xff00000000 // mask = aff3 + lsr \mask ,\mask, \rs3 + orr \dst, \dst, \mask // dst|=(aff3>>rs3) + .endm +/* + * Save CPU state for a suspend. This saves callee registers, and allocates + * space on the kernel stack to save the CPU specific registers + some + * other data for resume. + * + * x0 = suspend finisher argument + */ +ENTRY(__cpu_suspend) + stp x29, lr, [sp, #-96]! + stp x19, x20, [sp,#16] + stp x21, x22, [sp,#32] + stp x23, x24, [sp,#48] + stp x25, x26, [sp,#64] + stp x27, x28, [sp,#80] + mov x2, sp + sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx + mov x1, sp + /* + * x1 now points to struct cpu_suspend_ctx allocated on the stack + */ + str x2, [x1, #CPU_CTX_SP] + ldr x2, =sleep_save_sp + ldr x2, [x2, #SLEEP_SAVE_SP_VIRT] +#ifdef CONFIG_SMP + mrs x7, mpidr_el1 + ldr x9, =mpidr_hash + ldr x10, [x9, #MPIDR_HASH_MASK] + /* + * Following code relies on the struct mpidr_hash + * members size. + */ + ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] + ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] + compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 + add x2, x2, x8, lsl #3 +#endif + bl __cpu_suspend_finisher + /* + * Never gets here, unless suspend fails. 
+ * Successful cpu_suspend should return from cpu_resume, returning + * through this code path is considered an error + * If the return value is set to 0 force x0 = -EOPNOTSUPP + * to make sure a proper error condition is propagated + */ + cmp x0, #0 + mov x3, #-EOPNOTSUPP + csel x0, x3, x0, eq + add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer + ldp x19, x20, [sp, #16] + ldp x21, x22, [sp, #32] + ldp x23, x24, [sp, #48] + ldp x25, x26, [sp, #64] + ldp x27, x28, [sp, #80] + ldp x29, lr, [sp], #96 + ret +ENDPROC(__cpu_suspend) + .ltorg + +/* + * x0 must contain the sctlr value retrieved from restored context + */ +ENTRY(cpu_resume_mmu) + ldr x3, =cpu_resume_after_mmu + msr sctlr_el1, x0 // restore sctlr_el1 + isb + br x3 // global jump to virtual address +ENDPROC(cpu_resume_mmu) +cpu_resume_after_mmu: + mov x0, #0 // return zero on success + ldp x19, x20, [sp, #16] + ldp x21, x22, [sp, #32] + ldp x23, x24, [sp, #48] + ldp x25, x26, [sp, #64] + ldp x27, x28, [sp, #80] + ldp x29, lr, [sp], #96 + ret +ENDPROC(cpu_resume_after_mmu) + + .data +ENTRY(cpu_resume) + bl el2_setup // if in EL2 drop to EL1 cleanly +#ifdef CONFIG_SMP + mrs x1, mpidr_el1 + adr x4, mpidr_hash_ptr + ldr x5, [x4] + add x8, x4, x5 // x8 = struct mpidr_hash phys address + /* retrieve mpidr_hash members to compute the hash */ + ldr x2, [x8, #MPIDR_HASH_MASK] + ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] + ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)] + compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2 + /* x7 contains hash index, let's use it to grab context pointer */ +#else + mov x7, xzr +#endif + adr x0, sleep_save_sp + ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] + ldr x0, [x0, x7, lsl #3] + /* load sp from context */ + ldr x2, [x0, #CPU_CTX_SP] + adr x1, sleep_idmap_phys + /* load physical address of identity map page table in x1 */ + ldr x1, [x1] + mov sp, x2 + /* + * cpu_do_resume expects x0 to contain context physical address + * pointer and x1 to contain physical address of 1:1 page tables + */ + bl cpu_do_resume // PC relative jump, MMU off + b cpu_resume_mmu // Resume MMU, never returns +ENDPROC(cpu_resume) + + .align 3 +mpidr_hash_ptr: + /* + * offset of mpidr_hash symbol from current location + * used to obtain run-time mpidr_hash address with MMU off + */ + .quad mpidr_hash - . +/* + * physical address of identity mapped page tables + */ + .type sleep_idmap_phys, #object +ENTRY(sleep_idmap_phys) + .quad 0 +/* + * struct sleep_save_sp { + * phys_addr_t *save_ptr_stash; + * phys_addr_t save_ptr_stash_phys; + * }; + */ + .type sleep_save_sp, #object +ENTRY(sleep_save_sp) + .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index a0c2ca602cf85b..1b7617ab499be7 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -61,6 +61,7 @@ enum ipi_msg_type { IPI_CALL_FUNC, IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, + IPI_TIMER, }; /* @@ -122,8 +123,6 @@ asmlinkage void secondary_start_kernel(void) struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); - printk("CPU%u: Booted secondary processor\n", cpu); - /* * All kernel threads share the same mm context; grab a * reference and switch to it. @@ -132,6 +131,9 @@ asmlinkage void secondary_start_kernel(void) current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + printk("CPU%u: Booted secondary processor\n", cpu); + /* * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. 
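/*
 * Illustration (not part of the patch): a user-space model of the MPIDR hash
 * built by smp_build_mpidr_hash() above and consumed by the compute_mpidr_hash
 * macro in sleep.S. The four example MPIDR_EL1 values (two clusters of two
 * CPUs each) are hypothetical; the shift/mask arithmetic mirrors the kernel
 * code so the resulting indices are compact and collision free.
 */
#include <stdint.h>
#include <stdio.h>

#define MPIDR_LEVEL_SHIFT(l)		(((l) == 3) ? 32 : (l) * 8)
#define MPIDR_AFFINITY_LEVEL(m, l)	(((m) >> MPIDR_LEVEL_SHIFT(l)) & 0xff)

int main(void)
{
	uint64_t mpidr[] = { 0x000, 0x001, 0x100, 0x101 };	/* hypothetical */
	int ncpus = 4, i, l;
	uint64_t mask = 0;
	unsigned fs[4], bits[4], shift_aff[4];

	/* Bits that never toggle across CPUs carry no information. */
	for (i = 0; i < ncpus; i++)
		mask |= mpidr[i] ^ mpidr[0];

	/* First/last set bit per affinity level (ffs()/fls() in the kernel). */
	for (l = 0; l < 4; l++) {
		unsigned aff = MPIDR_AFFINITY_LEVEL(mask, l);
		unsigned ls = aff ? 32 - __builtin_clz(aff) : 0;
		fs[l] = aff ? __builtin_ctz(aff) : 0;
		bits[l] = ls - fs[l];
	}

	/* Same shift computation as smp_build_mpidr_hash(). */
	shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] - (bits[1] + bits[0]);
	shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + fs[3] -
		       (bits[2] + bits[1] + bits[0]);

	/* Index each CPU exactly like the compute_mpidr_hash asm macro. */
	for (i = 0; i < ncpus; i++) {
		uint64_t m = mpidr[i] & mask;
		unsigned idx = ((m & 0xff) >> shift_aff[0]) |
			       ((m & 0xff00) >> shift_aff[1]) |
			       ((m & 0xff0000) >> shift_aff[2]) |
			       ((m & 0xff00000000ULL) >> shift_aff[3]);

		printf("MPIDR 0x%03llx -> index %u\n",
		       (unsigned long long)mpidr[i], idx);
	}
	printf("hash table size: %u entries\n",
	       1u << (bits[0] + bits[1] + bits[2] + bits[3]));
	return 0;
}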
@@ -271,6 +273,7 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_prepare_boot_cpu(void) { + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); } static void (*smp_cross_call)(const struct cpumask *, unsigned int); @@ -447,6 +450,7 @@ static const char *ipi_types[NR_IPI] = { S(IPI_CALL_FUNC, "Function call interrupts"), S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), S(IPI_CPU_STOP, "CPU stop interrupts"), + S(IPI_TIMER, "Timer broadcast interrupts"), }; void show_ipi_list(struct seq_file *p, int prec) @@ -532,6 +536,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs) irq_exit(); break; +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + case IPI_TIMER: + irq_enter(); + tick_receive_broadcast(); + irq_exit(); + break; +#endif + default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); break; @@ -544,6 +556,13 @@ void smp_send_reschedule(int cpu) smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_TIMER); +} +#endif + void smp_send_stop(void) { unsigned long timeout; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d25459ff57fc18..c3b6c63ea5fb3a 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -43,7 +43,7 @@ int unwind_frame(struct stackframe *frame) low = frame->sp; high = ALIGN(low, THREAD_SIZE); - if (fp < low || fp > high || fp & 0xf) + if (fp < low || fp > high - 0x18 || fp & 0xf) return -EINVAL; frame->sp = fp + 0x10; diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c new file mode 100644 index 00000000000000..430344e2c98957 --- /dev/null +++ b/arch/arm64/kernel/suspend.c @@ -0,0 +1,132 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int __cpu_suspend(unsigned long); +/* + * This is called by __cpu_suspend() to save the state, and do whatever + * flushing is required to ensure that when the CPU goes to sleep we have + * the necessary data available when the caches are not searched. + * + * @arg: Argument to pass to suspend operations + * @ptr: CPU context virtual address + * @save_ptr: address of the location where the context physical address + * must be saved + */ +int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, + phys_addr_t *save_ptr) +{ + int cpu = smp_processor_id(); + + *save_ptr = virt_to_phys(ptr); + + cpu_do_suspend(ptr); + /* + * Only flush the context that must be retrieved with the MMU + * off. VA primitives ensure the flush is applied to all + * cache levels so context is pushed to DRAM. + */ + __flush_dcache_area(ptr, sizeof(*ptr)); + __flush_dcache_area(save_ptr, sizeof(*save_ptr)); + + return cpu_ops[cpu]->cpu_suspend(arg); +} + +/* + * This hook is provided so that cpu_suspend code can restore HW + * breakpoints as early as possible in the resume path, before reenabling + * debug exceptions. Code cannot be run from a CPU PM notifier since by the + * time the notifier runs debug exceptions might have been enabled already, + * with HW breakpoints registers content still in an unknown state. 
+ */ +void (*hw_breakpoint_restore)(void *); +void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) +{ + /* Prevent multiple restore hook initializations */ + if (WARN_ON(hw_breakpoint_restore)) + return; + hw_breakpoint_restore = hw_bp_restore; +} + +/** + * cpu_suspend + * + * @arg: argument to pass to the finisher function + */ +int cpu_suspend(unsigned long arg) +{ + struct mm_struct *mm = current->active_mm; + int ret, cpu = smp_processor_id(); + unsigned long flags; + + /* + * If cpu_ops have not been registered or suspend + * has not been initialized, cpu_suspend call fails early. + */ + if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) + return -EOPNOTSUPP; + + /* + * From this point debug exceptions are disabled to prevent + * updates to mdscr register (saved and restored along with + * general purpose registers) from kernel debuggers. + */ + local_dbg_save(flags); + + /* + * mm context saved on the stack, it will be restored when + * the cpu comes out of reset through the identity mapped + * page tables, so that the thread address space is properly + * set-up on function return. + */ + ret = __cpu_suspend(arg); + if (ret == 0) { + cpu_switch_mm(mm->pgd, mm); + flush_tlb_all(); + /* + * Restore HW breakpoint registers to sane values + * before debug exceptions are possibly reenabled + * through local_dbg_restore. + */ + if (hw_breakpoint_restore) + hw_breakpoint_restore(NULL); + } + + /* + * Restore pstate flags. OS lock and mdscr have been already + * restored, so from this point onwards, debugging is fully + * renabled if it was enabled when core started shutdown. + */ + local_dbg_restore(flags); + + return ret; +} + +extern struct sleep_save_sp sleep_save_sp; +extern phys_addr_t sleep_idmap_phys; + +static int cpu_suspend_init(void) +{ + void *ctx_ptr; + + /* ctx_ptr is an array of physical addresses */ + ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL); + + if (WARN_ON(!ctx_ptr)) + return -ENOMEM; + + sleep_save_sp.save_ptr_stash = ctx_ptr; + sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); + sleep_idmap_phys = virt_to_phys(idmap_pg_dir); + __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); + __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys)); + + return 0; +} +early_initcall(cpu_suspend_init); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 5161ad99209100..4ba7a55b49c7ef 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -99,17 +99,14 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); _data = .; - __data_loc = _data - LOAD_OFFSET; _sdata = .; RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) _edata = .; - _edata_loc = __data_loc + SIZEOF(.data); BSS_SECTION(0, 0, 0) _end = .; STABS_DEBUG - .comment 0 : { *(.comment) } } /* diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 59acc0ef04624d..328ce1a99daaa4 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,6 +1,4 @@ -lib-y := bitops.o delay.o \ - strncpy_from_user.o strnlen_user.o clear_user.o \ - copy_from_user.o copy_to_user.o copy_in_user.o \ - copy_page.o clear_page.o \ - memchr.o memcpy.o memmove.o memset.o \ +lib-y := bitops.o clear_user.o delay.o copy_from_user.o \ + copy_to_user.o copy_in_user.o copy_page.o \ + clear_page.o memchr.o memcpy.o memmove.o memset.o \ strchr.o strrchr.o diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S deleted file mode 100644 index 56e448a831a04d..00000000000000 --- a/arch/arm64/lib/strncpy_from_user.S +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Based on arch/arm/lib/strncpy_from_user.S - * - * Copyright (C) 1995-2000 Russell King - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include - - .text - .align 5 - -/* - * Copy a string from user space to kernel space. - * x0 = dst, x1 = src, x2 = byte length - * returns the number of characters copied (strlen of copied string), - * -EFAULT on exception, or "len" if we fill the whole buffer - */ -ENTRY(__strncpy_from_user) - mov x4, x1 -1: subs x2, x2, #1 - bmi 2f -USER(9f, ldrb w3, [x1], #1 ) - strb w3, [x0], #1 - cbnz w3, 1b - sub x1, x1, #1 // take NUL character out of count -2: sub x0, x1, x4 - ret -ENDPROC(__strncpy_from_user) - - .section .fixup,"ax" - .align 0 -9: strb wzr, [x0] // null terminate - mov x0, #-EFAULT - ret - .previous diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S deleted file mode 100644 index 7f7b176a5646b7..00000000000000 --- a/arch/arm64/lib/strnlen_user.S +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Based on arch/arm/lib/strnlen_user.S - * - * Copyright (C) 1995-2000 Russell King - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - - .text - .align 5 - -/* Prototype: unsigned long __strnlen_user(const char *str, long n) - * Purpose : get length of a string in user memory - * Params : str - address of string in user memory - * Returns : length of string *including terminator* - * or zero on exception, or n if too long - */ -ENTRY(__strnlen_user) - mov x2, x0 -1: subs x1, x1, #1 - b.mi 2f -USER(9f, ldrb w3, [x0], #1 ) - cbnz w3, 1b -2: sub x0, x0, x2 - ret -ENDPROC(__strnlen_user) - - .section .fixup,"ax" - .align 0 -9: mov x0, #0 - ret - .previous diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 4bd7579ec9e602..45b5ab54c9eeb0 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -33,17 +34,47 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { + if (dev == NULL) { + WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); + return NULL; + } + if (IS_ENABLED(CONFIG_ZONE_DMA32) && dev->coherent_dma_mask <= DMA_BIT_MASK(32)) flags |= GFP_DMA32; - return swiotlb_alloc_coherent(dev, size, dma_handle, flags); + if (IS_ENABLED(CONFIG_DMA_CMA)) { + struct page *page; + + page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, + get_order(size)); + if (!page) + return NULL; + + *dma_handle = phys_to_dma(dev, page_to_phys(page)); + return page_address(page); + } else { + return swiotlb_alloc_coherent(dev, size, dma_handle, flags); + } } static void arm64_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - swiotlb_free_coherent(dev, size, vaddr, dma_handle); + if (dev == NULL) { + WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); + return; + } + + if (IS_ENABLED(CONFIG_DMA_CMA)) { + phys_addr_t paddr = dma_to_phys(dev, dma_handle); + + dma_release_from_contiguous(dev, + phys_to_page(paddr), + size >> PAGE_SHIFT); + } else { + swiotlb_free_coherent(dev, size, vaddr, dma_handle); + } } static struct dma_map_ops arm64_swiotlb_dma_ops = { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 0cb8742de4f238..d0b4c2efda90aa 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -159,6 +160,8 @@ void __init arm64_memblock_init(void) memblock_reserve(base, size); } + dma_contiguous_reserve(0); + memblock_allow_resize(); memblock_dump_all(); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 0f7fec52c7f840..bed1f1de1caf07 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -80,6 +80,75 @@ ENTRY(cpu_do_idle) ret ENDPROC(cpu_do_idle) +#ifdef CONFIG_ARM64_CPU_SUSPEND +/** + * cpu_do_suspend - save CPU registers context + * + * x0: virtual address of context pointer + */ +ENTRY(cpu_do_suspend) + mrs x2, tpidr_el0 + mrs x3, tpidrro_el0 + mrs x4, contextidr_el1 + mrs x5, mair_el1 + mrs x6, cpacr_el1 + mrs x7, ttbr1_el1 + mrs x8, tcr_el1 + mrs x9, vbar_el1 + mrs x10, mdscr_el1 + mrs x11, oslsr_el1 + mrs x12, sctlr_el1 + stp x2, x3, [x0] + stp x4, x5, [x0, #16] + stp x6, x7, [x0, #32] + stp x8, x9, [x0, #48] + stp x10, x11, [x0, #64] + str x12, [x0, #80] + ret +ENDPROC(cpu_do_suspend) + +/** + * cpu_do_resume - restore CPU register context + * + * x0: Physical address of context pointer + * x1: ttbr0_el1 to be restored + * + * Returns: + * sctlr_el1 value in x0 + */ 
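/*
 * Illustration (not part of the patch): cpu_do_suspend above stores eleven
 * system registers at fixed 8-byte offsets and cpu_do_resume below reloads
 * them from the same offsets, handing the saved sctlr_el1 back in x0 so that
 * cpu_resume_mmu can write it as the final, MMU-enabling step. The struct
 * below is an illustrative stand-in, not the kernel's cpu_suspend_ctx; it
 * only mirrors the stp/ldp offsets so the layout contract can be checked in
 * plain C.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct suspend_regs {				/* illustrative stand-in */
	uint64_t tpidr_el0, tpidrro_el0;	/* stp x2,  x3,  [x0]      */
	uint64_t contextidr_el1, mair_el1;	/* stp x4,  x5,  [x0, #16] */
	uint64_t cpacr_el1, ttbr1_el1;		/* stp x6,  x7,  [x0, #32] */
	uint64_t tcr_el1, vbar_el1;		/* stp x8,  x9,  [x0, #48] */
	uint64_t mdscr_el1, oslsr_el1;		/* stp x10, x11, [x0, #64] */
	uint64_t sctlr_el1;			/* str x12,      [x0, #80] */
};

int main(void)
{
	/* The literal offsets in proc.S are the contract between the two. */
	static_assert(offsetof(struct suspend_regs, cpacr_el1) == 32, "layout");
	static_assert(offsetof(struct suspend_regs, mdscr_el1) == 64, "layout");
	static_assert(offsetof(struct suspend_regs, sctlr_el1) == 80, "layout");
	return 0;
}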
+ENTRY(cpu_do_resume) + /* + * Invalidate local tlb entries before turning on MMU + */ + tlbi vmalle1 + ldp x2, x3, [x0] + ldp x4, x5, [x0, #16] + ldp x6, x7, [x0, #32] + ldp x8, x9, [x0, #48] + ldp x10, x11, [x0, #64] + ldr x12, [x0, #80] + msr tpidr_el0, x2 + msr tpidrro_el0, x3 + msr contextidr_el1, x4 + msr mair_el1, x5 + msr cpacr_el1, x6 + msr ttbr0_el1, x1 + msr ttbr1_el1, x7 + msr tcr_el1, x8 + msr vbar_el1, x9 + msr mdscr_el1, x10 + /* + * Restore oslsr_el1 by writing oslar_el1 + */ + ubfx x11, x11, #1, #1 + msr oslar_el1, x11 + mov x0, x12 + dsb nsh // Make sure local tlb invalidation completed + isb + ret +ENDPROC(cpu_do_resume) +#endif + /* * cpu_switch_mm(pgd_phys, tsk) * diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h index 0961275373dbb6..715100790fd05c 100644 --- a/arch/avr32/include/asm/barrier.h +++ b/arch/avr32/include/asm/barrier.h @@ -8,22 +8,15 @@ #ifndef __ASM_AVR32_BARRIER_H #define __ASM_AVR32_BARRIER_H -#define nop() asm volatile("nop") - -#define mb() asm volatile("" : : : "memory") -#define rmb() mb() -#define wmb() asm volatile("sync 0" : : : "memory") -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while(0) +/* + * Weirdest thing ever.. no full barrier, but it has a write barrier! + */ +#define wmb() asm volatile("sync 0" : : : "memory") #ifdef CONFIG_SMP # error "The AVR32 port does not support SMP" -#else -# define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -# define smp_read_barrier_depends() do { } while(0) #endif +#include #endif /* __ASM_AVR32_BARRIER_H */ diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index ebb189507dd73f..19283a16ac0806 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h @@ -23,26 +23,10 @@ # define rmb() do { barrier(); smp_check_barrier(); } while (0) # define wmb() do { barrier(); smp_mark_barrier(); } while (0) # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) -#else -# define mb() barrier() -# define rmb() barrier() -# define wmb() barrier() -# define read_barrier_depends() do { } while (0) #endif -#else /* !CONFIG_SMP */ - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define read_barrier_depends() do { } while (0) - #endif /* !CONFIG_SMP */ -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define smp_read_barrier_depends() read_barrier_depends() +#include #endif /* _BLACKFIN_BARRIER_H */ diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b06caf649a954a..199b1a9dab8922 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -3,6 +3,7 @@ header-y += arch-v10/ header-y += arch-v32/ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += kvm_para.h diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h deleted file mode 100644 index 198ad7fa6b25ae..00000000000000 --- a/arch/cris/include/asm/barrier.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_CRIS_BARRIER_H -#define __ASM_CRIS_BARRIER_H - -#define nop() __asm__ __volatile__ ("nop"); - -#define barrier() __asm__ __volatile__("": : :"memory") -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifdef CONFIG_SMP 
-#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#endif /* __ASM_CRIS_BARRIER_H */ diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h index 06776ad9f5e9ca..abbef470154cd3 100644 --- a/arch/frv/include/asm/barrier.h +++ b/arch/frv/include/asm/barrier.h @@ -17,13 +17,7 @@ #define mb() asm volatile ("membar" : : :"memory") #define rmb() asm volatile ("membar" : : :"memory") #define wmb() asm volatile ("membar" : : :"memory") -#define read_barrier_depends() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do {} while(0) -#define set_mb(var, value) \ - do { var = (value); barrier(); } while (0) +#include #endif /* _ASM_BARRIER_H */ diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 67c3450309b745..ada843c701ef56 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -2,6 +2,7 @@ header-y += ucontext.h generic-y += auxvec.h +generic-y += barrier.h generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 8a64ff2337f651..7aae4cb2a29a21 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0) #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) - #define atomic_inc_return(v) (atomic_add_return(1, v)) #define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + #endif diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h index 1041a8e70ce85c..4e863daea25b70 100644 --- a/arch/hexagon/include/asm/barrier.h +++ b/arch/hexagon/include/asm/barrier.h @@ -29,10 +29,6 @@ #define smp_read_barrier_depends() barrier() #define smp_wmb() barrier() #define smp_mb() barrier() -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() /* Set a value and use a memory barrier. Used by the scheduler somewhere. */ #define set_mb(var, value) \ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 4e4119b0e6915f..a8c3a11dc5aba2 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -147,9 +147,6 @@ config PARAVIRT over full virtualization. However, when run without a hypervisor the kernel is theoretically slower and slightly larger. - -source "arch/ia64/xen/Kconfig" - endif choice @@ -175,7 +172,6 @@ config IA64_GENERIC SGI-SN2 For SGI Altix systems SGI-UV For SGI UV systems Ski-simulator For the HP simulator - Xen-domU For xen domU system If you don't know what to do, choose "generic". @@ -231,14 +227,6 @@ config IA64_HP_SIM bool "Ski-simulator" select SWIOTLB -config IA64_XEN_GUEST - bool "Xen guest" - select SWIOTLB - depends on XEN - help - Build a kernel that runs on Xen guest domain. At this moment only - 16KB page size in supported. 
- endchoice choice diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index be7bfa12b70532..f37238f45bcd08 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ -core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ core-$(CONFIG_KVM) += arch/ia64/kvm/ -core-$(CONFIG_XEN) += arch/ia64/xen/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig deleted file mode 100644 index b025acfde5c1dc..00000000000000 --- a/arch/ia64/configs/xen_domu_defconfig +++ /dev/null @@ -1,199 +0,0 @@ -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=20 -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARAVIRT_GUEST=y -CONFIG_IA64_XEN_GUEST=y -CONFIG_MCKINLEY=y -CONFIG_IA64_CYCLONE=y -CONFIG_SMP=y -CONFIG_NR_CPUS=16 -CONFIG_HOTPLUG_CPU=y -CONFIG_PERMIT_BSP_REMOVE=y -CONFIG_FORCE_CPEI_RETARGET=y -CONFIG_IA64_MCA_RECOVERY=y -CONFIG_PERFMON=y -CONFIG_IA64_PALINFO=y -CONFIG_KEXEC=y -CONFIG_EFI_VARS=y -CONFIG_BINFMT_MISC=m -CONFIG_ACPI_PROCFS=y -CONFIG_ACPI_BUTTON=m -CONFIG_ACPI_FAN=m -CONFIG_ACPI_PROCESSOR=m -CONFIG_ACPI_CONTAINER=m -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=m -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_ARPD=y -CONFIG_SYN_COOKIES=y -# CONFIG_INET_LRO is not set -# CONFIG_IPV6 is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_PIIX=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_QLOGIC_1280=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_MULTIPATH=m -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_ZERO=m -CONFIG_FUSION=y -CONFIG_FUSION_SPI=y -CONFIG_FUSION_FC=y -CONFIG_FUSION_CTL=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_TULIP=y -CONFIG_TULIP=m -CONFIG_NET_PCI=y -CONFIG_NET_VENDOR_INTEL=y -CONFIG_E100=m -CONFIG_E1000=y -CONFIG_TIGON3=y -CONFIG_NETCONSOLE=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_GAMEPORT=m -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=6 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y -CONFIG_RAW_DRIVER=m -CONFIG_HPET=y -CONFIG_AGP=m -CONFIG_DRM=m -CONFIG_DRM_TDFX=m -CONFIG_DRM_R128=m -CONFIG_DRM_RADEON=m -CONFIG_DRM_MGA=m -CONFIG_DRM_SIS=m -CONFIG_HID_GYRATION=y -CONFIG_HID_NTRIG=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y -CONFIG_HID_SUNPLUS=y -CONFIG_HID_TOPSEED=y -CONFIG_USB=y -CONFIG_USB_DEVICEFS=y -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_UHCI_HCD=y -CONFIG_USB_STORAGE=m 
-CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_REISERFS_FS=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_XFS_FS=y -CONFIG_AUTOFS_FS=y -CONFIG_AUTOFS4_FS=y -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_UDF_FS=m -CONFIG_VFAT_FS=y -CONFIG_NTFS_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V4=y -CONFIG_NFSD=m -CONFIG_NFSD_V4=y -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y -CONFIG_CIFS=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_SGI_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MUTEXES=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_IA64_GRANULE_16MB=y -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index faa1bf0da8153f..d651102a4d4579 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void) return "uv"; # elif defined (CONFIG_IA64_DIG) return "dig"; -# elif defined (CONFIG_IA64_XEN_GUEST) - return "xen"; # elif defined(CONFIG_IA64_DIG_VTD) return "dig_vtd"; # else diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 60576e06b6fb4b..d0a69aa35e27de 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -45,13 +45,36 @@ # define smp_rmb() rmb() # define smp_wmb() wmb() # define smp_read_barrier_depends() read_barrier_depends() + #else + # define smp_mb() barrier() # define smp_rmb() barrier() # define smp_wmb() barrier() # define smp_read_barrier_depends() do { } while(0) + #endif +/* + * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no + * need for asm trickery! 
+ */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + /* * XXX check on this ---I suspect what Linus really wants here is * acquire vs release semantics but we can't discuss this stuff with diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 2d1ad4b11a85a9..9c39bdfc2da894 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # include # elif defined (CONFIG_IA64_SGI_UV) # include -# elif defined (CONFIG_IA64_XEN_GUEST) -# include # elif defined (CONFIG_IA64_GENERIC) # ifdef MACHVEC_PLATFORM_HEADER diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h deleted file mode 100644 index 8b8bd0eb39236e..00000000000000 --- a/arch/ia64/include/asm/machvec_xen.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ASM_IA64_MACHVEC_XEN_h -#define _ASM_IA64_MACHVEC_XEN_h - -extern ia64_mv_setup_t dig_setup; -extern ia64_mv_cpu_init_t xen_cpu_init; -extern ia64_mv_irq_init_t xen_irq_init; -extern ia64_mv_send_ipi_t xen_platform_send_ipi; - -/* - * This stuff has dual use! - * - * For a generic kernel, the macros are used to initialize the - * platform's machvec structure. When compiling a non-generic kernel, - * the macros are used directly. - */ -#define ia64_platform_name "xen" -#define platform_setup dig_setup -#define platform_cpu_init xen_cpu_init -#define platform_irq_init xen_irq_init -#define platform_send_ipi xen_platform_send_ipi - -#endif /* _ASM_IA64_MACHVEC_XEN_h */ diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index 61c7b1750b169f..092f1c91b36c02 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h @@ -18,7 +18,6 @@ * - crash dumping code reserved region * - Kernel memory map built from EFI memory map * - ELF core header - * - xen start info if CONFIG_XEN * * More could be added if necessary */ diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index b149b88ea7953e..b53518a98026fa 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h @@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void); #ifdef CONFIG_PARAVIRT_GUEST #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 -#define PARAVIRT_HYPERVISOR_TYPE_XEN 1 #ifndef __ASSEMBLY__ diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h index 44ef9ef8f5b3ce..42b233bedeb5ee 100644 --- a/arch/ia64/include/asm/pvclock-abi.h +++ b/arch/ia64/include/asm/pvclock-abi.h @@ -11,7 +11,7 @@ /* * These structs MUST NOT be changed. * They are the ABI between hypervisor and guest OS. - * Both Xen and KVM are using this. + * KVM is using this. * * pvclock_vcpu_time_info holds the system time and the tsc timestamp * of the last update. So the guest can use the tsc delta to get a diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h deleted file mode 100644 index 593c12eeb270fe..00000000000000 --- a/arch/ia64/include/asm/sync_bitops.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef _ASM_IA64_SYNC_BITOPS_H -#define _ASM_IA64_SYNC_BITOPS_H - -/* - * Copyright (C) 2008 Isaku Yamahata - * - * Based on synch_bitops.h which Dan Magenhaimer wrote. 
- * - * bit operations which provide guaranteed strong synchronisation - * when communicating with Xen or other guest OSes running on other CPUs. - */ - -static inline void sync_set_bit(int nr, volatile void *addr) -{ - set_bit(nr, addr); -} - -static inline void sync_clear_bit(int nr, volatile void *addr) -{ - clear_bit(nr, addr); -} - -static inline void sync_change_bit(int nr, volatile void *addr) -{ - change_bit(nr, addr); -} - -static inline int sync_test_and_set_bit(int nr, volatile void *addr) -{ - return test_and_set_bit(nr, addr); -} - -static inline int sync_test_and_clear_bit(int nr, volatile void *addr) -{ - return test_and_clear_bit(nr, addr); -} - -static inline int sync_test_and_change_bit(int nr, volatile void *addr) -{ - return test_and_change_bit(nr, addr); -} - -static inline int sync_test_bit(int nr, const volatile void *addr) -{ - return test_bit(nr, addr); -} - -#define sync_cmpxchg(ptr, old, new) \ - ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new))) - -#endif /* _ASM_IA64_SYNC_BITOPS_H */ diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h deleted file mode 100644 index baa74c82aa71fc..00000000000000 --- a/arch/ia64/include/asm/xen/events.h +++ /dev/null @@ -1,41 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/events.h - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ -#ifndef _ASM_IA64_XEN_EVENTS_H -#define _ASM_IA64_XEN_EVENTS_H - -enum ipi_vector { - XEN_RESCHEDULE_VECTOR, - XEN_IPI_VECTOR, - XEN_CMCP_VECTOR, - XEN_CPEP_VECTOR, - - XEN_NR_IPIS, -}; - -static inline int xen_irqs_disabled(struct pt_regs *regs) -{ - return !(ia64_psr(regs)->i); -} - -#define irq_ctx_init(cpu) do { } while (0) - -#endif /* _ASM_IA64_XEN_EVENTS_H */ diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h deleted file mode 100644 index ed28bcd5bb85c1..00000000000000 --- a/arch/ia64/include/asm/xen/hypercall.h +++ /dev/null @@ -1,265 +0,0 @@ -/****************************************************************************** - * hypercall.h - * - * Linux-specific hypervisor handling. 
- * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef _ASM_IA64_XEN_HYPERCALL_H -#define _ASM_IA64_XEN_HYPERCALL_H - -#include -#include -#include -#include -struct xencomm_handle; -extern unsigned long __hypercall(unsigned long a1, unsigned long a2, - unsigned long a3, unsigned long a4, - unsigned long a5, unsigned long cmd); - -/* - * Assembler stubs for hyper-calls. - */ - -#define _hypercall0(type, name) \ -({ \ - long __res; \ - __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\ - (type)__res; \ -}) - -#define _hypercall1(type, name, a1) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - 0, 0, 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall2(type, name, a1, a2) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - 0, 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall3(type, name, a1, a2, a3) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall4(type, name, a1, a2, a3, a4) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - (unsigned long)a4, \ - 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - (unsigned long)a4, \ - (unsigned long)a5, \ - __HYPERVISOR_##name); \ - (type)__res; \ -}) - - -static inline int -xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, sched_op, cmd, arg); -} - -static inline long -HYPERVISOR_set_timer_op(u64 timeout) -{ - unsigned long timeout_hi = (unsigned long)(timeout >> 32); - unsigned long timeout_lo = (unsigned long)timeout; - return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); -} - -static inline int -xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list, - int nr_calls) -{ - return _hypercall2(int, multicall, call_list, nr_calls); -} - 
-static inline int -xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, memory_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, event_channel_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, xen_version, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_console_io(int cmd, int count, - struct xencomm_handle *str) -{ - return _hypercall3(int, console_io, cmd, count, str); -} - -static inline int -xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, physdev_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_grant_table_op(unsigned int cmd, - struct xencomm_handle *uop, - unsigned int count) -{ - return _hypercall3(int, grant_table_op, cmd, uop, count); -} - -int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); - -extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg); - -static inline int -xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, callback_op, cmd, arg); -} - -static inline long -xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg) -{ - return _hypercall3(long, vcpu_op, cmd, cpu, arg); -} - -static inline int -HYPERVISOR_physdev_op(int cmd, void *arg) -{ - switch (cmd) { - case PHYSDEVOP_eoi: - return _hypercall1(int, ia64_fast_eoi, - ((struct physdev_eoi *)arg)->irq); - default: - return xencomm_hypercall_physdev_op(cmd, arg); - } -} - -static inline long -xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg) -{ - return _hypercall1(long, opt_feature, arg); -} - -/* for balloon driver */ -#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0) - -/* Use xencomm to do hypercalls. 
*/ -#define HYPERVISOR_sched_op xencomm_hypercall_sched_op -#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op -#define HYPERVISOR_callback_op xencomm_hypercall_callback_op -#define HYPERVISOR_multicall xencomm_hypercall_multicall -#define HYPERVISOR_xen_version xencomm_hypercall_xen_version -#define HYPERVISOR_console_io xencomm_hypercall_console_io -#define HYPERVISOR_memory_op xencomm_hypercall_memory_op -#define HYPERVISOR_suspend xencomm_hypercall_suspend -#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op -#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature - -/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */ -#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; }) - -static inline int -HYPERVISOR_shutdown( - unsigned int reason) -{ - struct sched_shutdown sched_shutdown = { - .reason = reason - }; - - int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); - - return rc; -} - -/* for netfront.c, netback.c */ -#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */ - -static inline void -MULTI_update_va_mapping( - struct multicall_entry *mcl, unsigned long va, - pte_t new_val, unsigned long flags) -{ - mcl->op = __HYPERVISOR_update_va_mapping; - mcl->result = 0; -} - -static inline void -MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, - void *uop, unsigned int count) -{ - mcl->op = __HYPERVISOR_grant_table_op; - mcl->args[0] = cmd; - mcl->args[1] = (unsigned long)uop; - mcl->args[2] = count; -} - -static inline void -MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, - int count, int *success_count, domid_t domid) -{ - mcl->op = __HYPERVISOR_mmu_update; - mcl->args[0] = (unsigned long)req; - mcl->args[1] = count; - mcl->args[2] = (unsigned long)success_count; - mcl->args[3] = domid; -} - -#endif /* _ASM_IA64_XEN_HYPERCALL_H */ diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h deleted file mode 100644 index 67455c2ed2b12d..00000000000000 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ /dev/null @@ -1,61 +0,0 @@ -/****************************************************************************** - * hypervisor.h - * - * Linux-specific hypervisor handling. - * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef _ASM_IA64_XEN_HYPERVISOR_H -#define _ASM_IA64_XEN_HYPERVISOR_H - -#include -#include -#include /* to compile feature.c */ -#include /* to comiple xen-netfront.c */ -#include -#include - -#ifdef CONFIG_XEN -extern struct shared_info *HYPERVISOR_shared_info; -extern struct start_info *xen_start_info; - -void __init xen_setup_vcpu_info_placement(void); -void force_evtchn_callback(void); - -/* for drivers/xen/balloon/balloon.c */ -#ifdef CONFIG_XEN_SCRUB_PAGES -#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT) -#else -#define scrub_pages(_p, _n) ((void)0) -#endif - -/* For setup_arch() in arch/ia64/kernel/setup.c */ -void xen_ia64_enable_opt_feature(void); -#endif - -#endif /* _ASM_IA64_XEN_HYPERVISOR_H */ diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h deleted file mode 100644 index c53a4761120823..00000000000000 --- a/arch/ia64/include/asm/xen/inst.h +++ /dev/null @@ -1,486 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/inst.h - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include - -#define ia64_ivt xen_ivt -#define DO_SAVE_MIN XEN_DO_SAVE_MIN - -#define __paravirt_switch_to xen_switch_to -#define __paravirt_leave_syscall xen_leave_syscall -#define __paravirt_work_processed_syscall xen_work_processed_syscall -#define __paravirt_leave_kernel xen_leave_kernel -#define __paravirt_pending_syscall_end xen_work_pending_syscall_end -#define __paravirt_work_processed_syscall_target \ - xen_work_processed_syscall - -#define paravirt_fsyscall_table xen_fsyscall_table -#define paravirt_fsys_bubble_down xen_fsys_bubble_down - -#define MOV_FROM_IFA(reg) \ - movl reg = XSI_IFA; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_ITIR(reg) \ - movl reg = XSI_ITIR; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_ISR(reg) \ - movl reg = XSI_ISR; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IHA(reg) \ - movl reg = XSI_IHA; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IPSR(pred, reg) \ -(pred) movl reg = XSI_IPSR; \ - ;; \ -(pred) ld8 reg = [reg] - -#define MOV_FROM_IIM(reg) \ - movl reg = XSI_IIM; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IIP(reg) \ - movl reg = XSI_IIP; \ - ;; \ - ld8 reg = [reg] - -.macro __MOV_FROM_IVR reg, clob - .ifc "\reg", "r8" - XEN_HYPER_GET_IVR - .exitm - .endif - .ifc "\clob", "r8" - XEN_HYPER_GET_IVR - ;; - mov \reg = r8 - .exitm - .endif - - mov \clob = r8 - ;; - XEN_HYPER_GET_IVR - ;; - mov \reg = r8 - ;; - mov r8 = \clob -.endm -#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob - -.macro __MOV_FROM_PSR pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_GET_PSR; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) XEN_HYPER_GET_PSR - ;; - (\pred) mov \reg = r8 - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) XEN_HYPER_GET_PSR - ;; - (\pred) mov \reg = r8 - (\pred) mov r8 = \clob -.endm -#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob - -/* assuming ar.itc is read with interrupt disabled. 
*/ -#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ -(pred) movl clob = XSI_ITC_OFFSET; \ - ;; \ -(pred) ld8 clob = [clob]; \ -(pred) mov reg = ar.itc; \ - ;; \ -(pred) add reg = reg, clob; \ - ;; \ -(pred) movl clob = XSI_ITC_LAST; \ - ;; \ -(pred) ld8 clob = [clob]; \ - ;; \ -(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ - ;; \ -(pred_clob) add reg = 1, clob; \ - ;; \ -(pred) movl clob = XSI_ITC_LAST; \ - ;; \ -(pred) st8 [clob] = reg - - -#define MOV_TO_IFA(reg, clob) \ - movl clob = XSI_IFA; \ - ;; \ - st8 [clob] = reg \ - -#define MOV_TO_ITIR(pred, reg, clob) \ -(pred) movl clob = XSI_ITIR; \ - ;; \ -(pred) st8 [clob] = reg - -#define MOV_TO_IHA(pred, reg, clob) \ -(pred) movl clob = XSI_IHA; \ - ;; \ -(pred) st8 [clob] = reg - -#define MOV_TO_IPSR(pred, reg, clob) \ -(pred) movl clob = XSI_IPSR; \ - ;; \ -(pred) st8 [clob] = reg; \ - ;; - -#define MOV_TO_IFS(pred, reg, clob) \ -(pred) movl clob = XSI_IFS; \ - ;; \ -(pred) st8 [clob] = reg; \ - ;; - -#define MOV_TO_IIP(reg, clob) \ - movl clob = XSI_IIP; \ - ;; \ - st8 [clob] = reg - -.macro ____MOV_TO_KR kr, reg, clob0, clob1 - .ifc "\clob0", "r9" - .error "clob0 \clob0 must not be r9" - .endif - .ifc "\clob1", "r8" - .error "clob1 \clob1 must not be r8" - .endif - - .ifnc "\reg", "r9" - .ifnc "\clob1", "r9" - mov \clob1 = r9 - .endif - mov r9 = \reg - .endif - .ifnc "\clob0", "r8" - mov \clob0 = r8 - .endif - mov r8 = \kr - ;; - XEN_HYPER_SET_KR - - .ifnc "\reg", "r9" - .ifnc "\clob1", "r9" - mov r9 = \clob1 - .endif - .endif - .ifnc "\clob0", "r8" - mov r8 = \clob0 - .endif -.endm - -.macro __MOV_TO_KR kr, reg, clob0, clob1 - .ifc "\clob0", "r9" - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 - .exitm - .endif - .ifc "\clob1", "r8" - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 - .exitm - .endif - - ____MOV_TO_KR \kr, \reg, \clob0, \clob1 -.endm - -#define MOV_TO_KR(kr, reg, clob0, clob1) \ - __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1 - - -.macro __ITC_I pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_ITC_I - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_I - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_I - ;; - (\pred) mov r8 = \clob - ;; -.endm -#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob - -.macro __ITC_D pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_ITC_D - ;; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_D - ;; - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_D - ;; - (\pred) mov r8 = \clob - ;; -.endm -#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob - -.macro __ITC_I_AND_D pred_i, pred_d, reg, clob - .ifc "\reg", "r8" - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - .exitm - .endif - .ifc "\clob", "r8" - mov r8 = \reg - ;; - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - .exitm - .endif - - mov \clob = r8 - mov r8 = \reg - ;; - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - mov r8 = \clob - ;; -.endm -#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ - __ITC_I_AND_D pred_i, pred_d, reg, clob - -.macro __THASH pred, reg0, reg1, clob - .ifc "\reg0", "r8" - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - .exitm - .endc - .ifc "\reg1", "r8" - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - ;; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - ;; - .exitm - .endif - - (\pred) mov 
\clob = r8 - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - (\pred) mov r8 = \clob - ;; -.endm -#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob - -#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ - mov clob0 = 1; \ - movl clob1 = XSI_PSR_IC; \ - ;; \ - st4 [clob1] = clob0 \ - ;; - -#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ - ;; \ - srlz.d; \ - mov clob1 = 1; \ - movl clob0 = XSI_PSR_IC; \ - ;; \ - st4 [clob0] = clob1 - -#define RSM_PSR_IC(clob) \ - movl clob = XSI_PSR_IC; \ - ;; \ - st4 [clob] = r0; \ - ;; - -/* pred will be clobbered */ -#define MASK_TO_PEND_OFS (-1) -#define SSM_PSR_I(pred, pred_clob, clob) \ -(pred) movl clob = XSI_PSR_I_ADDR \ - ;; \ -(pred) ld8 clob = [clob] \ - ;; \ - /* if (pred) vpsr.i = 1 */ \ - /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \ -(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \ - ;; \ - /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \ -(pred) ld1 clob = [clob] \ - ;; \ -(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \ - ;; \ -(pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */ - -#define RSM_PSR_I(pred, clob0, clob1) \ - movl clob0 = XSI_PSR_I_ADDR; \ - mov clob1 = 1; \ - ;; \ - ld8 clob0 = [clob0]; \ - ;; \ -(pred) st1 [clob0] = clob1 - -#define RSM_PSR_I_IC(clob0, clob1, clob2) \ - movl clob0 = XSI_PSR_I_ADDR; \ - movl clob1 = XSI_PSR_IC; \ - ;; \ - ld8 clob0 = [clob0]; \ - mov clob2 = 1; \ - ;; \ - /* note: clears both vpsr.i and vpsr.ic! */ \ - st1 [clob0] = clob2; \ - st4 [clob1] = r0; \ - ;; - -#define RSM_PSR_DT \ - XEN_HYPER_RSM_PSR_DT - -#define RSM_PSR_BE_I(clob0, clob1) \ - RSM_PSR_I(p0, clob0, clob1); \ - rum psr.be - -#define SSM_PSR_DT_AND_SRLZ_I \ - XEN_HYPER_SSM_PSR_DT - -#define BSW_0(clob0, clob1, clob2) \ - ;; \ - /* r16-r31 all now hold bank1 values */ \ - mov clob2 = ar.unat; \ - movl clob0 = XSI_BANK1_R16; \ - movl clob1 = XSI_BANK1_R16 + 8; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \ - ;; \ - mov clob1 = ar.unat; \ - movl clob0 = XSI_B1NAT; \ - ;; \ - st8 [clob0] = clob1; \ - mov ar.unat = clob2; \ - movl clob0 = XSI_BANKNUM; \ - ;; \ - st4 [clob0] = r0 - - - /* FIXME: THIS CODE IS NOT NaT SAFE! 
*/ -#define XEN_BSW_1(clob) \ - mov clob = ar.unat; \ - movl r30 = XSI_B1NAT; \ - ;; \ - ld8 r30 = [r30]; \ - mov r31 = 1; \ - ;; \ - mov ar.unat = r30; \ - movl r30 = XSI_BANKNUM; \ - ;; \ - st4 [r30] = r31; \ - movl r30 = XSI_BANK1_R16; \ - movl r31 = XSI_BANK1_R16+8; \ - ;; \ - ld8.fill r16 = [r30], 16; \ - ld8.fill r17 = [r31], 16; \ - ;; \ - ld8.fill r18 = [r30], 16; \ - ld8.fill r19 = [r31], 16; \ - ;; \ - ld8.fill r20 = [r30], 16; \ - ld8.fill r21 = [r31], 16; \ - ;; \ - ld8.fill r22 = [r30], 16; \ - ld8.fill r23 = [r31], 16; \ - ;; \ - ld8.fill r24 = [r30], 16; \ - ld8.fill r25 = [r31], 16; \ - ;; \ - ld8.fill r26 = [r30], 16; \ - ld8.fill r27 = [r31], 16; \ - ;; \ - ld8.fill r28 = [r30], 16; \ - ld8.fill r29 = [r31], 16; \ - ;; \ - ld8.fill r30 = [r30]; \ - ld8.fill r31 = [r31]; \ - ;; \ - mov ar.unat = clob - -#define BSW_1(clob0, clob1) XEN_BSW_1(clob1) - - -#define COVER \ - XEN_HYPER_COVER - -#define RFI \ - XEN_HYPER_RFI; \ - dv_serialize_data diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h deleted file mode 100644 index e88c5de27410bd..00000000000000 --- a/arch/ia64/include/asm/xen/interface.h +++ /dev/null @@ -1,363 +0,0 @@ -/****************************************************************************** - * arch-ia64/hypervisor-if.h - * - * Guest OS interface to IA64 Xen. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Copyright by those who contributed. 
(in alphabetical order) - * - * Anthony Xu - * Eddie Dong - * Fred Yang - * Kevin Tian - * Alex Williamson - * Chris Wright - * Christian Limpach - * Dietmar Hahn - * Hollis Blanchard - * Isaku Yamahata - * Jan Beulich - * John Levon - * Kazuhiro Suzuki - * Keir Fraser - * Kouya Shimura - * Masaki Kanno - * Matt Chapman - * Matthew Chapman - * Samuel Thibault - * Tomonari Horikoshi - * Tristan Gingold - * Tsunehisa Doi - * Yutaka Ezaki - * Zhang Xin - * Zhang xiantao - * dan.magenheimer@hp.com - * ian.pratt@cl.cam.ac.uk - * michael.fetterman@cl.cam.ac.uk - */ - -#ifndef _ASM_IA64_XEN_INTERFACE_H -#define _ASM_IA64_XEN_INTERFACE_H - -#define __DEFINE_GUEST_HANDLE(name, type) \ - typedef struct { type *p; } __guest_handle_ ## name - -#define DEFINE_GUEST_HANDLE_STRUCT(name) \ - __DEFINE_GUEST_HANDLE(name, struct name) -#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) -#define GUEST_HANDLE(name) __guest_handle_ ## name -#define GUEST_HANDLE_64(name) GUEST_HANDLE(name) -#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) - -#ifndef __ASSEMBLY__ -/* Explicitly size integers that represent pfns in the public interface - * with Xen so that we could have one ABI that works for 32 and 64 bit - * guests. */ -typedef unsigned long xen_pfn_t; -typedef unsigned long xen_ulong_t; -/* Guest handles for primitive C types. */ -__DEFINE_GUEST_HANDLE(uchar, unsigned char); -__DEFINE_GUEST_HANDLE(uint, unsigned int); -__DEFINE_GUEST_HANDLE(ulong, unsigned long); - -DEFINE_GUEST_HANDLE(char); -DEFINE_GUEST_HANDLE(int); -DEFINE_GUEST_HANDLE(long); -DEFINE_GUEST_HANDLE(void); -DEFINE_GUEST_HANDLE(uint64_t); -DEFINE_GUEST_HANDLE(uint32_t); - -DEFINE_GUEST_HANDLE(xen_pfn_t); -#define PRI_xen_pfn "lx" -#endif - -/* Arch specific VIRQs definition */ -#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ -#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ -#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ - -/* Maximum number of virtual CPUs in multi-processor guests. */ -/* keep sizeof(struct shared_page) <= PAGE_SIZE. - * this is checked in arch/ia64/xen/hypervisor.c. 
*/ -#define MAX_VIRT_CPUS 64 - -#ifndef __ASSEMBLY__ - -#define INVALID_MFN (~0UL) - -union vac { - unsigned long value; - struct { - int a_int:1; - int a_from_int_cr:1; - int a_to_int_cr:1; - int a_from_psr:1; - int a_from_cpuid:1; - int a_cover:1; - int a_bsw:1; - long reserved:57; - }; -}; - -union vdc { - unsigned long value; - struct { - int d_vmsw:1; - int d_extint:1; - int d_ibr_dbr:1; - int d_pmc:1; - int d_to_pmd:1; - int d_itm:1; - long reserved:58; - }; -}; - -struct mapped_regs { - union vac vac; - union vdc vdc; - unsigned long virt_env_vaddr; - unsigned long reserved1[29]; - unsigned long vhpi; - unsigned long reserved2[95]; - union { - unsigned long vgr[16]; - unsigned long bank1_regs[16]; /* bank1 regs (r16-r31) - when bank0 active */ - }; - union { - unsigned long vbgr[16]; - unsigned long bank0_regs[16]; /* bank0 regs (r16-r31) - when bank1 active */ - }; - unsigned long vnat; - unsigned long vbnat; - unsigned long vcpuid[5]; - unsigned long reserved3[11]; - unsigned long vpsr; - unsigned long vpr; - unsigned long reserved4[76]; - union { - unsigned long vcr[128]; - struct { - unsigned long dcr; /* CR0 */ - unsigned long itm; - unsigned long iva; - unsigned long rsv1[5]; - unsigned long pta; /* CR8 */ - unsigned long rsv2[7]; - unsigned long ipsr; /* CR16 */ - unsigned long isr; - unsigned long rsv3; - unsigned long iip; - unsigned long ifa; - unsigned long itir; - unsigned long iipa; - unsigned long ifs; - unsigned long iim; /* CR24 */ - unsigned long iha; - unsigned long rsv4[38]; - unsigned long lid; /* CR64 */ - unsigned long ivr; - unsigned long tpr; - unsigned long eoi; - unsigned long irr[4]; - unsigned long itv; /* CR72 */ - unsigned long pmv; - unsigned long cmcv; - unsigned long rsv5[5]; - unsigned long lrr0; /* CR80 */ - unsigned long lrr1; - unsigned long rsv6[46]; - }; - }; - union { - unsigned long reserved5[128]; - struct { - unsigned long precover_ifs; - unsigned long unat; /* not sure if this is needed - until NaT arch is done */ - int interrupt_collection_enabled; /* virtual psr.ic */ - - /* virtual interrupt deliverable flag is - * evtchn_upcall_mask in shared info area now. - * interrupt_mask_addr is the address - * of evtchn_upcall_mask for current vcpu - */ - unsigned char *interrupt_mask_addr; - int pending_interruption; - unsigned char vpsr_pp; - unsigned char vpsr_dfh; - unsigned char hpsr_dfh; - unsigned char hpsr_mfh; - unsigned long reserved5_1[4]; - int metaphysical_mode; /* 1 = use metaphys mapping - 0 = use virtual */ - int banknum; /* 0 or 1, which virtual - register bank is active */ - unsigned long rrs[8]; /* region registers */ - unsigned long krs[8]; /* kernel registers */ - unsigned long tmp[16]; /* temp registers - (e.g. for hyperprivops) */ - - /* itc paravirtualization - * vAR.ITC = mAR.ITC + itc_offset - * itc_last is one which was lastly passed to - * the guest OS in order to prevent it from - * going backwords. - */ - unsigned long itc_offset; - unsigned long itc_last; - }; - }; -}; - -struct arch_vcpu_info { - /* nothing */ -}; - -/* - * This structure is used for magic page in domain pseudo physical address - * space and the result of XENMEM_machine_memory_map. - * As the XENMEM_machine_memory_map result, - * xen_memory_map::nr_entries indicates the size in bytes - * including struct xen_ia64_memmap_info. Not the number of entries. 
- */ -struct xen_ia64_memmap_info { - uint64_t efi_memmap_size; /* size of EFI memory map */ - uint64_t efi_memdesc_size; /* size of an EFI memory map - * descriptor */ - uint32_t efi_memdesc_version; /* memory descriptor version */ - void *memdesc[0]; /* array of efi_memory_desc_t */ -}; - -struct arch_shared_info { - /* PFN of the start_info page. */ - unsigned long start_info_pfn; - - /* Interrupt vector for event channel. */ - int evtchn_vector; - - /* PFN of memmap_info page */ - unsigned int memmap_info_num_pages; /* currently only = 1 case is - supported. */ - unsigned long memmap_info_pfn; - - uint64_t pad[31]; -}; - -struct xen_callback { - unsigned long ip; -}; -typedef struct xen_callback xen_callback_t; - -#endif /* !__ASSEMBLY__ */ - -#include - -/* Size of the shared_info area (this is not related to page size). */ -#define XSI_SHIFT 14 -#define XSI_SIZE (1 << XSI_SHIFT) -/* Log size of mapped_regs area (64 KB - only 4KB is used). */ -#define XMAPPEDREGS_SHIFT 12 -#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) -/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ -#define XMAPPEDREGS_OFS XSI_SIZE - -/* Hyperprivops. */ -#define HYPERPRIVOP_START 0x1 -#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) -#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) -#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) -#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) -#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) -#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) -#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) -#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) -#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) -#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) -#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) -#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) -#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) -#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) -#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) -#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) -#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) -#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) -#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) -#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) -#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) -#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) -#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) -#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) -#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) -#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) -#define HYPERPRIVOP_MAX (0x1a) - -/* Fast and light hypercalls. */ -#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 - -/* Xencomm macros. */ -#define XENCOMM_INLINE_MASK 0xf800000000000000UL -#define XENCOMM_INLINE_FLAG 0x8000000000000000UL - -#ifndef __ASSEMBLY__ - -/* - * Optimization features. - * The hypervisor may do some special optimizations for guests. This hypercall - * can be used to switch on/of these special optimizations. - */ -#define __HYPERVISOR_opt_feature 0x700UL - -#define XEN_IA64_OPTF_OFF 0x0 -#define XEN_IA64_OPTF_ON 0x1 - -/* - * If this feature is switched on, the hypervisor inserts the - * tlb entries without calling the guests traphandler. - * This is useful in guests using region 7 for identity mapping - * like the linux kernel does. - */ -#define XEN_IA64_OPTF_IDENT_MAP_REG7 1 - -/* Identity mapping of region 4 addresses in HVM. 
*/ -#define XEN_IA64_OPTF_IDENT_MAP_REG4 2 - -/* Identity mapping of region 5 addresses in HVM. */ -#define XEN_IA64_OPTF_IDENT_MAP_REG5 3 - -#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) - -struct xen_ia64_opt_feature { - unsigned long cmd; /* Which feature */ - unsigned char on; /* Switch feature on/off */ - union { - struct { - /* The page protection bit mask of the pte. - * This will be or'ed with the pte. */ - unsigned long pgprot; - unsigned long key; /* A protection key for itir.*/ - }; - }; -}; - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_IA64_XEN_INTERFACE_H */ diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h deleted file mode 100644 index a9045098300318..00000000000000 --- a/arch/ia64/include/asm/xen/irq.h +++ /dev/null @@ -1,44 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/irq.h - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef _ASM_IA64_XEN_IRQ_H -#define _ASM_IA64_XEN_IRQ_H - -/* - * The flat IRQ space is divided into two regions: - * 1. A one-to-one mapping of real physical IRQs. This space is only used - * if we have physical device-access privilege. This region is at the - * start of the IRQ space so that existing device drivers do not need - * to be modified to translate physical IRQ numbers into our IRQ space. - * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These - * are bound using the provided bind/unbind functions. - */ - -#define XEN_PIRQ_BASE 0 -#define XEN_NR_PIRQS 256 - -#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS) -#define XEN_NR_DYNIRQS (NR_CPUS * 8) - -#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS) - -#endif /* _ASM_IA64_XEN_IRQ_H */ diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h deleted file mode 100644 index 00cf03e0cb8295..00000000000000 --- a/arch/ia64/include/asm/xen/minstate.h +++ /dev/null @@ -1,143 +0,0 @@ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -/* read ar.itc in advance, and use it before leaving bank 0 */ -#define XEN_ACCOUNT_GET_STAMP \ - MOV_FROM_ITC(pUStk, p6, r20, r2); -#else -#define XEN_ACCOUNT_GET_STAMP -#endif - -/* - * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves - * the minimum state necessary that allows us to turn psr.ic back - * on. 
- * - * Assumed state upon entry: - * psr.ic: off - * r31: contains saved predicates (pr) - * - * Upon exit, the state is as follows: - * psr.ic: off - * r2 = points to &pt_regs.r16 - * r8 = contents of ar.ccv - * r9 = contents of ar.csd - * r10 = contents of ar.ssd - * r11 = FPSR_DEFAULT - * r12 = kernel sp (kernel virtual address) - * r13 = points to current task_struct (kernel virtual address) - * p15 = TRUE if psr.i is set in cr.ipsr - * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: - * preserved - * CONFIG_XEN note: p6/p7 are not preserved - * - * Note that psr.ic is NOT turned on by this macro. This is so that - * we can pass interruption state as arguments to a handler. - */ -#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \ - mov r16=IA64_KR(CURRENT); /* M */ \ - mov r27=ar.rsc; /* M */ \ - mov r20=r1; /* A */ \ - mov r25=ar.unat; /* M */ \ - MOV_FROM_IPSR(p0,r29); /* M */ \ - MOV_FROM_IIP(r28); /* M */ \ - mov r21=ar.fpsr; /* M */ \ - mov r26=ar.pfs; /* I */ \ - __COVER; /* B;; (or nothing) */ \ - adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ - ;; \ - ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \ - st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \ - adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \ - /* switch from user to kernel RBS: */ \ - ;; \ - invala; /* M */ \ - /* SAVE_IFS;*/ /* see xen special handling below */ \ - cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ - ;; \ -(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ - ;; \ -(pUStk) mov.m r24=ar.rnat; \ -(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ -(pKStk) mov r1=sp; /* get sp */ \ - ;; \ -(pUStk) lfetch.fault.excl.nt1 [r22]; \ -(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ -(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ - ;; \ -(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ -(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ - ;; \ -(pUStk) mov r18=ar.bsp; \ -(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ - adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ - adds r16=PT(CR_IPSR),r1; \ - ;; \ - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ - st8 [r16]=r29; /* save cr.ipsr */ \ - ;; \ - lfetch.fault.excl.nt1 [r17]; \ - tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ - mov r29=b0 \ - ;; \ - WORKAROUND; \ - adds r16=PT(R8),r1; /* initialize first base pointer */ \ - adds r17=PT(R9),r1; /* initialize second base pointer */ \ -(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r8,16; \ -.mem.offset 8,0; st8.spill [r17]=r9,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r10,24; \ - movl r8=XSI_PRECOVER_IFS; \ -.mem.offset 8,0; st8.spill [r17]=r11,24; \ - ;; \ - /* xen special handling for possibly lazy cover */ \ - /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \ - ld8 r30=[r8]; \ -(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ - st8 [r16]=r28,16; /* save cr.iip */ \ - ;; \ - st8 [r17]=r30,16; /* save cr.ifs */ \ - mov r8=ar.ccv; \ - mov r9=ar.csd; \ - mov r10=ar.ssd; \ - movl r11=FPSR_DEFAULT; /* L-unit */ \ - ;; \ - st8 [r16]=r25,16; /* save ar.unat */ \ - st8 [r17]=r26,16; /* save ar.pfs */ \ - shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ - ;; \ - st8 [r16]=r27,16; /* save ar.rsc */ \ -(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \ -(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \ - ;; /* 
avoid RAW on r16 & r17 */ \ -(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \ - st8 [r17]=r31,16; /* save predicates */ \ -(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \ - ;; \ - st8 [r16]=r29,16; /* save b0 */ \ - st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ - cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ -.mem.offset 8,0; st8.spill [r17]=r12,16; \ - adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r13,16; \ -.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \ - mov r13=IA64_KR(CURRENT); /* establish `current' */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r15,16; \ -.mem.offset 8,0; st8.spill [r17]=r14,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r2,16; \ -.mem.offset 8,0; st8.spill [r17]=r3,16; \ - XEN_ACCOUNT_GET_STAMP \ - adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ - ;; \ - EXTRA; \ - movl r1=__gp; /* establish kernel global pointer */ \ - ;; \ - ACCOUNT_SYS_ENTER \ - BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \ - ;; diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h deleted file mode 100644 index 96e42f97fa1ff9..00000000000000 --- a/arch/ia64/include/asm/xen/page-coherent.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H -#define _ASM_IA64_XEN_PAGE_COHERENT_H - -#include -#include -#include - -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - void *vstart = (void*)__get_free_pages(flags, get_order(size)); - *dma_handle = virt_to_phys(vstart); - return vstart; -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - free_pages((unsigned long) cpu_addr, get_order(size)); -} - -static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } - -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } - -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } - -static inline void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } - -#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */ diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h deleted file mode 100644 index 03441a780b5b56..00000000000000 --- a/arch/ia64/include/asm/xen/page.h +++ /dev/null @@ -1,65 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/page.h - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef _ASM_IA64_XEN_PAGE_H -#define _ASM_IA64_XEN_PAGE_H - -#define INVALID_P2M_ENTRY (~0UL) - -static inline unsigned long mfn_to_pfn(unsigned long mfn) -{ - return mfn; -} - -static inline unsigned long pfn_to_mfn(unsigned long pfn) -{ - return pfn; -} - -#define phys_to_machine_mapping_valid(_x) (1) - -static inline void *mfn_to_virt(unsigned long mfn) -{ - return __va(mfn << PAGE_SHIFT); -} - -static inline unsigned long virt_to_mfn(void *virt) -{ - return __pa(virt) >> PAGE_SHIFT; -} - -/* for tpmfront.c */ -static inline unsigned long virt_to_machine(void *virt) -{ - return __pa(virt); -} - -static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ - /* nothing */ -} - -#define pte_mfn(_x) pte_pfn(_x) -#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */ -#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */ - -#endif /* _ASM_IA64_XEN_PAGE_H */ diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h deleted file mode 100644 index eae944e8884633..00000000000000 --- a/arch/ia64/include/asm/xen/patchlist.h +++ /dev/null @@ -1,38 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/patchlist.h - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
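A hedged aside, not part of the patch: because the ia64 port kept an identity p2m (mfn == pfn throughout), the page.h helpers being deleted above reduce to plain __pa()/__va() arithmetic. A minimal sketch under that assumption; the function name below is illustrative only and not from this tree.

/* Hedged sketch: exercises the identity pfn<->mfn mapping removed above. */
static inline int xen_identity_p2m_roundtrip_ok(void *kva)
{
	unsigned long mfn = virt_to_mfn(kva);	/* __pa(kva) >> PAGE_SHIFT */

	/* For an identity-mapped (region 7) address, converting the machine
	 * frame back yields the page-aligned kernel virtual address. */
	return mfn_to_virt(mfn) == (void *)((unsigned long)kva & PAGE_MASK);
}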
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#define __paravirt_start_gate_fsyscall_patchlist \ - __xen_start_gate_fsyscall_patchlist -#define __paravirt_end_gate_fsyscall_patchlist \ - __xen_end_gate_fsyscall_patchlist -#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ - __xen_start_gate_brl_fsys_bubble_down_patchlist -#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ - __xen_end_gate_brl_fsys_bubble_down_patchlist -#define __paravirt_start_gate_vtop_patchlist \ - __xen_start_gate_vtop_patchlist -#define __paravirt_end_gate_vtop_patchlist \ - __xen_end_gate_vtop_patchlist -#define __paravirt_start_gate_mckinley_e9_patchlist \ - __xen_start_gate_mckinley_e9_patchlist -#define __paravirt_end_gate_mckinley_e9_patchlist \ - __xen_end_gate_mckinley_e9_patchlist diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h deleted file mode 100644 index fb4ec5e0b066b8..00000000000000 --- a/arch/ia64/include/asm/xen/privop.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef _ASM_IA64_XEN_PRIVOP_H -#define _ASM_IA64_XEN_PRIVOP_H - -/* - * Copyright (C) 2005 Hewlett-Packard Co - * Dan Magenheimer - * - * Paravirtualizations of privileged operations for Xen/ia64 - * - * - * inline privop and paravirt_alt support - * Copyright (c) 2007 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - */ - -#ifndef __ASSEMBLY__ -#include /* arch-ia64.h requires uint64_t */ -#endif -#include - -/* At 1 MB, before per-cpu space but still addressable using addl instead - of movl. */ -#define XSI_BASE 0xfffffffffff00000 - -/* Address of mapped regs. */ -#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE) - -#ifdef __ASSEMBLY__ -#define XEN_HYPER_RFI break HYPERPRIVOP_RFI -#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT -#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT -#define XEN_HYPER_COVER break HYPERPRIVOP_COVER -#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D -#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I -#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I -#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR -#define XEN_HYPER_THASH break HYPERPRIVOP_THASH -#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D -#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR -#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR -#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4 - -#define XSI_IFS (XSI_BASE + XSI_IFS_OFS) -#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS) -#define XSI_IFA (XSI_BASE + XSI_IFA_OFS) -#define XSI_ISR (XSI_BASE + XSI_ISR_OFS) -#define XSI_IIM (XSI_BASE + XSI_IIM_OFS) -#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS) -#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) -#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS) -#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS) -#define XSI_IIP (XSI_BASE + XSI_IIP_OFS) -#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS) -#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) -#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) -#define XSI_IHA (XSI_BASE + XSI_IHA_OFS) -#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) -#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) -#endif - -#ifndef __ASSEMBLY__ - -/************************************************/ -/* Instructions paravirtualized for correctness */ -/************************************************/ - -/* "fc" and "thash" are privilege-sensitive instructions, meaning they - * may 
have different semantics depending on whether they are executed - * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't - * be allowed to execute directly, lest incorrect semantics result. */ -extern void xen_fc(void *addr); -extern unsigned long xen_thash(unsigned long addr); - -/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" - * is not currently used (though it may be in a long-format VHPT system!) - * and the semantics of cover only change if psr.ic is off which is very - * rare (and currently non-existent outside of assembly code */ - -/* There are also privilege-sensitive registers. These registers are - * readable at any privilege level but only writable at PL0. */ -extern unsigned long xen_get_cpuid(int index); -extern unsigned long xen_get_pmd(int index); - -#ifndef ASM_SUPPORTED -extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ -extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ -#endif - -/************************************************/ -/* Instructions paravirtualized for performance */ -/************************************************/ - -/* Xen uses memory-mapped virtual privileged registers for access to many - * performance-sensitive privileged registers. Some, like the processor - * status register (psr), are broken up into multiple memory locations. - * Others, like "pend", are abstractions based on privileged registers. - * "Pend" is guaranteed to be set if reading cr.ivr would return a - * (non-spurious) interrupt. */ -#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE) - -#define XSI_PSR_I \ - (*XEN_MAPPEDREGS->interrupt_mask_addr) -#define xen_get_virtual_psr_i() \ - (!XSI_PSR_I) -#define xen_set_virtual_psr_i(_val) \ - ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; }) -#define xen_set_virtual_psr_ic(_val) \ - ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; }) -#define xen_get_virtual_pend() \ - (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) - -#ifndef ASM_SUPPORTED -/* Although all privileged operations can be left to trap and will - * be properly handled by Xen, some are frequent enough that we use - * hyperprivops for performance. */ -extern unsigned long xen_get_psr(void); -extern unsigned long xen_get_ivr(void); -extern unsigned long xen_get_tpr(void); -extern void xen_hyper_ssm_i(void); -extern void xen_set_itm(unsigned long); -extern void xen_set_tpr(unsigned long); -extern void xen_eoi(unsigned long); -extern unsigned long xen_get_rr(unsigned long index); -extern void xen_set_rr(unsigned long index, unsigned long val); -extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, - unsigned long val2, unsigned long val3, - unsigned long val4); -extern void xen_set_kr(unsigned long index, unsigned long val); -extern void xen_ptcga(unsigned long addr, unsigned long size); -#endif /* !ASM_SUPPORTED */ - -#endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_IA64_XEN_PRIVOP_H */ diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h deleted file mode 100644 index 20b2950c71b60e..00000000000000 --- a/arch/ia64/include/asm/xen/xcom_hcall.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2006 Tristan Gingold , Bull SAS - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
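The memory-mapped psr.i accessors above are the building blocks of the port's paravirtualized interrupt-flag handling: mask by writing the shared mapped_regs page, and on unmask ask Xen to deliver anything that became pending while masked. The sketch below is hedged and illustrative; the helper names are invented and the actual pv_ops wiring is not shown in this hunk.

/* Hedged sketch only, not the original implementation. */
static inline unsigned long xen_sketch_irq_save(void)
{
	unsigned long enabled = xen_get_virtual_psr_i();	/* nonzero = interrupts enabled */

	xen_set_virtual_psr_i(0);	/* mask via the shared mapped_regs page */
	barrier();
	return enabled;
}

static inline void xen_sketch_irq_restore(unsigned long enabled)
{
	barrier();
	xen_set_virtual_psr_i(enabled);
	if (enabled && xen_get_virtual_pend())
		xen_hyper_ssm_i();	/* deliver interrupts that arrived while masked */
}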
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_IA64_XEN_XCOM_HCALL_H -#define _ASM_IA64_XEN_XCOM_HCALL_H - -/* These function creates inline or mini descriptor for the parameters and - calls the corresponding xencomm_arch_hypercall_X. - Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless - they want to use their own wrapper. */ -extern int xencomm_hypercall_console_io(int cmd, int count, char *str); - -extern int xencomm_hypercall_event_channel_op(int cmd, void *op); - -extern int xencomm_hypercall_xen_version(int cmd, void *arg); - -extern int xencomm_hypercall_physdev_op(int cmd, void *op); - -extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, - unsigned int count); - -extern int xencomm_hypercall_sched_op(int cmd, void *arg); - -extern int xencomm_hypercall_multicall(void *call_list, int nr_calls); - -extern int xencomm_hypercall_callback_op(int cmd, void *arg); - -extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg); - -extern int xencomm_hypercall_suspend(unsigned long srec); - -extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg); - -extern long xencomm_hypercall_opt_feature(void *arg); - -#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */ diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h deleted file mode 100644 index cded677bebf2d9..00000000000000 --- a/arch/ia64/include/asm/xen/xencomm.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2006 Hollis Blanchard , IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_IA64_XEN_XENCOMM_H -#define _ASM_IA64_XEN_XENCOMM_H - -#include -#include - -/* Must be called before any hypercall. */ -extern void xencomm_initialize(void); -extern int xencomm_is_initialized(void); - -/* Check if virtual contiguity means physical contiguity - * where the passed address is a pointer value in virtual address. 
- * On ia64, identity mapping area in region 7 or the piece of region 5 - * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL] - */ -static inline int xencomm_is_phys_contiguous(unsigned long addr) -{ - return (PAGE_OFFSET <= addr && - addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || - (KERNEL_START <= addr && - addr < KERNEL_START + KERNEL_TR_PAGE_SIZE); -} - -#endif /* _ASM_IA64_XEN_XENCOMM_H */ diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h index e90c40ec9edf28..f034020398962d 100644 --- a/arch/ia64/include/uapi/asm/break.h +++ b/arch/ia64/include/uapi/asm/break.h @@ -20,13 +20,4 @@ */ #define __IA64_BREAK_SYSCALL 0x100000 -/* - * Xen specific break numbers: - */ -#define __IA64_XEN_HYPERCALL 0x1000 -/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used - for xen hyperprivops */ -#define __IA64_XEN_HYPERPRIVOP_START 0x1 -#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a - #endif /* _ASM_IA64_BREAK_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 59d52e3aef125b..bfa19311e09c80 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -53,7 +53,6 @@ #include #include #include -#include #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ @@ -120,8 +119,6 @@ acpi_get_sysname(void) return "uv"; else return "sn2"; - } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) { - return "xen"; } #ifdef CONFIG_INTEL_IOMMU diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 46c9e3007315de..60ef83e6db71eb 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -16,9 +16,6 @@ #include #include -#include -#include - #include "../kernel/sigframe.h" #include "../kernel/fsyscall_gtod_data.h" @@ -290,33 +287,4 @@ void foo(void) DEFINE(IA64_ITC_LASTCYCLE_OFFSET, offsetof (struct itc_jitter_data_t, itc_lastcycle)); -#ifdef CONFIG_XEN - BLANK(); - - DEFINE(XEN_NATIVE_ASM, XEN_NATIVE); - DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN); - -#define DEFINE_MAPPED_REG_OFS(sym, field) \ - DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field))) - - DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr); - DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr); - DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip); - DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs); - DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs); - DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr); - DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa); - DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa); - DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim); - DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha); - DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir); - DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled); - DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum); - DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]); - DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); - DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); - DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); - DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); - DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); -#endif /* CONFIG_XEN */ } diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 991ca336b8a297..e6f80fcf013bbb 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -416,8 +416,6 @@ start_ap: default_setup_hook = 0 // Currently nothing needs to be done. 
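Hedged aside on the xencomm_is_phys_contiguous() helper above (illustrative name below, not from this tree): it encodes the rule that decides how a hypercall argument gets described. Memory in the region 7 identity map or under the kernel translation register can be handed to Xen by physical address; anything else needs a xencomm descriptor listing its pages.

static unsigned long xencomm_sketch_arg(void *ptr)
{
	/* Physically contiguous, identity-mapped memory: pass the paddr. */
	if (xencomm_is_phys_contiguous((unsigned long)ptr))
		return __pa(ptr);

	/* Otherwise an inline/mini descriptor would have to be built first;
	 * that path is omitted from this sketch. */
	return 0;
}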
- .weak xen_setup_hook - .global hypervisor_type hypervisor_type: data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT @@ -426,7 +424,6 @@ hypervisor_type: hypervisor_setup_hooks: data8 default_setup_hook - data8 xen_setup_hook num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8 .previous diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c index ee564575148ed5..f6769cd54bd93c 100644 --- a/arch/ia64/kernel/nr-irqs.c +++ b/arch/ia64/kernel/nr-irqs.c @@ -10,15 +10,11 @@ #include #include #include -#include void foo(void) { union paravirt_nr_irqs_max { char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; -#ifdef CONFIG_XEN - char xen_nr_irqs[XEN_NR_IRQS]; -#endif }; DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max)); diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h index 64d6d810c64b8e..1ad7512b5f656a 100644 --- a/arch/ia64/kernel/paravirt_inst.h +++ b/arch/ia64/kernel/paravirt_inst.h @@ -22,9 +22,6 @@ #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK #include -#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN) -#include -#include #else #include #endif diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h index 0684aa6c6507a3..67cffc3643a32f 100644 --- a/arch/ia64/kernel/paravirt_patchlist.h +++ b/arch/ia64/kernel/paravirt_patchlist.h @@ -20,9 +20,5 @@ * */ -#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) -#include -#else #include -#endif diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 0ccb28fab27e73..84f8a52ac5ae2b 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -182,12 +182,6 @@ SECTIONS { __start_gate_section = .; *(.data..gate) __stop_gate_section = .; -#ifdef CONFIG_XEN - . = ALIGN(PAGE_SIZE); - __xen_start_gate_section = .; - *(.data..gate.xen) - __xen_stop_gate_section = .; -#endif } /* * make sure the gate page doesn't expose diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig deleted file mode 100644 index 5d8a06b0ddf7e8..00000000000000 --- a/arch/ia64/xen/Kconfig +++ /dev/null @@ -1,25 +0,0 @@ -# -# This Kconfig describes xen/ia64 options -# - -config XEN - bool "Xen hypervisor support" - default y - depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB - select XEN_XENCOMM - select NO_IDLE_HZ - # followings are required to save/restore. - select ARCH_SUSPEND_POSSIBLE - select SUSPEND - select PM_SLEEP - help - Enable Xen hypervisor support. Resulting kernel runs - both as a guest OS on Xen and natively on hardware. - -config XEN_XENCOMM - depends on XEN - bool - -config NO_IDLE_HZ - depends on XEN - bool diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile deleted file mode 100644 index e6f4a0a74228ad..00000000000000 --- a/arch/ia64/xen/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# Makefile for Xen components -# - -obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ - hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ - gate-data.o - -obj-$(CONFIG_IA64_GENERIC) += machvec.o - -# The gate DSO image is built using a special linker script. -include $(srctree)/arch/ia64/kernel/Makefile.gate - -# tell compiled for xen -CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN -AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN - -# use same file of native. 
-$(obj)/gate.o: $(src)/../kernel/gate.S FORCE - $(call if_changed_dep,as_o_S) -$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE - $(call if_changed_dep,cpp_lds_S) - - -AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN - -# xen multi compile -ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S -ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) -obj-y += $(ASM_PARAVIRT_OBJS) -define paravirtualized_xen -AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN -endef -$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o)))) - -$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE - $(call if_changed_dep,as_o_S) diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S deleted file mode 100644 index 6f95b6b32a4efd..00000000000000 --- a/arch/ia64/xen/gate-data.S +++ /dev/null @@ -1,3 +0,0 @@ - .section .data..gate.xen, "aw" - - .incbin "arch/ia64/xen/gate.so" diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c deleted file mode 100644 index c18281332f8472..00000000000000 --- a/arch/ia64/xen/grant-table.c +++ /dev/null @@ -1,94 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/grant-table.c - * - * Copyright (c) 2006 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include - -#include -#include -#include - -#include - -/**************************************************************************** - * grant table hack - * cmd: GNTTABOP_xxx - */ - -int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, - unsigned long max_nr_gframes, - struct grant_entry **__shared) -{ - *__shared = __va(frames[0] << PAGE_SHIFT); - return 0; -} - -void arch_gnttab_unmap_shared(struct grant_entry *shared, - unsigned long nr_gframes) -{ - /* nothing */ -} - -static void -gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) -{ - uint32_t flags; - - flags = uop->flags; - - if (flags & GNTMAP_host_map) { - if (flags & GNTMAP_application_map) { - printk(KERN_DEBUG - "GNTMAP_application_map is not supported yet: " - "flags 0x%x\n", flags); - BUG(); - } - if (flags & GNTMAP_contains_pte) { - printk(KERN_DEBUG - "GNTMAP_contains_pte is not supported yet: " - "flags 0x%x\n", flags); - BUG(); - } - } else if (flags & GNTMAP_device_map) { - printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); - BUG(); /* not yet. actually this flag is not used. 
*/ - } else { - BUG(); - } -} - -int -HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) -{ - if (cmd == GNTTABOP_map_grant_ref) { - unsigned int i; - for (i = 0; i < count; i++) { - gnttab_map_grant_ref_pre( - (struct gnttab_map_grant_ref *)uop + i); - } - } - return xencomm_hypercall_grant_table_op(cmd, uop, count); -} - -EXPORT_SYMBOL(HYPERVISOR_grant_table_op); diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S deleted file mode 100644 index 08847aa12583bd..00000000000000 --- a/arch/ia64/xen/hypercall.S +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Support routines for Xen hypercalls - * - * Copyright (C) 2005 Dan Magenheimer - * Copyright (C) 2008 Yaozu (Eddie) Dong - */ - -#include -#include -#include - -#ifdef __INTEL_COMPILER -/* - * Hypercalls without parameter. - */ -#define __HCALL0(name,hcall) \ - GLOBAL_ENTRY(name); \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -/* - * Hypercalls with 1 parameter. - */ -#define __HCALL1(name,hcall) \ - GLOBAL_ENTRY(name); \ - mov r8=r32; \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -/* - * Hypercalls with 2 parameters. - */ -#define __HCALL2(name,hcall) \ - GLOBAL_ENTRY(name); \ - mov r8=r32; \ - mov r9=r33; \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR) -__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR) -__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR) -__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I) - -__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR) -__HCALL1(xen_eoi, HYPERPRIVOP_EOI) -__HCALL1(xen_thash, HYPERPRIVOP_THASH) -__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM) -__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR) -__HCALL1(xen_fc, HYPERPRIVOP_FC) -__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID) -__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD) - -__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) -__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) -__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) - -GLOBAL_ENTRY(xen_set_rr0_to_rr4) - mov r8=r32 - mov r9=r33 - mov r10=r34 - mov r11=r35 - mov r14=r36 - XEN_HYPER_SET_RR0_TO_RR4 - br.ret.sptk.many rp - ;; -END(xen_set_rr0_to_rr4) -#endif - -GLOBAL_ENTRY(xen_send_ipi) - mov r14=r32 - mov r15=r33 - mov r2=0x400 - break 0x1000 - ;; - br.ret.sptk.many rp - ;; -END(xen_send_ipi) - -GLOBAL_ENTRY(__hypercall) - mov r2=r37 - break 0x1000 - br.ret.sptk.many b0 - ;; -END(__hypercall) diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c deleted file mode 100644 index fab62528a80b42..00000000000000 --- a/arch/ia64/xen/hypervisor.c +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/hypervisor.c - * - * Copyright (c) 2006 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include - -#include "irq_xen.h" - -struct shared_info *HYPERVISOR_shared_info __read_mostly = - (struct shared_info *)XSI_BASE; -EXPORT_SYMBOL(HYPERVISOR_shared_info); - -DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); - -struct start_info *xen_start_info; -EXPORT_SYMBOL(xen_start_info); - -EXPORT_SYMBOL(xen_domain_type); - -EXPORT_SYMBOL(__hypercall); - -/* Stolen from arch/x86/xen/enlighten.c */ -/* - * Flag to determine whether vcpu info placement is available on all - * VCPUs. We assume it is to start with, and then set it to zero on - * the first failure. This is because it can succeed on some VCPUs - * and not others, since it can involve hypervisor memory allocation, - * or because the guest failed to guarantee all the appropriate - * constraints on all VCPUs (ie buffer can't cross a page boundary). - * - * Note that any particular CPU may be using a placed vcpu structure, - * but we can only optimise if the all are. - * - * 0: not available, 1: available - */ - -static void __init xen_vcpu_setup(int cpu) -{ - /* - * WARNING: - * before changing MAX_VIRT_CPUS, - * check that shared_info fits on a page - */ - BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE); - per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; -} - -void __init xen_setup_vcpu_info_placement(void) -{ - int cpu; - - for_each_possible_cpu(cpu) - xen_vcpu_setup(cpu); -} - -void -xen_cpu_init(void) -{ - xen_smp_intr_init(); -} - -/************************************************************************** - * opt feature - */ -void -xen_ia64_enable_opt_feature(void) -{ - /* Enable region 7 identity map optimizations in Xen */ - struct xen_ia64_opt_feature optf; - - optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7; - optf.on = XEN_IA64_OPTF_ON; - optf.pgprot = pgprot_val(PAGE_KERNEL); - optf.key = 0; /* No key on linux. */ - HYPERVISOR_opt_feature(&optf); -} diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c deleted file mode 100644 index efb74dafec4de2..00000000000000 --- a/arch/ia64/xen/irq_xen.c +++ /dev/null @@ -1,443 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/irq_xen.c - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
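Hedged aside: xen_vcpu_setup() above simply caches, per CPU, a pointer into the shared-info page. The sketch below (illustrative name; the vcpu_info fields are from the standard Xen guest interface) shows the sort of fast-path read that cached pointer exists for.

/* Hedged sketch only: event-pending test via the cached per-cpu pointer. */
static inline int xen_sketch_upcall_pending(void)
{
	struct vcpu_info *v = __get_cpu_var(xen_vcpu);

	return v->evtchn_upcall_pending;	/* set by Xen when an event channel fires */
}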
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include - -#include -#include -#include - -#include - -#include "irq_xen.h" - -/*************************************************************************** - * pv_irq_ops - * irq operations - */ - -static int -xen_assign_irq_vector(int irq) -{ - struct physdev_irq irq_op; - - irq_op.irq = irq; - if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) - return -ENOSPC; - - return irq_op.vector; -} - -static void -xen_free_irq_vector(int vector) -{ - struct physdev_irq irq_op; - - if (vector < IA64_FIRST_DEVICE_VECTOR || - vector > IA64_LAST_DEVICE_VECTOR) - return; - - irq_op.vector = vector; - if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) - printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n", - __func__, vector); -} - - -static DEFINE_PER_CPU(int, xen_timer_irq) = -1; -static DEFINE_PER_CPU(int, xen_ipi_irq) = -1; -static DEFINE_PER_CPU(int, xen_resched_irq) = -1; -static DEFINE_PER_CPU(int, xen_cmc_irq) = -1; -static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1; -static DEFINE_PER_CPU(int, xen_cpep_irq) = -1; -#define NAME_SIZE 15 -static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name); -#undef NAME_SIZE - -struct saved_irq { - unsigned int irq; - struct irqaction *action; -}; -/* 16 should be far optimistic value, since only several percpu irqs - * are registered early. - */ -#define MAX_LATE_IRQ 16 -static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ]; -static unsigned short late_irq_cnt; -static unsigned short saved_irq_cnt; -static int xen_slab_ready; - -#ifdef CONFIG_SMP -#include - -/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, - * it ends up to issue several memory accesses upon percpu data and - * thus adds unnecessary traffic to other paths. - */ -static irqreturn_t -xen_dummy_handler(int irq, void *dev_id) -{ - return IRQ_HANDLED; -} - -static irqreturn_t -xen_resched_handler(int irq, void *dev_id) -{ - scheduler_ipi(); - return IRQ_HANDLED; -} - -static struct irqaction xen_ipi_irqaction = { - .handler = handle_IPI, - .flags = IRQF_DISABLED, - .name = "IPI" -}; - -static struct irqaction xen_resched_irqaction = { - .handler = xen_resched_handler, - .flags = IRQF_DISABLED, - .name = "resched" -}; - -static struct irqaction xen_tlb_irqaction = { - .handler = xen_dummy_handler, - .flags = IRQF_DISABLED, - .name = "tlb_flush" -}; -#endif - -/* - * This is xen version percpu irq registration, which needs bind - * to xen specific evtchn sub-system. One trick here is that xen - * evtchn binding interface depends on kmalloc because related - * port needs to be freed at device/cpu down. So we cache the - * registration on BSP before slab is ready and then deal them - * at later point. For rest instances happening after slab ready, - * we hook them to xen evtchn immediately. - * - * FIXME: MCA is not supported by far, and thus "nomca" boot param is - * required. 
- */ -static void -__xen_register_percpu_irq(unsigned int cpu, unsigned int vec, - struct irqaction *action, int save) -{ - int irq = 0; - - if (xen_slab_ready) { - switch (vec) { - case IA64_TIMER_VECTOR: - snprintf(per_cpu(xen_timer_name, cpu), - sizeof(per_cpu(xen_timer_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, - action->handler, action->flags, - per_cpu(xen_timer_name, cpu), action->dev_id); - per_cpu(xen_timer_irq, cpu) = irq; - break; - case IA64_IPI_RESCHEDULE: - snprintf(per_cpu(xen_resched_name, cpu), - sizeof(per_cpu(xen_resched_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, - action->handler, action->flags, - per_cpu(xen_resched_name, cpu), action->dev_id); - per_cpu(xen_resched_irq, cpu) = irq; - break; - case IA64_IPI_VECTOR: - snprintf(per_cpu(xen_ipi_name, cpu), - sizeof(per_cpu(xen_ipi_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, - action->handler, action->flags, - per_cpu(xen_ipi_name, cpu), action->dev_id); - per_cpu(xen_ipi_irq, cpu) = irq; - break; - case IA64_CMC_VECTOR: - snprintf(per_cpu(xen_cmc_name, cpu), - sizeof(per_cpu(xen_cmc_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, - action->handler, - action->flags, - per_cpu(xen_cmc_name, cpu), - action->dev_id); - per_cpu(xen_cmc_irq, cpu) = irq; - break; - case IA64_CMCP_VECTOR: - snprintf(per_cpu(xen_cmcp_name, cpu), - sizeof(per_cpu(xen_cmcp_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, - action->handler, - action->flags, - per_cpu(xen_cmcp_name, cpu), - action->dev_id); - per_cpu(xen_cmcp_irq, cpu) = irq; - break; - case IA64_CPEP_VECTOR: - snprintf(per_cpu(xen_cpep_name, cpu), - sizeof(per_cpu(xen_cpep_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, - action->handler, - action->flags, - per_cpu(xen_cpep_name, cpu), - action->dev_id); - per_cpu(xen_cpep_irq, cpu) = irq; - break; - case IA64_CPE_VECTOR: - case IA64_MCA_RENDEZ_VECTOR: - case IA64_PERFMON_VECTOR: - case IA64_MCA_WAKEUP_VECTOR: - case IA64_SPURIOUS_INT_VECTOR: - /* No need to complain, these aren't supported. */ - break; - default: - printk(KERN_WARNING "Percpu irq %d is unsupported " - "by xen!\n", vec); - break; - } - BUG_ON(irq < 0); - - if (irq > 0) { - /* - * Mark percpu. Without this, migrate_irqs() will - * mark the interrupt for migrations and trigger it - * on cpu hotplug. - */ - irq_set_status_flags(irq, IRQ_PER_CPU); - } - } - - /* For BSP, we cache registered percpu irqs, and then re-walk - * them when initializing APs - */ - if (!cpu && save) { - BUG_ON(saved_irq_cnt == MAX_LATE_IRQ); - saved_percpu_irqs[saved_irq_cnt].irq = vec; - saved_percpu_irqs[saved_irq_cnt].action = action; - saved_irq_cnt++; - if (!xen_slab_ready) - late_irq_cnt++; - } -} - -static void -xen_register_percpu_irq(ia64_vector vec, struct irqaction *action) -{ - __xen_register_percpu_irq(smp_processor_id(), vec, action, 1); -} - -static void -xen_bind_early_percpu_irq(void) -{ - int i; - - xen_slab_ready = 1; - /* There's no race when accessing this cached array, since only - * BSP will face with such step shortly - */ - for (i = 0; i < late_irq_cnt; i++) - __xen_register_percpu_irq(smp_processor_id(), - saved_percpu_irqs[i].irq, - saved_percpu_irqs[i].action, 0); -} - -/* FIXME: There's no obvious point to check whether slab is ready. 
So - * a hack is used here by utilizing a late time hook. - */ - -#ifdef CONFIG_HOTPLUG_CPU -static int unbind_evtchn_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - if (action == CPU_DEAD) { - /* Unregister evtchn. */ - if (per_cpu(xen_cpep_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), - NULL); - per_cpu(xen_cpep_irq, cpu) = -1; - } - if (per_cpu(xen_cmcp_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), - NULL); - per_cpu(xen_cmcp_irq, cpu) = -1; - } - if (per_cpu(xen_cmc_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); - per_cpu(xen_cmc_irq, cpu) = -1; - } - if (per_cpu(xen_ipi_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); - per_cpu(xen_ipi_irq, cpu) = -1; - } - if (per_cpu(xen_resched_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), - NULL); - per_cpu(xen_resched_irq, cpu) = -1; - } - if (per_cpu(xen_timer_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), - NULL); - per_cpu(xen_timer_irq, cpu) = -1; - } - } - return NOTIFY_OK; -} - -static struct notifier_block unbind_evtchn_notifier = { - .notifier_call = unbind_evtchn_callback, - .priority = 0 -}; -#endif - -void xen_smp_intr_init_early(unsigned int cpu) -{ -#ifdef CONFIG_SMP - unsigned int i; - - for (i = 0; i < saved_irq_cnt; i++) - __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, - saved_percpu_irqs[i].action, 0); -#endif -} - -void xen_smp_intr_init(void) -{ -#ifdef CONFIG_SMP - unsigned int cpu = smp_processor_id(); - struct callback_register event = { - .type = CALLBACKTYPE_event, - .address = { .ip = (unsigned long)&xen_event_callback }, - }; - - if (cpu == 0) { - /* Initialization was already done for boot cpu. */ -#ifdef CONFIG_HOTPLUG_CPU - /* Register the notifier only once. */ - register_cpu_notifier(&unbind_evtchn_notifier); -#endif - return; - } - - /* This should be piggyback when setup vcpu guest context */ - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); -#endif /* CONFIG_SMP */ -} - -void __init -xen_irq_init(void) -{ - struct callback_register event = { - .type = CALLBACKTYPE_event, - .address = { .ip = (unsigned long)&xen_event_callback }, - }; - - xen_init_IRQ(); - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); - late_time_init = xen_bind_early_percpu_irq; -} - -void -xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) -{ -#ifdef CONFIG_SMP - /* TODO: we need to call vcpu_up here */ - if (unlikely(vector == ap_wakeup_vector)) { - /* XXX - * This should be in __cpu_up(cpu) in ia64 smpboot.c - * like x86. But don't want to modify it, - * keep it untouched. 
- */ - xen_smp_intr_init_early(cpu); - - xen_send_ipi(cpu, vector); - /* vcpu_prepare_and_up(cpu); */ - return; - } -#endif - - switch (vector) { - case IA64_IPI_VECTOR: - xen_send_IPI_one(cpu, XEN_IPI_VECTOR); - break; - case IA64_IPI_RESCHEDULE: - xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); - break; - case IA64_CMCP_VECTOR: - xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); - break; - case IA64_CPEP_VECTOR: - xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); - break; - case IA64_TIMER_VECTOR: { - /* this is used only once by check_sal_cache_flush() - at boot time */ - static int used = 0; - if (!used) { - xen_send_ipi(cpu, IA64_TIMER_VECTOR); - used = 1; - break; - } - /* fallthrough */ - } - default: - printk(KERN_WARNING "Unsupported IPI type 0x%x\n", - vector); - notify_remote_via_irq(0); /* defaults to 0 irq */ - break; - } -} - -static void __init -xen_register_ipi(void) -{ -#ifdef CONFIG_SMP - register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction); - register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction); - register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction); -#endif -} - -static void -xen_resend_irq(unsigned int vector) -{ - (void)resend_irq_on_evtchn(vector); -} - -const struct pv_irq_ops xen_irq_ops __initconst = { - .register_ipi = xen_register_ipi, - - .assign_irq_vector = xen_assign_irq_vector, - .free_irq_vector = xen_free_irq_vector, - .register_percpu_irq = xen_register_percpu_irq, - - .resend_irq = xen_resend_irq, -}; diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h deleted file mode 100644 index 1778517b90feb6..00000000000000 --- a/arch/ia64/xen/irq_xen.h +++ /dev/null @@ -1,34 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/irq_xen.h - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef IRQ_XEN_H -#define IRQ_XEN_H - -extern void (*late_time_init)(void); -extern char xen_event_callback; -void __init xen_init_IRQ(void); - -extern const struct pv_irq_ops xen_irq_ops __initconst; -extern void xen_smp_intr_init(void); -extern void xen_send_ipi(int cpu, int vec); - -#endif /* IRQ_XEN_H */ diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c deleted file mode 100644 index 4ad588a7c27963..00000000000000 --- a/arch/ia64/xen/machvec.c +++ /dev/null @@ -1,4 +0,0 @@ -#define MACHVEC_PLATFORM_NAME xen -#define MACHVEC_PLATFORM_HEADER -#include - diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c deleted file mode 100644 index 419c8620945ae2..00000000000000 --- a/arch/ia64/xen/suspend.c +++ /dev/null @@ -1,59 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/suspend.c - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * suspend/resume - */ - -#include -#include -#include "time.h" - -void -xen_mm_pin_all(void) -{ - /* nothing */ -} - -void -xen_mm_unpin_all(void) -{ - /* nothing */ -} - -void -xen_arch_pre_suspend() -{ - /* nothing */ -} - -void -xen_arch_post_suspend(int suspend_cancelled) -{ - if (suspend_cancelled) - return; - - xen_ia64_enable_opt_feature(); - /* add more if necessary */ -} - -void xen_arch_resume(void) -{ - xen_timer_resume_on_aps(); -} diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c deleted file mode 100644 index 1f8244a78bee02..00000000000000 --- a/arch/ia64/xen/time.c +++ /dev/null @@ -1,257 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/time.c - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include -#include - -#include - -#include - -#include - -#include "../kernel/fsyscall_gtod_data.h" - -static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); -static DEFINE_PER_CPU(unsigned long, xen_stolen_time); -static DEFINE_PER_CPU(unsigned long, xen_blocked_time); - -/* taken from i386/kernel/time-xen.c */ -static void xen_init_missing_ticks_accounting(int cpu) -{ - struct vcpu_register_runstate_memory_area area; - struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu); - int rc; - - memset(runstate, 0, sizeof(*runstate)); - - area.addr.v = runstate; - rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, - &area); - WARN_ON(rc && rc != -ENOSYS); - - per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; - per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] - + runstate->time[RUNSTATE_offline]; -} - -/* - * Runstate accounting - */ -/* stolen from arch/x86/xen/time.c */ -static void get_runstate_snapshot(struct vcpu_runstate_info *res) -{ - u64 state_time; - struct vcpu_runstate_info *state; - - BUG_ON(preemptible()); - - state = &__get_cpu_var(xen_runstate); - - /* - * The runstate info is always updated by the hypervisor on - * the current CPU, so there's no need to use anything - * stronger than a compiler barrier when fetching it. - */ - do { - state_time = state->state_entry_time; - rmb(); - *res = *state; - rmb(); - } while (state->state_entry_time != state_time); -} - -#define NS_PER_TICK (1000000000LL/HZ) - -static unsigned long -consider_steal_time(unsigned long new_itm) -{ - unsigned long stolen, blocked; - unsigned long delta_itm = 0, stolentick = 0; - int cpu = smp_processor_id(); - struct vcpu_runstate_info runstate; - struct task_struct *p = current; - - get_runstate_snapshot(&runstate); - - /* - * Check for vcpu migration effect - * In this case, itc value is reversed. - * This causes huge stolen value. - * This function just checks and reject this effect. 
- */ - if (!time_after_eq(runstate.time[RUNSTATE_blocked], - per_cpu(xen_blocked_time, cpu))) - blocked = 0; - - if (!time_after_eq(runstate.time[RUNSTATE_runnable] + - runstate.time[RUNSTATE_offline], - per_cpu(xen_stolen_time, cpu))) - stolen = 0; - - if (!time_after(delta_itm + new_itm, ia64_get_itc())) - stolentick = ia64_get_itc() - new_itm; - - do_div(stolentick, NS_PER_TICK); - stolentick++; - - do_div(stolen, NS_PER_TICK); - - if (stolen > stolentick) - stolen = stolentick; - - stolentick -= stolen; - do_div(blocked, NS_PER_TICK); - - if (blocked > stolentick) - blocked = stolentick; - - if (stolen > 0 || blocked > 0) { - account_steal_ticks(stolen); - account_idle_ticks(blocked); - run_local_timers(); - - rcu_check_callbacks(cpu, user_mode(get_irq_regs())); - - scheduler_tick(); - run_posix_cpu_timers(p); - delta_itm += local_cpu_data->itm_delta * (stolen + blocked); - - if (cpu == time_keeper_id) - xtime_update(stolen + blocked); - - local_cpu_data->itm_next = delta_itm + new_itm; - - per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; - per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; - } - return delta_itm; -} - -static int xen_do_steal_accounting(unsigned long *new_itm) -{ - unsigned long delta_itm; - delta_itm = consider_steal_time(*new_itm); - *new_itm += delta_itm; - if (time_after(*new_itm, ia64_get_itc()) && delta_itm) - return 1; - - return 0; -} - -static void xen_itc_jitter_data_reset(void) -{ - u64 lcycle, ret; - - do { - lcycle = itc_jitter_data.itc_lastcycle; - ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0); - } while (unlikely(ret != lcycle)); -} - -/* based on xen_sched_clock() in arch/x86/xen/time.c. */ -/* - * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, - * something similar logic should be implemented here. - */ -/* - * Xen sched_clock implementation. Returns the number of unstolen - * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED - * states. - */ -static unsigned long long xen_sched_clock(void) -{ - struct vcpu_runstate_info runstate; - - unsigned long long now; - unsigned long long offset; - unsigned long long ret; - - /* - * Ideally sched_clock should be called on a per-cpu basis - * anyway, so preempt should already be disabled, but that's - * not current practice at the moment. - */ - preempt_disable(); - - /* - * both ia64_native_sched_clock() and xen's runstate are - * based on mAR.ITC. So difference of them makes sense. - */ - now = ia64_native_sched_clock(); - - get_runstate_snapshot(&runstate); - - WARN_ON(runstate.state != RUNSTATE_running); - - offset = 0; - if (now > runstate.state_entry_time) - offset = now - runstate.state_entry_time; - ret = runstate.time[RUNSTATE_blocked] + - runstate.time[RUNSTATE_running] + - offset; - - preempt_enable(); - - return ret; -} - -struct pv_time_ops xen_time_ops __initdata = { - .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, - .do_steal_accounting = xen_do_steal_accounting, - .clocksource_resume = xen_itc_jitter_data_reset, - .sched_clock = xen_sched_clock, -}; - -/* Called after suspend, to resume time. */ -static void xen_local_tick_resume(void) -{ - /* Just trigger a tick. 
*/ - ia64_cpu_local_tick(); - touch_softlockup_watchdog(); -} - -void -xen_timer_resume(void) -{ - unsigned int cpu; - - xen_local_tick_resume(); - - for_each_online_cpu(cpu) - xen_init_missing_ticks_accounting(cpu); -} - -static void ia64_cpu_local_tick_fn(void *unused) -{ - xen_local_tick_resume(); - xen_init_missing_ticks_accounting(smp_processor_id()); -} - -void -xen_timer_resume_on_aps(void) -{ - smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1); -} diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h deleted file mode 100644 index f98d7e1a42f013..00000000000000 --- a/arch/ia64/xen/time.h +++ /dev/null @@ -1,24 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/time.h - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -extern struct pv_time_ops xen_time_ops __initdata; -void xen_timer_resume_on_aps(void); diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c deleted file mode 100644 index ccaf7431f7c89b..00000000000000 --- a/arch/ia64/xen/xcom_hcall.c +++ /dev/null @@ -1,441 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - * Tristan Gingold - * - * Copyright (c) 2007 - * Isaku Yamahata - * VA Linux Systems Japan K.K. - * consolidate mini and inline version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* Xencomm notes: - * This file defines hypercalls to be used by xencomm. The hypercalls simply - * create inlines or mini descriptors for pointers and then call the raw arch - * hypercall xencomm_arch_hypercall_XXX - * - * If the arch wants to directly use these hypercalls, simply define macros - * in asm/xen/hypercall.h, eg: - * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op - * - * The arch may also define HYPERVISOR_xxx as a function and do more operations - * before/after doing the hypercall. - * - * Note: because only inline or mini descriptors are created these functions - * must only be called with in kernel memory parameters. 
- */ - -int -xencomm_hypercall_console_io(int cmd, int count, char *str) -{ - /* xen early printk uses console io hypercall before - * xencomm initialization. In that case, we just ignore it. - */ - if (!xencomm_is_initialized()) - return 0; - - return xencomm_arch_hypercall_console_io - (cmd, count, xencomm_map_no_alloc(str, count)); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); - -int -xencomm_hypercall_event_channel_op(int cmd, void *op) -{ - struct xencomm_handle *desc; - desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_event_channel_op(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); - -int -xencomm_hypercall_xen_version(int cmd, void *arg) -{ - struct xencomm_handle *desc; - unsigned int argsize; - - switch (cmd) { - case XENVER_version: - /* do not actually pass an argument */ - return xencomm_arch_hypercall_xen_version(cmd, 0); - case XENVER_extraversion: - argsize = sizeof(struct xen_extraversion); - break; - case XENVER_compile_info: - argsize = sizeof(struct xen_compile_info); - break; - case XENVER_capabilities: - argsize = sizeof(struct xen_capabilities_info); - break; - case XENVER_changeset: - argsize = sizeof(struct xen_changeset_info); - break; - case XENVER_platform_parameters: - argsize = sizeof(struct xen_platform_parameters); - break; - case XENVER_get_features: - argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info); - break; - - default: - printk(KERN_DEBUG - "%s: unknown version op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_xen_version(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); - -int -xencomm_hypercall_physdev_op(int cmd, void *op) -{ - unsigned int argsize; - - switch (cmd) { - case PHYSDEVOP_apic_read: - case PHYSDEVOP_apic_write: - argsize = sizeof(struct physdev_apic); - break; - case PHYSDEVOP_alloc_irq_vector: - case PHYSDEVOP_free_irq_vector: - argsize = sizeof(struct physdev_irq); - break; - case PHYSDEVOP_irq_status_query: - argsize = sizeof(struct physdev_irq_status_query); - break; - - default: - printk(KERN_DEBUG - "%s: unknown physdev op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_physdev_op - (cmd, xencomm_map_no_alloc(op, argsize)); -} - -static int -xencommize_grant_table_op(struct xencomm_mini **xc_area, - unsigned int cmd, void *op, unsigned int count, - struct xencomm_handle **desc) -{ - struct xencomm_handle *desc1; - unsigned int argsize; - - switch (cmd) { - case GNTTABOP_map_grant_ref: - argsize = sizeof(struct gnttab_map_grant_ref); - break; - case GNTTABOP_unmap_grant_ref: - argsize = sizeof(struct gnttab_unmap_grant_ref); - break; - case GNTTABOP_setup_table: - { - struct gnttab_setup_table *setup = op; - - argsize = sizeof(*setup); - - if (count != 1) - return -EINVAL; - desc1 = __xencomm_map_no_alloc - (xen_guest_handle(setup->frame_list), - setup->nr_frames * - sizeof(*xen_guest_handle(setup->frame_list)), - *xc_area); - if (desc1 == NULL) - return -EINVAL; - (*xc_area)++; - set_xen_guest_handle(setup->frame_list, (void *)desc1); - break; - } - case GNTTABOP_dump_table: - argsize = sizeof(struct gnttab_dump_table); - break; - case GNTTABOP_transfer: - argsize = sizeof(struct gnttab_transfer); - break; - case GNTTABOP_copy: - argsize = sizeof(struct gnttab_copy); - break; - case GNTTABOP_query_size: - argsize = sizeof(struct gnttab_query_size); 
- break; - default: - printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", - __func__, cmd); - BUG(); - } - - *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); - if (*desc == NULL) - return -EINVAL; - (*xc_area)++; - - return 0; -} - -int -xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, - unsigned int count) -{ - int rc; - struct xencomm_handle *desc; - XENCOMM_MINI_ALIGNED(xc_area, 2); - - rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); - if (rc) - return rc; - - return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); - -int -xencomm_hypercall_sched_op(int cmd, void *arg) -{ - struct xencomm_handle *desc; - unsigned int argsize; - - switch (cmd) { - case SCHEDOP_yield: - case SCHEDOP_block: - argsize = 0; - break; - case SCHEDOP_shutdown: - argsize = sizeof(struct sched_shutdown); - break; - case SCHEDOP_poll: - { - struct sched_poll *poll = arg; - struct xencomm_handle *ports; - - argsize = sizeof(struct sched_poll); - ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), - sizeof(*xen_guest_handle(poll->ports))); - - set_xen_guest_handle(poll->ports, (void *)ports); - break; - } - default: - printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_sched_op(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); - -int -xencomm_hypercall_multicall(void *call_list, int nr_calls) -{ - int rc; - int i; - struct multicall_entry *mce; - struct xencomm_handle *desc; - XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); - - for (i = 0; i < nr_calls; i++) { - mce = (struct multicall_entry *)call_list + i; - - switch (mce->op) { - case __HYPERVISOR_update_va_mapping: - case __HYPERVISOR_mmu_update: - /* No-op on ia64. 
*/ - break; - case __HYPERVISOR_grant_table_op: - rc = xencommize_grant_table_op - (&xc_area, - mce->args[0], (void *)mce->args[1], - mce->args[2], &desc); - if (rc) - return rc; - mce->args[1] = (unsigned long)desc; - break; - case __HYPERVISOR_memory_op: - default: - printk(KERN_DEBUG - "%s: unhandled multicall op entry op %lu\n", - __func__, mce->op); - return -ENOSYS; - } - } - - desc = xencomm_map_no_alloc(call_list, - nr_calls * sizeof(struct multicall_entry)); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_multicall(desc, nr_calls); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); - -int -xencomm_hypercall_callback_op(int cmd, void *arg) -{ - unsigned int argsize; - switch (cmd) { - case CALLBACKOP_register: - argsize = sizeof(struct callback_register); - break; - case CALLBACKOP_unregister: - argsize = sizeof(struct callback_unregister); - break; - default: - printk(KERN_DEBUG - "%s: unknown callback op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_callback_op - (cmd, xencomm_map_no_alloc(arg, argsize)); -} - -static int -xencommize_memory_reservation(struct xencomm_mini *xc_area, - struct xen_memory_reservation *mop) -{ - struct xencomm_handle *desc; - - desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), - mop->nr_extents * - sizeof(*xen_guest_handle(mop->extent_start)), - xc_area); - if (desc == NULL) - return -EINVAL; - - set_xen_guest_handle(mop->extent_start, (void *)desc); - return 0; -} - -int -xencomm_hypercall_memory_op(unsigned int cmd, void *arg) -{ - GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} }; - struct xen_memory_reservation *xmr = NULL; - int rc; - struct xencomm_handle *desc; - unsigned int argsize; - XENCOMM_MINI_ALIGNED(xc_area, 2); - - switch (cmd) { - case XENMEM_increase_reservation: - case XENMEM_decrease_reservation: - case XENMEM_populate_physmap: - xmr = (struct xen_memory_reservation *)arg; - set_xen_guest_handle(extent_start_va[0], - xen_guest_handle(xmr->extent_start)); - - argsize = sizeof(*xmr); - rc = xencommize_memory_reservation(xc_area, xmr); - if (rc) - return rc; - xc_area++; - break; - - case XENMEM_maximum_ram_page: - argsize = 0; - break; - - case XENMEM_add_to_physmap: - argsize = sizeof(struct xen_add_to_physmap); - break; - - default: - printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - rc = xencomm_arch_hypercall_memory_op(cmd, desc); - - switch (cmd) { - case XENMEM_increase_reservation: - case XENMEM_decrease_reservation: - case XENMEM_populate_physmap: - set_xen_guest_handle(xmr->extent_start, - xen_guest_handle(extent_start_va[0])); - break; - } - - return rc; -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); - -int -xencomm_hypercall_suspend(unsigned long srec) -{ - struct sched_shutdown arg; - - arg.reason = SHUTDOWN_suspend; - - return xencomm_arch_hypercall_sched_op( - SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg))); -} - -long -xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) -{ - unsigned int argsize; - switch (cmd) { - case VCPUOP_register_runstate_memory_area: { - struct vcpu_register_runstate_memory_area *area = - (struct vcpu_register_runstate_memory_area *)arg; - argsize = sizeof(*arg); - set_xen_guest_handle(area->addr.h, - (void *)xencomm_map_no_alloc(area->addr.v, - sizeof(area->addr.v))); - break; - } - - default: - printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); - return 
-ENOSYS; - } - - return xencomm_arch_hypercall_vcpu_op(cmd, cpu, - xencomm_map_no_alloc(arg, argsize)); -} - -long -xencomm_hypercall_opt_feature(void *arg) -{ - return xencomm_arch_hypercall_opt_feature( - xencomm_map_no_alloc(arg, - sizeof(struct xen_ia64_opt_feature))); -} diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c deleted file mode 100644 index 3e8d350fdf39bf..00000000000000 --- a/arch/ia64/xen/xen_pv_ops.c +++ /dev/null @@ -1,1141 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/xen_pv_ops.c - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "irq_xen.h" -#include "time.h" - -/*************************************************************************** - * general info - */ -static struct pv_info xen_info __initdata = { - .kernel_rpl = 2, /* or 1: determin at runtime */ - .paravirt_enabled = 1, - .name = "Xen/ia64", -}; - -#define IA64_RSC_PL_SHIFT 2 -#define IA64_RSC_PL_BIT_SIZE 2 -#define IA64_RSC_PL_MASK \ - (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT) - -static void __init -xen_info_init(void) -{ - /* Xenified Linux/ia64 may run on pl = 1 or 2. - * determin at run time. */ - unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); - unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; - xen_info.kernel_rpl = rpl; -} - -/*************************************************************************** - * pv_init_ops - * initialization hooks. - */ - -static void -xen_panic_hypercall(struct unw_frame_info *info, void *arg) -{ - current->thread.ksp = (__u64)info->sw - 16; - HYPERVISOR_shutdown(SHUTDOWN_crash); - /* we're never actually going to get here... */ -} - -static int -xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) -{ - unw_init_running(xen_panic_hypercall, NULL); - /* we're never actually going to get here... */ - return NOTIFY_DONE; -} - -static struct notifier_block xen_panic_block = { - xen_panic_event, NULL, 0 /* try to go last */ -}; - -static void xen_pm_power_off(void) -{ - local_irq_disable(); - HYPERVISOR_shutdown(SHUTDOWN_poweroff); -} - -static void __init -xen_banner(void) -{ - printk(KERN_INFO - "Running on Xen! 
pl = %d start_info_pfn=0x%lx nr_pages=%ld " - "flags=0x%x\n", - xen_info.kernel_rpl, - HYPERVISOR_shared_info->arch.start_info_pfn, - xen_start_info->nr_pages, xen_start_info->flags); -} - -static int __init -xen_reserve_memory(struct rsvd_region *region) -{ - region->start = (unsigned long)__va( - (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); - region->end = region->start + PAGE_SIZE; - return 1; -} - -static void __init -xen_arch_setup_early(void) -{ - struct shared_info *s; - BUG_ON(!xen_pv_domain()); - - s = HYPERVISOR_shared_info; - xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); - - /* Must be done before any hypercall. */ - xencomm_initialize(); - - xen_setup_features(); - /* Register a call for panic conditions. */ - atomic_notifier_chain_register(&panic_notifier_list, - &xen_panic_block); - pm_power_off = xen_pm_power_off; - - xen_ia64_enable_opt_feature(); -} - -static void __init -xen_arch_setup_console(char **cmdline_p) -{ - add_preferred_console("xenboot", 0, NULL); - add_preferred_console("tty", 0, NULL); - /* use hvc_xen */ - add_preferred_console("hvc", 0, NULL); - -#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) - conswitchp = NULL; -#endif -} - -static int __init -xen_arch_setup_nomca(void) -{ - return 1; -} - -static void __init -xen_post_smp_prepare_boot_cpu(void) -{ - xen_setup_vcpu_info_placement(); -} - -#ifdef ASM_SUPPORTED -static unsigned long __init_or_module -xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); -#endif -static void __init -xen_patch_branch(unsigned long tag, unsigned long type); - -static const struct pv_init_ops xen_init_ops __initconst = { - .banner = xen_banner, - - .reserve_memory = xen_reserve_memory, - - .arch_setup_early = xen_arch_setup_early, - .arch_setup_console = xen_arch_setup_console, - .arch_setup_nomca = xen_arch_setup_nomca, - - .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, -#ifdef ASM_SUPPORTED - .patch_bundle = xen_patch_bundle, -#endif - .patch_branch = xen_patch_branch, -}; - -/*************************************************************************** - * pv_fsys_data - * addresses for fsys - */ - -extern unsigned long xen_fsyscall_table[NR_syscalls]; -extern char xen_fsys_bubble_down[]; -struct pv_fsys_data xen_fsys_data __initdata = { - .fsyscall_table = (unsigned long *)xen_fsyscall_table, - .fsys_bubble_down = (void *)xen_fsys_bubble_down, -}; - -/*************************************************************************** - * pv_patchdata - * patchdata addresses - */ - -#define DECLARE(name) \ - extern unsigned long __xen_start_gate_##name##_patchlist[]; \ - extern unsigned long __xen_end_gate_##name##_patchlist[] - -DECLARE(fsyscall); -DECLARE(brl_fsys_bubble_down); -DECLARE(vtop); -DECLARE(mckinley_e9); - -extern unsigned long __xen_start_gate_section[]; - -#define ASSIGN(name) \ - .start_##name##_patchlist = \ - (unsigned long)__xen_start_gate_##name##_patchlist, \ - .end_##name##_patchlist = \ - (unsigned long)__xen_end_gate_##name##_patchlist - -static struct pv_patchdata xen_patchdata __initdata = { - ASSIGN(fsyscall), - ASSIGN(brl_fsys_bubble_down), - ASSIGN(vtop), - ASSIGN(mckinley_e9), - - .gate_section = (void*)__xen_start_gate_section, -}; - -/*************************************************************************** - * pv_cpu_ops - * intrinsics hooks. - */ - -#ifndef ASM_SUPPORTED -static void -xen_set_itm_with_offset(unsigned long val) -{ - /* ia64_cpu_local_tick() calls this with interrupt enabled. 
*/ - /* WARN_ON(!irqs_disabled()); */ - xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); -} - -static unsigned long -xen_get_itm_with_offset(void) -{ - /* unused at this moment */ - printk(KERN_DEBUG "%s is called.\n", __func__); - - WARN_ON(!irqs_disabled()); - return ia64_native_getreg(_IA64_REG_CR_ITM) + - XEN_MAPPEDREGS->itc_offset; -} - -/* ia64_set_itc() is only called by - * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). - * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant. - */ -static void -xen_set_itc(unsigned long val) -{ - unsigned long mitc; - - WARN_ON(!irqs_disabled()); - mitc = ia64_native_getreg(_IA64_REG_AR_ITC); - XEN_MAPPEDREGS->itc_offset = val - mitc; - XEN_MAPPEDREGS->itc_last = val; -} - -static unsigned long -xen_get_itc(void) -{ - unsigned long res; - unsigned long itc_offset; - unsigned long itc_last; - unsigned long ret_itc_last; - - itc_offset = XEN_MAPPEDREGS->itc_offset; - do { - itc_last = XEN_MAPPEDREGS->itc_last; - res = ia64_native_getreg(_IA64_REG_AR_ITC); - res += itc_offset; - if (itc_last >= res) - res = itc_last + 1; - ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, - itc_last, res); - } while (unlikely(ret_itc_last != itc_last)); - return res; - -#if 0 - /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. - Should it be paravirtualized instead? */ - WARN_ON(!irqs_disabled()); - itc_offset = XEN_MAPPEDREGS->itc_offset; - itc_last = XEN_MAPPEDREGS->itc_last; - res = ia64_native_getreg(_IA64_REG_AR_ITC); - res += itc_offset; - if (itc_last >= res) - res = itc_last + 1; - XEN_MAPPEDREGS->itc_last = res; - return res; -#endif -} - -static void xen_setreg(int regnum, unsigned long val) -{ - switch (regnum) { - case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: - xen_set_kr(regnum - _IA64_REG_AR_KR0, val); - break; - case _IA64_REG_AR_ITC: - xen_set_itc(val); - break; - case _IA64_REG_CR_TPR: - xen_set_tpr(val); - break; - case _IA64_REG_CR_ITM: - xen_set_itm_with_offset(val); - break; - case _IA64_REG_CR_EOI: - xen_eoi(val); - break; - default: - ia64_native_setreg_func(regnum, val); - break; - } -} - -static unsigned long xen_getreg(int regnum) -{ - unsigned long res; - - switch (regnum) { - case _IA64_REG_PSR: - res = xen_get_psr(); - break; - case _IA64_REG_AR_ITC: - res = xen_get_itc(); - break; - case _IA64_REG_CR_ITM: - res = xen_get_itm_with_offset(); - break; - case _IA64_REG_CR_IVR: - res = xen_get_ivr(); - break; - case _IA64_REG_CR_TPR: - res = xen_get_tpr(); - break; - default: - res = ia64_native_getreg_func(regnum); - break; - } - return res; -} - -/* turning on interrupts is a bit more complicated.. write to the - * memory-mapped virtual psr.i bit first (to avoid race condition), - * then if any interrupts were pending, we have to execute a hyperprivop - * to ensure the pending interrupt gets delivered; else we're done! */ -static void -xen_ssm_i(void) -{ - int old = xen_get_virtual_psr_i(); - xen_set_virtual_psr_i(1); - barrier(); - if (!old && xen_get_virtual_pend()) - xen_hyper_ssm_i(); -} - -/* turning off interrupts can be paravirtualized simply by writing - * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */ -static void -xen_rsm_i(void) -{ - xen_set_virtual_psr_i(0); - barrier(); -} - -static unsigned long -xen_get_psr_i(void) -{ - return xen_get_virtual_psr_i() ? 
IA64_PSR_I : 0; -} - -static void -xen_intrin_local_irq_restore(unsigned long mask) -{ - if (mask & IA64_PSR_I) - xen_ssm_i(); - else - xen_rsm_i(); -} -#else -#define __DEFINE_FUNC(name, code) \ - extern const char xen_ ## name ## _direct_start[]; \ - extern const char xen_ ## name ## _direct_end[]; \ - asm (".align 32\n" \ - ".proc xen_" #name "\n" \ - "xen_" #name ":\n" \ - "xen_" #name "_direct_start:\n" \ - code \ - "xen_" #name "_direct_end:\n" \ - "br.cond.sptk.many b6\n" \ - ".endp xen_" #name "\n") - -#define DEFINE_VOID_FUNC0(name, code) \ - extern void \ - xen_ ## name (void); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC1(name, code) \ - extern void \ - xen_ ## name (unsigned long arg); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC1_VOID(name, code) \ - extern void \ - xen_ ## name (void *arg); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC2(name, code) \ - extern void \ - xen_ ## name (unsigned long arg0, \ - unsigned long arg1); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_FUNC0(name, code) \ - extern unsigned long \ - xen_ ## name (void); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_FUNC1(name, type, code) \ - extern unsigned long \ - xen_ ## name (type arg); \ - __DEFINE_FUNC(name, code) - -#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) - -/* - * static void xen_set_itm_with_offset(unsigned long val) - * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); - */ -/* 2 bundles */ -DEFINE_VOID_FUNC1(set_itm_with_offset, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r3 = [r2]\n" - ";;\n" - "sub r8 = r8, r3\n" - "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); - -/* - * static unsigned long xen_get_itm_with_offset(void) - * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; - */ -/* 2 bundles */ -DEFINE_FUNC0(get_itm_with_offset, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r3 = [r2]\n" - "mov r8 = cr.itm\n" - ";;\n" - "add r8 = r8, r2\n"); - -/* - * static void xen_set_itc(unsigned long val) - * unsigned long mitc; - * - * WARN_ON(!irqs_disabled()); - * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); - * XEN_MAPPEDREGS->itc_offset = val - mitc; - * XEN_MAPPEDREGS->itc_last = val; - */ -/* 2 bundles */ -DEFINE_VOID_FUNC1(set_itc, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_LAST_OFS) "\n" - "mov r3 = ar.itc\n" - ";;\n" - "sub r3 = r8, r3\n" - "st8 [r2] = r8, " - __stringify(XSI_ITC_LAST_OFS) " - " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "st8 [r2] = r3\n"); - -/* - * static unsigned long xen_get_itc(void) - * unsigned long res; - * unsigned long itc_offset; - * unsigned long itc_last; - * unsigned long ret_itc_last; - * - * itc_offset = XEN_MAPPEDREGS->itc_offset; - * do { - * itc_last = XEN_MAPPEDREGS->itc_last; - * res = ia64_native_getreg(_IA64_REG_AR_ITC); - * res += itc_offset; - * if (itc_last >= res) - * res = itc_last + 1; - * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, - * itc_last, res); - * } while (unlikely(ret_itc_last != itc_last)); - * return res; - */ -/* 5 bundles */ -DEFINE_FUNC0(get_itc, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - /* r9 = itc_offset */ - /* r2 = XSI_ITC_OFFSET */ - "888:\n" - "mov r8 = ar.itc\n" /* res = ar.itc */ - ";;\n" - "ld8 r3 = [r2]\n" /* r3 = itc_last */ - "add r8 = r8, r9\n" /* res = ar.itc + 
itc_offset */ - ";;\n" - "cmp.gtu p6, p0 = r3, r8\n" - ";;\n" - "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ - ";;\n" - "mov ar.ccv = r8\n" - ";;\n" - "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" - ";;\n" - "cmp.ne p6, p0 = r10, r3\n" - "(p6) hint @pause\n" - "(p6) br.cond.spnt 888b\n"); - -DEFINE_VOID_FUNC1_VOID(fc, - "break " __stringify(HYPERPRIVOP_FC) "\n"); - -/* - * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR - * masked_addr = *psr_i_addr_addr - * pending_intr_addr = masked_addr - 1 - * if (val & IA64_PSR_I) { - * masked = *masked_addr - * *masked_addr = 0:xen_set_virtual_psr_i(1) - * compiler barrier - * if (masked) { - * uint8_t pending = *pending_intr_addr; - * if (pending) - * XEN_HYPER_SSM_I - * } - * } else { - * *masked_addr = 1:xen_set_virtual_psr_i(0) - * } - */ -/* 6 bundles */ -DEFINE_VOID_FUNC1(intrin_local_irq_restore, - /* r8 = input value: 0 or IA64_PSR_I - * p6 = (flags & IA64_PSR_I) - * = if clause - * p7 = !(flags & IA64_PSR_I) - * = else clause - */ - "cmp.ne p6, p7 = r8, r0\n" - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - /* r9 = XEN_PSR_I_ADDR */ - "ld8 r9 = [r9]\n" - ";;\n" - - /* r10 = masked previous value */ - "(p6) ld1.acq r10 = [r9]\n" - ";;\n" - - /* p8 = !masked interrupt masked previously? */ - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" - - /* p7 = else clause */ - "(p7) mov r11 = 1\n" - ";;\n" - /* masked = 1 */ - "(p7) st1.rel [r9] = r11\n" - - /* p6 = if clause */ - /* masked = 0 - * r9 = masked_addr - 1 - * = pending_intr_addr - */ - "(p8) st1.rel [r9] = r0, -1\n" - ";;\n" - /* r8 = pending_intr */ - "(p8) ld1.acq r11 = [r9]\n" - ";;\n" - /* p9 = interrupt pending? */ - "(p8) cmp.ne.unc p9, p10 = r11, r0\n" - ";;\n" - "(p10) mf\n" - /* issue hypercall to trigger interrupt */ - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); - -DEFINE_VOID_FUNC2(ptcga, - "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); -DEFINE_VOID_FUNC2(set_rr, - "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); - -/* - * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; - * tmp = *tmp - * tmp = *tmp; - * psr_i = tmp? 0: IA64_PSR_I; - */ -/* 4 bundles */ -DEFINE_FUNC0(get_psr_i, - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ - "mov r8 = 0\n" /* psr_i = 0 */ - ";;\n" - "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ - ";;\n" - "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ - ";;\n" - "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); - -DEFINE_FUNC1(thash, unsigned long, - "break " __stringify(HYPERPRIVOP_THASH) "\n"); -DEFINE_FUNC1(get_cpuid, int, - "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); -DEFINE_FUNC1(get_pmd, int, - "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); -DEFINE_FUNC1(get_rr, unsigned long, - "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); - -/* - * void xen_privop_ssm_i(void) - * - * int masked = !xen_get_virtual_psr_i(); - * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) - * xen_set_virtual_psr_i(1) - * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 - * // compiler barrier - * if (masked) { - * uint8_t* pend_int_addr = - * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; - * uint8_t pending = *pend_int_addr; - * if (pending) - * XEN_HYPER_SSM_I - * } - */ -/* 4 bundles */ -DEFINE_VOID_FUNC0(ssm_i, - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ - ";;\n" - "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ - ";;\n" - "st1.rel [r8] = r0, -1\n" /* psr_i = 0. 
enable interrupt - * r8 = XEN_PSR_I_ADDR - 1 - * = pend_int_addr - */ - "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I - * previously interrupt - * masked? - */ - ";;\n" - "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ - ";;\n" - "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ - ";;\n" - /* issue hypercall to get interrupt */ - "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" - ";;\n"); - -/* - * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr - * = XEN_PSR_I_ADDR_ADDR; - * psr_i_addr = *psr_i_addr_addr; - * *psr_i_addr = 1; - */ -/* 2 bundles */ -DEFINE_VOID_FUNC0(rsm_i, - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - /* r8 = XEN_PSR_I_ADDR */ - "mov r9 = 1\n" - ";;\n" - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ - ";;\n" - "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ - -extern void -xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, - unsigned long val2, unsigned long val3, - unsigned long val4); -__DEFINE_FUNC(set_rr0_to_rr4, - "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); - - -extern unsigned long xen_getreg(int regnum); -#define __DEFINE_GET_REG(id, privop) \ - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ - ";;\n" \ - "cmp.eq p6, p0 = r2, r8\n" \ - ";;\n" \ - "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ - "(p6) br.cond.sptk.many b6\n" \ - ";;\n" - -__DEFINE_FUNC(getreg, - __DEFINE_GET_REG(PSR, PSR) - - /* get_itc */ - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_get_itc\n" - ";;\n" - - /* get itm */ - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_get_itm_with_offset\n" - ";;\n" - - __DEFINE_GET_REG(CR_IVR, IVR) - __DEFINE_GET_REG(CR_TPR, TPR) - - /* fall back */ - "movl r2 = ia64_native_getreg_func\n" - ";;\n" - "mov b7 = r2\n" - ";;\n" - "br.cond.sptk.many b7\n"); - -extern void xen_setreg(int regnum, unsigned long val); -#define __DEFINE_SET_REG(id, privop) \ - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ - ";;\n" \ - "cmp.eq p6, p0 = r2, r9\n" \ - ";;\n" \ - "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \ - "(p6) br.cond.sptk.many b6\n" \ - ";;\n" - -__DEFINE_FUNC(setreg, - /* kr0 .. 
kr 7*/ - /* - * if (_IA64_REG_AR_KR0 <= regnum && - * regnum <= _IA64_REG_AR_KR7) { - * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 - * register __val asm ("r9") = val - * "break HYPERPRIVOP_SET_KR" - * } - */ - "mov r17 = r9\n" - "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" - ";;\n" - "cmp.ge p6, p0 = r9, r2\n" - "sub r17 = r17, r2\n" - ";;\n" - "(p6) cmp.ge.unc p7, p0 = " - __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) - ", r17\n" - ";;\n" - "(p7) mov r9 = r8\n" - ";;\n" - "(p7) mov r8 = r17\n" - "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" - - /* set itm */ - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_set_itm_with_offset\n" - - /* set itc */ - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_set_itc\n" - - __DEFINE_SET_REG(CR_TPR, SET_TPR) - __DEFINE_SET_REG(CR_EOI, EOI) - - /* fall back */ - "movl r2 = ia64_native_setreg_func\n" - ";;\n" - "mov b7 = r2\n" - ";;\n" - "br.cond.sptk.many b7\n"); -#endif - -static const struct pv_cpu_ops xen_cpu_ops __initconst = { - .fc = xen_fc, - .thash = xen_thash, - .get_cpuid = xen_get_cpuid, - .get_pmd = xen_get_pmd, - .getreg = xen_getreg, - .setreg = xen_setreg, - .ptcga = xen_ptcga, - .get_rr = xen_get_rr, - .set_rr = xen_set_rr, - .set_rr0_to_rr4 = xen_set_rr0_to_rr4, - .ssm_i = xen_ssm_i, - .rsm_i = xen_rsm_i, - .get_psr_i = xen_get_psr_i, - .intrin_local_irq_restore - = xen_intrin_local_irq_restore, -}; - -/****************************************************************************** - * replacement of hand written assembly codes. - */ - -extern char xen_switch_to; -extern char xen_leave_syscall; -extern char xen_work_processed_syscall; -extern char xen_leave_kernel; - -const struct pv_cpu_asm_switch xen_cpu_asm_switch = { - .switch_to = (unsigned long)&xen_switch_to, - .leave_syscall = (unsigned long)&xen_leave_syscall, - .work_processed_syscall = (unsigned long)&xen_work_processed_syscall, - .leave_kernel = (unsigned long)&xen_leave_kernel, -}; - -/*************************************************************************** - * pv_iosapic_ops - * iosapic read/write hooks. 
- */ -static void -xen_pcat_compat_init(void) -{ - /* nothing */ -} - -static struct irq_chip* -xen_iosapic_get_irq_chip(unsigned long trigger) -{ - return NULL; -} - -static unsigned int -xen_iosapic_read(char __iomem *iosapic, unsigned int reg) -{ - struct physdev_apic apic_op; - int ret; - - apic_op.apic_physbase = (unsigned long)iosapic - - __IA64_UNCACHED_OFFSET; - apic_op.reg = reg; - ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); - if (ret) - return ret; - return apic_op.value; -} - -static void -xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) -{ - struct physdev_apic apic_op; - - apic_op.apic_physbase = (unsigned long)iosapic - - __IA64_UNCACHED_OFFSET; - apic_op.reg = reg; - apic_op.value = val; - HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); -} - -static struct pv_iosapic_ops xen_iosapic_ops __initdata = { - .pcat_compat_init = xen_pcat_compat_init, - .__get_irq_chip = xen_iosapic_get_irq_chip, - - .__read = xen_iosapic_read, - .__write = xen_iosapic_write, -}; - -/*************************************************************************** - * pv_ops initialization - */ - -void __init -xen_setup_pv_ops(void) -{ - xen_info_init(); - pv_info = xen_info; - pv_init_ops = xen_init_ops; - pv_fsys_data = xen_fsys_data; - pv_patchdata = xen_patchdata; - pv_cpu_ops = xen_cpu_ops; - pv_iosapic_ops = xen_iosapic_ops; - pv_irq_ops = xen_irq_ops; - pv_time_ops = xen_time_ops; - - paravirt_cpu_asm_init(&xen_cpu_asm_switch); -} - -#ifdef ASM_SUPPORTED -/*************************************************************************** - * binary pacthing - * pv_init_ops.patch_bundle - */ - -#define DEFINE_FUNC_GETREG(name, privop) \ - DEFINE_FUNC0(get_ ## name, \ - "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") - -DEFINE_FUNC_GETREG(psr, PSR); -DEFINE_FUNC_GETREG(eflag, EFLAG); -DEFINE_FUNC_GETREG(ivr, IVR); -DEFINE_FUNC_GETREG(tpr, TPR); - -#define DEFINE_FUNC_SET_KR(n) \ - DEFINE_VOID_FUNC0(set_kr ## n, \ - ";;\n" \ - "mov r9 = r8\n" \ - "mov r8 = " #n "\n" \ - "break " __stringify(HYPERPRIVOP_SET_KR) "\n") - -DEFINE_FUNC_SET_KR(0); -DEFINE_FUNC_SET_KR(1); -DEFINE_FUNC_SET_KR(2); -DEFINE_FUNC_SET_KR(3); -DEFINE_FUNC_SET_KR(4); -DEFINE_FUNC_SET_KR(5); -DEFINE_FUNC_SET_KR(6); -DEFINE_FUNC_SET_KR(7); - -#define __DEFINE_FUNC_SETREG(name, privop) \ - DEFINE_VOID_FUNC0(name, \ - "break "__stringify(HYPERPRIVOP_ ## privop) "\n") - -#define DEFINE_FUNC_SETREG(name, privop) \ - __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) - -DEFINE_FUNC_SETREG(eflag, EFLAG); -DEFINE_FUNC_SETREG(tpr, TPR); -__DEFINE_FUNC_SETREG(eoi, EOI); - -extern const char xen_check_events[]; -extern const char __xen_intrin_local_irq_restore_direct_start[]; -extern const char __xen_intrin_local_irq_restore_direct_end[]; -extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; - -asm ( - ".align 32\n" - ".proc xen_check_events\n" - "xen_check_events:\n" - /* masked = 0 - * r9 = masked_addr - 1 - * = pending_intr_addr - */ - "st1.rel [r9] = r0, -1\n" - ";;\n" - /* r8 = pending_intr */ - "ld1.acq r11 = [r9]\n" - ";;\n" - /* p9 = interrupt pending? 
*/ - "cmp.ne p9, p10 = r11, r0\n" - ";;\n" - "(p10) mf\n" - /* issue hypercall to trigger interrupt */ - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" - "br.cond.sptk.many b6\n" - ".endp xen_check_events\n" - "\n" - ".align 32\n" - ".proc __xen_intrin_local_irq_restore_direct\n" - "__xen_intrin_local_irq_restore_direct:\n" - "__xen_intrin_local_irq_restore_direct_start:\n" - "1:\n" - "{\n" - "cmp.ne p6, p7 = r8, r0\n" - "mov r17 = ip\n" /* get ip to calc return address */ - "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "}\n" - "{\n" - /* r9 = XEN_PSR_I_ADDR */ - "ld8 r9 = [r9]\n" - ";;\n" - /* r10 = masked previous value */ - "(p6) ld1.acq r10 = [r9]\n" - "adds r17 = 1f - 1b, r17\n" /* calculate return address */ - ";;\n" - "}\n" - "{\n" - /* p8 = !masked interrupt masked previously? */ - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" - "\n" - /* p7 = else clause */ - "(p7) mov r11 = 1\n" - ";;\n" - "(p8) mov b6 = r17\n" /* set return address */ - "}\n" - "{\n" - /* masked = 1 */ - "(p7) st1.rel [r9] = r11\n" - "\n" - "[99:]\n" - "(p8) brl.cond.dptk.few xen_check_events\n" - "}\n" - /* pv calling stub is 5 bundles. fill nop to adjust return address */ - "{\n" - "nop 0\n" - "nop 0\n" - "nop 0\n" - "}\n" - "1:\n" - "__xen_intrin_local_irq_restore_direct_end:\n" - ".endp __xen_intrin_local_irq_restore_direct\n" - "\n" - ".align 8\n" - "__xen_intrin_local_irq_restore_direct_reloc:\n" - "data8 99b\n" -); - -static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] -__initdata_or_module = -{ -#define XEN_PATCH_BUNDLE_ELEM(name, type) \ - { \ - (void*)xen_ ## name ## _direct_start, \ - (void*)xen_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_ ## type, \ - } - - XEN_PATCH_BUNDLE_ELEM(fc, FC), - XEN_PATCH_BUNDLE_ELEM(thash, THASH), - XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), - XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), - XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), - XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), - XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), - XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), - XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), - XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), - XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), - { - (void*)__xen_intrin_local_irq_restore_direct_start, - (void*)__xen_intrin_local_irq_restore_direct_end, - PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, - }, - -#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ - { \ - xen_get_ ## name ## _direct_start, \ - xen_get_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ - } - - XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), - XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), - - XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), - XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), - - XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), - XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), - - -#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ - { \ - xen_ ## name ## _direct_start, \ - xen_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ - } - -#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ - __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) - - XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), - - XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), - XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, 
CR_TPR), - __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), - - XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), - XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), -}; - -static unsigned long __init_or_module -xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) -{ - const unsigned long nelems = sizeof(xen_patch_bundle_elems) / - sizeof(xen_patch_bundle_elems[0]); - unsigned long used; - const struct paravirt_patch_bundle_elem *found; - - used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, - xen_patch_bundle_elems, nelems, - &found); - - if (found == NULL) - /* fallback */ - return ia64_native_patch_bundle(sbundle, ebundle, type); - if (used == 0) - return used; - - /* relocation */ - switch (type) { - case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { - unsigned long reloc = - __xen_intrin_local_irq_restore_direct_reloc; - unsigned long reloc_offset = reloc - (unsigned long) - __xen_intrin_local_irq_restore_direct_start; - unsigned long tag = (unsigned long)sbundle + reloc_offset; - paravirt_patch_reloc_brl(tag, xen_check_events); - break; - } - default: - /* nothing */ - break; - } - return used; -} -#endif /* ASM_SUPPOTED */ - -const struct paravirt_patch_branch_target xen_branch_target[] -__initconst = { -#define PARAVIRT_BR_TARGET(name, type) \ - { \ - &xen_ ## name, \ - PARAVIRT_PATCH_TYPE_BR_ ## type, \ - } - PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), - PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), - PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), - PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), -}; - -static void __init -xen_patch_branch(unsigned long tag, unsigned long type) -{ - __paravirt_patch_apply_branch(tag, type, xen_branch_target, - ARRAY_SIZE(xen_branch_target)); -} diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c deleted file mode 100644 index 73d903ca2d64ce..00000000000000 --- a/arch/ia64/xen/xencomm.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) 2006 Hollis Blanchard , IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include - -static unsigned long kernel_virtual_offset; -static int is_xencomm_initialized; - -/* for xen early printk. It uses console io hypercall which uses xencomm. - * However early printk may use it before xencomm initialization. - */ -int -xencomm_is_initialized(void) -{ - return is_xencomm_initialized; -} - -void -xencomm_initialize(void) -{ - kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START); - is_xencomm_initialized = 1; -} - -/* Translate virtual address to physical address. */ -unsigned long -xencomm_vtop(unsigned long vaddr) -{ - struct page *page; - struct vm_area_struct *vma; - - if (vaddr == 0) - return 0UL; - - if (REGION_NUMBER(vaddr) == 5) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; - - /* On ia64, TASK_SIZE refers to current. 
It is not initialized - during boot. - Furthermore the kernel is relocatable and __pa() doesn't - work on addresses. */ - if (vaddr >= KERNEL_START - && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) - return vaddr - kernel_virtual_offset; - - /* In kernel area -- virtually mapped. */ - pgd = pgd_offset_k(vaddr); - if (pgd_none(*pgd) || pgd_bad(*pgd)) - return ~0UL; - - pud = pud_offset(pgd, vaddr); - if (pud_none(*pud) || pud_bad(*pud)) - return ~0UL; - - pmd = pmd_offset(pud, vaddr); - if (pmd_none(*pmd) || pmd_bad(*pmd)) - return ~0UL; - - ptep = pte_offset_kernel(pmd, vaddr); - if (!ptep) - return ~0UL; - - return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); - } - - if (vaddr > TASK_SIZE) { - /* percpu variables */ - if (REGION_NUMBER(vaddr) == 7 && - REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS)) - ia64_tpa(vaddr); - - /* kernel address */ - return __pa(vaddr); - } - - /* XXX double-check (lack of) locking */ - vma = find_extend_vma(current->mm, vaddr); - if (!vma) - return ~0UL; - - /* We assume the page is modified. */ - page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); - if (IS_ERR_OR_NULL(page)) - return ~0UL; - - return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); -} diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S deleted file mode 100644 index 3e71d50584d906..00000000000000 --- a/arch/ia64/xen/xenivt.S +++ /dev/null @@ -1,52 +0,0 @@ -/* - * arch/ia64/xen/ivt.S - * - * Copyright (C) 2005 Hewlett-Packard Co - * Dan Magenheimer - * - * Copyright (c) 2008 Isaku Yamahata - * VA Linux Systems Japan K.K. - * pv_ops. - */ - -#include -#include -#include - -#include "../kernel/minstate.h" - - .section .text,"ax" -GLOBAL_ENTRY(xen_event_callback) - mov r31=pr // prepare to save predicates - ;; - SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 - ;; - movl r3=XSI_PSR_IC - mov r14=1 - ;; - st4 [r3]=r14 - ;; - adds r3=8,r2 // set up second base pointer for SAVE_REST - srlz.i // ensure everybody knows psr.ic is back on - ;; - SAVE_REST - ;; -1: - alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group - add out0=16,sp // pass pointer to pt_regs as first arg - ;; - br.call.sptk.many b0=xen_evtchn_do_upcall - ;; - movl r20=XSI_PSR_I_ADDR - ;; - ld8 r20=[r20] - ;; - adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending - ;; - ld1 r20=[r20] - ;; - cmp.ne p6,p0=r20,r0 // if there are pending events, - (p6) br.spnt.few 1b // call evtchn_do_upcall again. - br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is - // paravirtualized as xen_leave_kernel -END(xen_event_callback) diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S deleted file mode 100644 index e29519ebe2d20b..00000000000000 --- a/arch/ia64/xen/xensetup.S +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Support routines for Xen - * - * Copyright (C) 2005 Dan Magenheimer - */ - -#include -#include -#include -#include -#include -#include -#include -#include - - .section .data..read_mostly - .align 8 - .global xen_domain_type -xen_domain_type: - data4 XEN_NATIVE_ASM - .previous - - __INIT -ENTRY(startup_xen) - // Calculate load offset. - // The constant, LOAD_OFFSET, can't be used because the boot - // loader doesn't always load to the LMA specified by the vmlinux.lds. - mov r9=ip // must be the first instruction to make sure - // that r9 = the physical address of startup_xen. - // Usually r9 = startup_xen - LOAD_OFFSET - movl r8=startup_xen - ;; - sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET. 
- - mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN - movl r11=_start - ;; - add r11=r11,r9 - movl r8=hypervisor_type - ;; - add r8=r8,r9 - mov b0=r11 - ;; - st8 [r8]=r10 - br.cond.sptk.many b0 - ;; -END(startup_xen) - - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") - ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") - ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET) - -#define isBP p3 // are we the Bootstrap Processor? - -GLOBAL_ENTRY(xen_setup_hook) - mov r8=XEN_PV_DOMAIN_ASM -(isBP) movl r9=xen_domain_type;; -(isBP) st4 [r9]=r8 - movl r10=xen_ivt;; - - mov cr.iva=r10 - - /* Set xsi base. */ -#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600 -(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA -(isBP) movl r28=XSI_BASE;; -(isBP) break 0x1000;; - - /* setup pv_ops */ -(isBP) mov r4=rp - ;; -(isBP) br.call.sptk.many rp=xen_setup_pv_ops - ;; -(isBP) mov rp=r4 - ;; - - br.ret.sptk.many rp - ;; -END(xen_setup_hook) diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h index 6976621efd3f5d..1a40265e8d883f 100644 --- a/arch/m32r/include/asm/barrier.h +++ b/arch/m32r/include/asm/barrier.h @@ -11,84 +11,6 @@ #define nop() __asm__ __volatile__ ("nop" : : ) -/* - * Memory barrier. - * - * mb() prevents loads and stores being reordered across this point. - * rmb() prevents loads being reordered across this point. - * wmb() prevents stores being reordered across this point. - */ -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * - * - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. 
- **/ - -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#define set_mb(var, value) do { var = value; barrier(); } while (0) -#endif +#include #endif /* _ASM_M32R_BARRIER_H */ diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 75f25a8e300170..dbdd2231c75ddc 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -87,6 +87,30 @@ config MMU_SUN3 bool depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE +config KEXEC + bool "kexec system call" + depends on M68KCLASSIC + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + + It is an ongoing process to be certain the hardware in a machine + is properly shutdown, so do not be surprised if this code does not + initially work for you. As of this writing the exact hardware + interface is strongly in flux, so no good recommendation can be + made. + +config BOOTINFO_PROC + bool "Export bootinfo in procfs" + depends on KEXEC && M68KCLASSIC + help + Say Y to export the bootinfo used to boot the kernel in a + "bootinfo" file in procfs. This is useful with kexec. + menu "Platform setup" source arch/m68k/Kconfig.cpu diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c index 99449fbf9a7268..ba03cec3f711e0 100644 --- a/arch/m68k/amiga/chipram.c +++ b/arch/m68k/amiga/chipram.c @@ -87,7 +87,7 @@ void *amiga_chip_alloc_res(unsigned long size, struct resource *res) atomic_sub(size, &chipavail); pr_debug("amiga_chip_alloc_res: returning %pR\n", res); - return (void *)ZTWO_VADDR(res->start); + return ZTWO_VADDR(res->start); } void amiga_chip_free(void *ptr) diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index b819390e29cdb7..9625b71322275e 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -28,6 +28,8 @@ #include #include +#include +#include #include #include #include @@ -140,46 +142,46 @@ static struct resource ram_resource[NUM_MEMINFO]; * Parse an Amiga-specific record in the bootinfo */ -int amiga_parse_bootinfo(const struct bi_record *record) +int __init amiga_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_AMIGA_MODEL: - amiga_model = *data; + amiga_model = be32_to_cpup(data); break; case BI_AMIGA_ECLOCK: - amiga_eclock = *data; + amiga_eclock = be32_to_cpup(data); break; case BI_AMIGA_CHIPSET: - amiga_chipset = *data; + amiga_chipset = be32_to_cpup(data); break; case BI_AMIGA_CHIP_SIZE: - amiga_chip_size = *(const int *)data; + amiga_chip_size = be32_to_cpup(data); break; case BI_AMIGA_VBLANK: - amiga_vblank = *(const unsigned char *)data; + amiga_vblank = *(const __u8 *)data; break; case BI_AMIGA_PSFREQ: - amiga_psfreq = *(const unsigned char *)data; + amiga_psfreq = *(const __u8 *)data; break; case BI_AMIGA_AUTOCON: #ifdef CONFIG_ZORRO if (zorro_num_autocon < ZORRO_NUM_AUTO) { - const struct ConfigDev 
*cd = (struct ConfigDev *)data; - struct zorro_dev *dev = &zorro_autocon[zorro_num_autocon++]; + const struct ConfigDev *cd = data; + struct zorro_dev_init *dev = &zorro_autocon_init[zorro_num_autocon++]; dev->rom = cd->cd_Rom; - dev->slotaddr = cd->cd_SlotAddr; - dev->slotsize = cd->cd_SlotSize; - dev->resource.start = (unsigned long)cd->cd_BoardAddr; - dev->resource.end = dev->resource.start + cd->cd_BoardSize - 1; + dev->slotaddr = be16_to_cpu(cd->cd_SlotAddr); + dev->slotsize = be16_to_cpu(cd->cd_SlotSize); + dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr); + dev->boardsize = be32_to_cpu(cd->cd_BoardSize); } else printk("amiga_parse_bootinfo: too many AutoConfig devices\n"); #endif /* CONFIG_ZORRO */ @@ -358,6 +360,14 @@ static void __init amiga_identify(void) #undef AMIGAHW_ANNOUNCE } + +static unsigned long amiga_random_get_entropy(void) +{ + /* VPOSR/VHPOSR provide at least 17 bits of data changing at 1.79 MHz */ + return *(unsigned long *)&amiga_custom.vposr; +} + + /* * Setup the Amiga configuration info */ @@ -395,6 +405,8 @@ void __init config_amiga(void) mach_heartbeat = amiga_heartbeat; #endif + mach_random_get_entropy = amiga_random_get_entropy; + /* Fill in the clock value (based on the 700 kHz E-Clock) */ amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */ @@ -608,6 +620,8 @@ static void amiga_mem_console_write(struct console *co, const char *s, static int __init amiga_savekmsg_setup(char *arg) { + bool registered; + if (!MACH_IS_AMIGA || strcmp(arg, "mem")) return 0; @@ -618,14 +632,16 @@ static int __init amiga_savekmsg_setup(char *arg) /* Just steal the block, the chipram allocator isn't functional yet */ amiga_chip_size -= SAVEKMSG_MAXMEM; - savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); + savekmsg = ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); savekmsg->magic1 = SAVEKMSG_MAGIC1; savekmsg->magic2 = SAVEKMSG_MAGIC2; savekmsg->magicptr = ZTWO_PADDR(savekmsg); savekmsg->size = 0; + registered = !!amiga_console_driver.write; amiga_console_driver.write = amiga_mem_console_write; - register_console(&amiga_console_driver); + if (!registered) + register_console(&amiga_console_driver); return 0; } @@ -707,11 +723,16 @@ void amiga_serial_gets(struct console *co, char *s, int len) static int __init amiga_debug_setup(char *arg) { - if (MACH_IS_AMIGA && !strcmp(arg, "ser")) { - /* no initialization required (?) */ - amiga_console_driver.write = amiga_serial_console_write; + bool registered; + + if (!MACH_IS_AMIGA || strcmp(arg, "ser")) + return 0; + + /* no initialization required (?) 
*/ + registered = !!amiga_console_driver.write; + amiga_console_driver.write = amiga_serial_console_write; + if (!registered) register_console(&amiga_console_driver); - } return 0; } diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c index dacd9f911f7171..d34029d7b05892 100644 --- a/arch/m68k/amiga/platform.c +++ b/arch/m68k/amiga/platform.c @@ -13,6 +13,7 @@ #include #include +#include #ifdef CONFIG_ZORRO @@ -66,10 +67,12 @@ static int __init z_dev_present(zorro_id id) { unsigned int i; - for (i = 0; i < zorro_num_autocon; i++) - if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) && - zorro_autocon[i].rom.er_Product == ZORRO_PROD(id)) + for (i = 0; i < zorro_num_autocon; i++) { + const struct ExpansionRom *rom = &zorro_autocon_init[i].rom; + if (be16_to_cpu(rom->er_Manufacturer) == ZORRO_MANUF(id) && + rom->er_Product == ZORRO_PROD(id)) return 1; + } return 0; } diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 3ea56b90e7188d..9268c0f96376b3 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c @@ -1,3 +1,4 @@ +#include #include #include #include @@ -9,6 +10,8 @@ #include #include +#include +#include #include #include #include @@ -43,26 +46,25 @@ static const char *apollo_models[] = { [APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)" }; -int apollo_parse_bootinfo(const struct bi_record *record) { - +int __init apollo_parse_bootinfo(const struct bi_record *record) +{ int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch(record->tag) { - case BI_APOLLO_MODEL: - apollo_model=*data; - break; + switch (be16_to_cpu(record->tag)) { + case BI_APOLLO_MODEL: + apollo_model = be32_to_cpup(data); + break; - default: - unknown=1; + default: + unknown=1; } return unknown; } -void dn_setup_model(void) { - - +static void __init dn_setup_model(void) +{ printk("Apollo hardware found: "); printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]); diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c index 20cde4e9fc772e..3e73a63c066f8a 100644 --- a/arch/m68k/atari/ataints.c +++ b/arch/m68k/atari/ataints.c @@ -333,6 +333,9 @@ void __init atari_init_IRQ(void) m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq, IRQ_MFP_TIMER1, 8); + irq_set_status_flags(IRQ_MFP_TIMER1, IRQ_IS_POLLED); + irq_set_status_flags(IRQ_MFP_TIMER2, IRQ_IS_POLLED); + /* prepare timer D data for use as poll interrupt */ /* set Timer D data Register - needs to be > 0 */ st_mfp.tim_dt_d = 254; /* < 100 Hz */ diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index fb2d0bd9b3adab..01a62161b08a1a 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -37,6 +37,8 @@ #include #include +#include +#include #include #include #include @@ -129,14 +131,14 @@ static int __init scc_test(volatile char *ctla) int __init atari_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const u_long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_ATARI_MCH_COOKIE: - atari_mch_cookie = *data; + atari_mch_cookie = be32_to_cpup(data); break; case BI_ATARI_MCH_TYPE: - atari_mch_type = *data; + atari_mch_type = be32_to_cpup(data); break; default: unknown = 1; diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c index a547ba9683d14a..03cb5e08d7cf9f 100644 --- a/arch/m68k/atari/debug.c +++ b/arch/m68k/atari/debug.c @@ -287,6 +287,8 @@ static void __init atari_init_midi_port(int cflag) 
static int __init atari_debug_setup(char *arg) { + bool registered; + if (!MACH_IS_ATARI) return 0; @@ -294,6 +296,7 @@ static int __init atari_debug_setup(char *arg) /* defaults to ser2 for a Falcon and ser1 otherwise */ arg = MACH_IS_FALCON ? "ser2" : "ser1"; + registered = !!atari_console_driver.write; if (!strcmp(arg, "ser1")) { /* ST-MFP Modem1 serial port */ atari_init_mfp_port(B9600|CS8); @@ -317,7 +320,7 @@ static int __init atari_debug_setup(char *arg) sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */ atari_console_driver.write = atari_par_console_write; } - if (atari_console_driver.write) + if (atari_console_driver.write && !registered) register_console(&atari_console_driver); return 0; diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 8943aa4c18e63c..478623dbb2092b 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c @@ -28,6 +28,8 @@ #include #include +#include +#include #include #include #include @@ -50,9 +52,9 @@ void bvme6000_set_vectors (void); static irq_handler_t tick_handler; -int bvme6000_parse_bootinfo(const struct bi_record *bi) +int __init bvme6000_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE) + if (be16_to_cpu(bi->tag) == BI_VME_TYPE) return 0; else return 1; diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 19325e117eeaae..559ff3af8ff7b1 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -52,7 +52,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -63,11 +62,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -85,6 +84,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -98,6 +108,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -130,6 +141,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -144,11 +156,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m 
CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -156,6 +175,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -170,6 +190,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -183,11 +206,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -195,10 +220,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -216,6 +244,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -262,6 +291,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -271,10 +301,10 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_3COM is not set CONFIG_A2065=y CONFIG_ARIADNE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -285,6 +315,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -311,7 +342,6 @@ CONFIG_JOYSTICK_AMIGA=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -345,10 +375,6 @@ CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -385,7 +411,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -444,10 +470,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -480,6 +506,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m 
+CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 14dc6ccda7f453..cb1f55df69b6c8 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -193,10 +218,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # 
CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -244,12 +273,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -258,6 +289,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -279,7 +311,6 @@ CONFIG_INPUT_EVDEV=m # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m CONFIG_SERIO=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -302,10 +333,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -342,7 +369,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -401,10 +428,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -437,6 +464,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 6d5370c914b265..e880cfbb62d9e4 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m 
CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -192,10 +217,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -211,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -249,10 +278,10 @@ CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m CONFIG_EQUALIZER=m -CONFIG_MII=y CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -260,6 +289,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_ATARILANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -267,6 +297,7 @@ CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -291,7 +322,6 @@ CONFIG_MOUSE_ATARI=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set 
CONFIG_PRINTER=m @@ -320,10 +350,6 @@ CONFIG_NFBLOCK=y CONFIG_NFCON=y CONFIG_NFETH=y CONFIG_ATARI_DSP56K=m -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -360,7 +386,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -419,10 +445,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -455,6 +481,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index c015ddb6fd8063..4aa4f45e52a870 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ 
CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_BVME6000_NET=y @@ -257,6 +288,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ec7382d8afff53..7cd9d9f456fb61 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m 
+CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -193,10 +218,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -244,6 +273,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -251,6 +281,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_HPLANCE=y +# CONFIG_NET_VENDOR_ARC is not set # 
CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -259,6 +290,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -282,7 +314,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_HP_SDC_RTC=m CONFIG_SERIO_SERPORT=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -304,10 +335,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -344,7 +371,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -403,10 +430,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -439,6 +466,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 7d46fbec70424d..31f5bd061d1466 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m 
+CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -195,11 +220,13 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -212,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -261,6 +289,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -268,6 +297,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y @@ -279,6 +309,7 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -302,7 +333,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_SERIO=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y @@ -327,10 +357,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -367,7 +393,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -426,10 +452,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m 
CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -462,6 +489,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index b17a8837f0e101..4e5adff326eec8 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -58,7 +58,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -69,11 +68,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -91,6 +90,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -104,6 +114,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -136,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -150,11 +162,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -162,6 +181,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -176,6 +196,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -189,11 +212,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -204,11 +229,13 @@ 
CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -230,6 +257,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -290,10 +318,10 @@ CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y CONFIG_DUMMY=m CONFIG_EQUALIZER=m -CONFIG_MII=y CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -308,10 +336,10 @@ CONFIG_HPLANCE=y CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set CONFIG_BVME6000_NET=y CONFIG_MVME16x_NET=y @@ -325,6 +353,7 @@ CONFIG_APNE=y CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m @@ -357,7 +386,6 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_HP_SDC_RTC=m CONFIG_SERIO_Q40KBD=y -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y @@ -405,10 +433,6 @@ CONFIG_NFETH=y CONFIG_ATARI_DSP56K=m CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -445,7 +469,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -504,10 +528,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -540,6 +565,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 5586c6529fce36..02cdbac5565e51 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -47,7 +47,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -58,11 +57,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -80,6 +79,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m 
+CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -93,6 +103,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -125,6 +136,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -139,11 +151,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -151,6 +170,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -165,6 +185,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -178,11 +201,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -190,10 +215,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -205,6 +233,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -242,6 +271,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -249,6 +279,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MVME147_NET=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -257,6 +288,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is 
not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index e5e8262bbacdd0..05a990a9dbd469 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m 
CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MVME16x_NET=y @@ -257,6 +288,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index be1496ed9b6602..568e2a98f976ee 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # 
CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -209,6 +237,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -249,6 +278,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 
CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -257,10 +287,10 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -269,6 +299,7 @@ CONFIG_NE2000=m # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m @@ -293,7 +324,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_SERIO_Q40KBD=y -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -318,10 +348,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -358,7 +384,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -417,10 +443,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -453,6 +479,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 54674d61e00141..60b0aeac5742e8 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m 
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -188,10 +213,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MARVELL is not set @@ -255,6 +286,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_SUNKBD=y # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m 
CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 832d9539f44194..21bda331eebb04 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m 
+CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -188,10 +213,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -255,6 +286,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_SUNKBD=y # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 121a6660ad4e5c..71b78ecee75c64 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c @@ -9,6 +9,7 @@ * the GNU General Public License (GPL), incorporated herein by reference. 
*/ +#include #include #include #include @@ -70,7 +71,7 @@ static void nf_poweroff(void) nf_call(id); } -void nf_init(void) +void __init nf_init(void) { unsigned long id, version; char buf[256]; diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c index b7609f791522a4..2e5a787ea11b8f 100644 --- a/arch/m68k/hp300/config.c +++ b/arch/m68k/hp300/config.c @@ -14,6 +14,8 @@ #include #include +#include +#include #include #include #include /* readb() and writeb() */ @@ -70,15 +72,15 @@ extern int hp300_setup_serial_console(void) __init; int __init hp300_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_HP300_MODEL: - hp300_model = *data; + hp300_model = be32_to_cpup(data); break; case BI_HP300_UART_SCODE: - hp300_uart_scode = *data; + hp300_uart_scode = be32_to_cpup(data); break; case BI_HP300_UART_ADDR: diff --git a/arch/m68k/include/asm/amigahw.h b/arch/m68k/include/asm/amigahw.h index 7a19b5686a4a69..5ad568110f1728 100644 --- a/arch/m68k/include/asm/amigahw.h +++ b/arch/m68k/include/asm/amigahw.h @@ -18,26 +18,7 @@ #include - /* - * Different Amiga models - */ - -#define AMI_UNKNOWN (0) -#define AMI_500 (1) -#define AMI_500PLUS (2) -#define AMI_600 (3) -#define AMI_1000 (4) -#define AMI_1200 (5) -#define AMI_2000 (6) -#define AMI_2500 (7) -#define AMI_3000 (8) -#define AMI_3000T (9) -#define AMI_3000PLUS (10) -#define AMI_4000 (11) -#define AMI_4000T (12) -#define AMI_CDTV (13) -#define AMI_CD32 (14) -#define AMI_DRACO (15) +#include /* @@ -46,11 +27,6 @@ extern unsigned long amiga_chipset; -#define CS_STONEAGE (0) -#define CS_OCS (1) -#define CS_ECS (2) -#define CS_AGA (3) - /* * Miscellaneous @@ -266,7 +242,7 @@ struct CIA { #define zTwoBase (0x80000000) #define ZTWO_PADDR(x) (((unsigned long)(x))-zTwoBase) -#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase) +#define ZTWO_VADDR(x) ((void __iomem *)(((unsigned long)(x))+zTwoBase)) #define CUSTOM_PHYSADDR (0xdff000) #define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR))) diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h index 6c19e0c22411a2..87fc899d32eeba 100644 --- a/arch/m68k/include/asm/apollohw.h +++ b/arch/m68k/include/asm/apollohw.h @@ -5,18 +5,11 @@ #include -/* - apollo models -*/ +#include + extern u_long apollo_model; -#define APOLLO_UNKNOWN (0) -#define APOLLO_DN3000 (1) -#define APOLLO_DN3010 (2) -#define APOLLO_DN3500 (3) -#define APOLLO_DN4000 (4) -#define APOLLO_DN4500 (5) /* see scn2681 data sheet for more info. diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index d887050e6da643..972c8f33f05532 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -21,7 +21,7 @@ #define _LINUX_ATARIHW_H_ #include -#include +#include #include extern u_long atari_mch_cookie; diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h index 445ce22c23cb05..15c5f77c16142b 100644 --- a/arch/m68k/include/asm/barrier.h +++ b/arch/m68k/include/asm/barrier.h @@ -1,20 +1,8 @@ #ifndef _M68K_BARRIER_H #define _M68K_BARRIER_H -/* - * Force strict CPU ordering. - * Not really required on m68k... 
- */ #define nop() do { asm volatile ("nop"); barrier(); } while (0) -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define read_barrier_depends() ((void)0) -#define set_mb(var, value) ({ (var) = (value); wmb(); }) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() ((void)0) +#include #endif /* _M68K_BARRIER_H */ diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h index 67e7a78ad96be0..8e213267f8e716 100644 --- a/arch/m68k/include/asm/bootinfo.h +++ b/arch/m68k/include/asm/bootinfo.h @@ -6,373 +6,23 @@ ** This file is subject to the terms and conditions of the GNU General Public ** License. See the file COPYING in the main directory of this archive ** for more details. -** -** Created 09/29/92 by Greg Harp -** -** 5/2/94 Roman Hodek: -** Added bi_atari part of the machine dependent union bi_un; for now it -** contains just a model field to distinguish between TT and Falcon. -** 26/7/96 Roman Zippel: -** Renamed to setup.h; added some useful macros to allow gcc some -** optimizations if possible. -** 5/10/96 Geert Uytterhoeven: -** Redesign of the boot information structure; renamed to bootinfo.h again -** 27/11/96 Geert Uytterhoeven: -** Backwards compatibility with bootinfo interface version 1.0 */ #ifndef _M68K_BOOTINFO_H #define _M68K_BOOTINFO_H +#include - /* - * Bootinfo definitions - * - * This is an easily parsable and extendable structure containing all - * information to be passed from the bootstrap to the kernel. - * - * This way I hope to keep all future changes back/forewards compatible. - * Thus, keep your fingers crossed... - * - * This structure is copied right after the kernel bss by the bootstrap - * routine. 
- */ #ifndef __ASSEMBLY__ -struct bi_record { - unsigned short tag; /* tag ID */ - unsigned short size; /* size of record (in bytes) */ - unsigned long data[0]; /* data */ -}; - -#endif /* __ASSEMBLY__ */ - - - /* - * Tag Definitions - * - * Machine independent tags start counting from 0x0000 - * Machine dependent tags start counting from 0x8000 - */ - -#define BI_LAST 0x0000 /* last record (sentinel) */ -#define BI_MACHTYPE 0x0001 /* machine type (u_long) */ -#define BI_CPUTYPE 0x0002 /* cpu type (u_long) */ -#define BI_FPUTYPE 0x0003 /* fpu type (u_long) */ -#define BI_MMUTYPE 0x0004 /* mmu type (u_long) */ -#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ - /* (struct mem_info) */ -#define BI_RAMDISK 0x0006 /* ramdisk address and size */ - /* (struct mem_info) */ -#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ - /* (string) */ - - /* - * Amiga-specific tags - */ - -#define BI_AMIGA_MODEL 0x8000 /* model (u_long) */ -#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ - /* (struct ConfigDev) */ -#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (u_long) */ -#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (u_char) */ -#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (u_char) */ -#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (u_long) */ -#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (u_long) */ -#define BI_AMIGA_SERPER 0x8007 /* serial port period (u_short) */ - - /* - * Atari-specific tags - */ - -#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (u_long) */ -#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (u_long) */ - /* (values are ATARI_MACH_* defines */ - -/* mch_cookie values (upper word) */ -#define ATARI_MCH_ST 0 -#define ATARI_MCH_STE 1 -#define ATARI_MCH_TT 2 -#define ATARI_MCH_FALCON 3 - -/* mch_type values */ -#define ATARI_MACH_NORMAL 0 /* no special machine type */ -#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ -#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ -#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ - - /* - * VME-specific tags - */ - -#define BI_VME_TYPE 0x8000 /* VME sub-architecture (u_long) */ -#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ - -/* BI_VME_TYPE codes */ -#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ -#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ -#define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */ -#define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ -#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ -#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ -#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ -#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ -#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ - -/* BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on - * Motorola VME boards. Contains board number, Bug version, board - * configuration options, etc. See include/asm/mvme16xhw.h for details. 
- */ - - - /* - * Macintosh-specific tags (all u_long) - */ - -#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ -#define BI_MAC_VADDR 0x8001 /* Mac video base address */ -#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ -#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ -#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ -#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ -#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ -#define BI_MAC_BTIME 0x8007 /* Mac boot time */ -#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ -#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ -#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ -#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ - - /* - * Macintosh hardware profile data - unused, see macintosh.h for - * reasonable type values - */ - -#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ -#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ -#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ -#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ -#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ -#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ -#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ -#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ -#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ -#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ -#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ -#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ -#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ -#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */ -#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ -#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ -#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ -#define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */ - - /* - * Mac: compatibility with old booter data format (temporarily) - * Fields unused with the new bootinfo can be deleted now; instead of - * adding new fields the struct might be splitted into a hardware address - * part and a hardware type part - */ - -#ifndef __ASSEMBLY__ - -struct mac_booter_data -{ - unsigned long videoaddr; - unsigned long videorow; - unsigned long videodepth; - unsigned long dimensions; - unsigned long args; - unsigned long boottime; - unsigned long gmtbias; - unsigned long bootver; - unsigned long videological; - unsigned long sccbase; - unsigned long id; - unsigned long memsize; - unsigned long serialmf; - unsigned long serialhsk; - unsigned long serialgpi; - unsigned long printmf; - unsigned long printhsk; - unsigned long printgpi; - unsigned long cpuid; - unsigned long rombase; - unsigned long adbdelay; - unsigned long timedbra; -}; - -extern struct mac_booter_data - mac_bi_data; - +#ifdef CONFIG_BOOTINFO_PROC +extern void save_bootinfo(const struct bi_record *bi); +#else +static inline void save_bootinfo(const struct bi_record *bi) {} #endif - /* - * Apollo-specific tags - */ - -#define BI_APOLLO_MODEL 0x8000 /* model (u_long) */ - - /* - * HP300-specific tags - */ - -#define BI_HP300_MODEL 0x8000 /* model (u_long) */ -#define BI_HP300_UART_SCODE 0x8001 /* UART select code (u_long) */ -#define BI_HP300_UART_ADDR 0x8002 /* phys. 
addr of UART (u_long) */ - - /* - * Stuff for bootinfo interface versioning - * - * At the start of kernel code, a 'struct bootversion' is located. - * bootstrap checks for a matching version of the interface before booting - * a kernel, to avoid user confusion if kernel and bootstrap don't work - * together :-) - * - * If incompatible changes are made to the bootinfo interface, the major - * number below should be stepped (and the minor reset to 0) for the - * appropriate machine. If a change is backward-compatible, the minor - * should be stepped. "Backwards-compatible" means that booting will work, - * but certain features may not. - */ - -#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ -#define MK_BI_VERSION(major,minor) (((major)<<16)+(minor)) -#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) -#define BI_VERSION_MINOR(v) ((v) & 0xffff) - -#ifndef __ASSEMBLY__ - -struct bootversion { - unsigned short branch; - unsigned long magic; - struct { - unsigned long machtype; - unsigned long version; - } machversions[0]; -}; - #endif /* __ASSEMBLY__ */ -#define AMIGA_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define ATARI_BOOTI_VERSION MK_BI_VERSION( 2, 1 ) -#define MAC_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define MVME147_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define MVME16x_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define BVME6000_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define Q40_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define HP300_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) - -#ifdef BOOTINFO_COMPAT_1_0 - - /* - * Backwards compatibility with bootinfo interface version 1.0 - */ - -#define COMPAT_AMIGA_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) -#define COMPAT_ATARI_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) -#define COMPAT_MAC_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) - -#include - -#define COMPAT_NUM_AUTO 16 - -struct compat_bi_Amiga { - int model; - int num_autocon; - struct ConfigDev autocon[COMPAT_NUM_AUTO]; - unsigned long chip_size; - unsigned char vblank; - unsigned char psfreq; - unsigned long eclock; - unsigned long chipset; - unsigned long hw_present; -}; - -struct compat_bi_Atari { - unsigned long hw_present; - unsigned long mch_cookie; -}; - -#ifndef __ASSEMBLY__ - -struct compat_bi_Macintosh -{ - unsigned long videoaddr; - unsigned long videorow; - unsigned long videodepth; - unsigned long dimensions; - unsigned long args; - unsigned long boottime; - unsigned long gmtbias; - unsigned long bootver; - unsigned long videological; - unsigned long sccbase; - unsigned long id; - unsigned long memsize; - unsigned long serialmf; - unsigned long serialhsk; - unsigned long serialgpi; - unsigned long printmf; - unsigned long printhsk; - unsigned long printgpi; - unsigned long cpuid; - unsigned long rombase; - unsigned long adbdelay; - unsigned long timedbra; -}; - -#endif - -struct compat_mem_info { - unsigned long addr; - unsigned long size; -}; - -#define COMPAT_NUM_MEMINFO 4 - -#define COMPAT_CPUB_68020 0 -#define COMPAT_CPUB_68030 1 -#define COMPAT_CPUB_68040 2 -#define COMPAT_CPUB_68060 3 -#define COMPAT_FPUB_68881 5 -#define COMPAT_FPUB_68882 6 -#define COMPAT_FPUB_68040 7 -#define COMPAT_FPUB_68060 8 - -#define COMPAT_CPU_68020 (1< -/* This information was taken from NetBSD */ -#define HP_320 (0) /* 16MHz 68020+HP MMU+16K external cache */ -#define HP_330 (1) /* 16MHz 68020+68851 MMU */ -#define HP_340 (2) /* 16MHz 68030 */ -#define HP_345 (3) /* 50MHz 68030+32K external cache */ -#define HP_350 (4) /* 25MHz 68020+HP MMU+32K external cache */ -#define HP_360 (5) /* 25MHz 68030 */ -#define HP_370 (6) /* 33MHz 68030+64K 
external cache */ -#define HP_375 (7) /* 50MHz 68030+32K external cache */ -#define HP_380 (8) /* 25MHz 68040 */ -#define HP_385 (9) /* 33MHz 68040 */ -#define HP_400 (10) /* 50MHz 68030+32K external cache */ -#define HP_425T (11) /* 25MHz 68040 - model 425t */ -#define HP_425S (12) /* 25MHz 68040 - model 425s */ -#define HP_425E (13) /* 25MHz 68040 - model 425e */ -#define HP_433T (14) /* 33MHz 68040 - model 433t */ -#define HP_433S (15) /* 33MHz 68040 - model 433s */ +extern unsigned long hp300_model; #endif /* _M68K_HP300HW_H */ diff --git a/arch/m68k/include/asm/kexec.h b/arch/m68k/include/asm/kexec.h new file mode 100644 index 00000000000000..3df97abac147ed --- /dev/null +++ b/arch/m68k/include/asm/kexec.h @@ -0,0 +1,29 @@ +#ifndef _ASM_M68K_KEXEC_H +#define _ASM_M68K_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +#define KEXEC_ARCH KEXEC_ARCH_68K + +#ifndef __ASSEMBLY__ + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Dummy implementation for now */ +} + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_M68K_KEXEC_H */ diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index aeeedf8b2d259d..fe3fc9ae1b69d0 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -254,6 +254,8 @@ extern volatile __u8 *via1,*via2; extern int rbv_present,via_alt_mapping; +struct irq_desc; + extern void via_register_interrupts(void); extern void via_irq_enable(int); extern void via_irq_disable(int); diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index 682a1a2ff55f93..d323b2c2d07d4e 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -4,6 +4,9 @@ #include #include +#include + + /* * Apple Macintoshisms */ @@ -74,65 +77,29 @@ struct mac_model #define MAC_FLOPPY_SWIM_IOP 3 #define MAC_FLOPPY_AV 4 -/* - * Gestalt numbers - */ +extern struct mac_model *macintosh_config; -#define MAC_MODEL_II 6 -#define MAC_MODEL_IIX 7 -#define MAC_MODEL_IICX 8 -#define MAC_MODEL_SE30 9 -#define MAC_MODEL_IICI 11 -#define MAC_MODEL_IIFX 13 /* And well numbered it is too */ -#define MAC_MODEL_IISI 18 -#define MAC_MODEL_LC 19 -#define MAC_MODEL_Q900 20 -#define MAC_MODEL_PB170 21 -#define MAC_MODEL_Q700 22 -#define MAC_MODEL_CLII 23 /* aka: P200 */ -#define MAC_MODEL_PB140 25 -#define MAC_MODEL_Q950 26 /* aka: WGS95 */ -#define MAC_MODEL_LCIII 27 /* aka: P450 */ -#define MAC_MODEL_PB210 29 -#define MAC_MODEL_C650 30 -#define MAC_MODEL_PB230 32 -#define MAC_MODEL_PB180 33 -#define MAC_MODEL_PB160 34 -#define MAC_MODEL_Q800 35 /* aka: WGS80 */ -#define MAC_MODEL_Q650 36 -#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ -#define MAC_MODEL_PB250 38 -#define MAC_MODEL_IIVI 44 -#define MAC_MODEL_P600 45 /* aka: P600CD */ -#define MAC_MODEL_IIVX 48 -#define MAC_MODEL_CCL 49 /* aka: P250 */ -#define MAC_MODEL_PB165C 50 -#define MAC_MODEL_C610 52 /* aka: WGS60 */ -#define MAC_MODEL_Q610 53 -#define MAC_MODEL_PB145 54 /* aka: PB145B */ -#define MAC_MODEL_P520 56 /* aka: LC520 */ -#define MAC_MODEL_C660 60 -#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ -#define MAC_MODEL_PB180C 71 -#define 
MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ -#define MAC_MODEL_PB270C 77 -#define MAC_MODEL_Q840 78 -#define MAC_MODEL_P550 80 /* aka: LC550, P560 */ -#define MAC_MODEL_CCLII 83 /* aka: P275 */ -#define MAC_MODEL_PB165 84 -#define MAC_MODEL_PB190 85 /* aka: PB190CS */ -#define MAC_MODEL_TV 88 -#define MAC_MODEL_P475 89 /* aka: LC475, P476 */ -#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ -#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ -#define MAC_MODEL_Q605 94 -#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ -#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ -#define MAC_MODEL_P588 99 /* aka: LC580, P580 */ -#define MAC_MODEL_PB280 102 -#define MAC_MODEL_PB280C 103 -#define MAC_MODEL_PB150 115 -extern struct mac_model *macintosh_config; + /* + * Internal representation of the Mac hardware, filled in from bootinfo + */ + +struct mac_booter_data +{ + unsigned long videoaddr; + unsigned long videorow; + unsigned long videodepth; + unsigned long dimensions; + unsigned long boottime; + unsigned long gmtbias; + unsigned long videological; + unsigned long sccbase; + unsigned long id; + unsigned long memsize; + unsigned long cpuid; + unsigned long rombase; +}; + +extern struct mac_booter_data mac_bi_data; #endif diff --git a/arch/m68k/include/asm/mc146818rtc.h b/arch/m68k/include/asm/mc146818rtc.h index 9f70a01f73dc99..05b43bf5cdf390 100644 --- a/arch/m68k/include/asm/mc146818rtc.h +++ b/arch/m68k/include/asm/mc146818rtc.h @@ -10,16 +10,16 @@ #include -#define RTC_PORT(x) (TT_RTC_BAS + 2*(x)) +#define ATARI_RTC_PORT(x) (TT_RTC_BAS + 2*(x)) #define RTC_ALWAYS_BCD 0 #define CMOS_READ(addr) ({ \ -atari_outb_p((addr),RTC_PORT(0)); \ -atari_inb_p(RTC_PORT(1)); \ +atari_outb_p((addr), ATARI_RTC_PORT(0)); \ +atari_inb_p(ATARI_RTC_PORT(1)); \ }) #define CMOS_WRITE(val, addr) ({ \ -atari_outb_p((addr),RTC_PORT(0)); \ -atari_outb_p((val),RTC_PORT(1)); \ +atari_outb_p((addr), ATARI_RTC_PORT(0)); \ +atari_outb_p((val), ATARI_RTC_PORT(1)); \ }) #endif /* CONFIG_ATARI */ diff --git a/arch/m68k/include/asm/mvme16xhw.h b/arch/m68k/include/asm/mvme16xhw.h index 6117f56653d28d..1eb89de631e5c0 100644 --- a/arch/m68k/include/asm/mvme16xhw.h +++ b/arch/m68k/include/asm/mvme16xhw.h @@ -3,23 +3,6 @@ #include -/* Board ID data structure - pointer to this retrieved from Bug by head.S */ - -/* Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) */ - -extern long mvme_bdid_ptr; - -typedef struct { - char bdid[4]; - u_char rev, mth, day, yr; - u_short size, reserved; - u_short brdno; - char brdsuffix[2]; - u_long options; - u_short clun, dlun, ctype, dnum; - u_long option2; -} t_bdid, *p_bdid; - typedef struct { u_char ack_icr, diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h index 65e78a2dad64a9..8f2023f8c1c457 100644 --- a/arch/m68k/include/asm/setup.h +++ b/arch/m68k/include/asm/setup.h @@ -22,6 +22,7 @@ #ifndef _M68K_SETUP_H #define _M68K_SETUP_H +#include #include @@ -297,14 +298,14 @@ extern int m68k_is040or060; #define NUM_MEMINFO 4 #ifndef __ASSEMBLY__ -struct mem_info { +struct m68k_mem_info { unsigned long addr; /* physical address of memory chunk */ unsigned long size; /* length of memory chunk (in bytes) */ }; extern int m68k_num_memory; /* # of memory blocks found (and used) */ extern int m68k_realnum_memory; /* real # of memory blocks found */ -extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */ +extern struct m68k_mem_info m68k_memory[NUM_MEMINFO];/* memory description */ 
#endif #endif /* _M68K_SETUP_H */ diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h index 6759dad954f61d..efc1f48923573d 100644 --- a/arch/m68k/include/asm/timex.h +++ b/arch/m68k/include/asm/timex.h @@ -28,4 +28,14 @@ static inline cycles_t get_cycles(void) return 0; } +extern unsigned long (*mach_random_get_entropy)(void); + +static inline unsigned long random_get_entropy(void) +{ + if (mach_random_get_entropy) + return mach_random_get_entropy(); + return 0; +} +#define random_get_entropy random_get_entropy + #endif diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index 1fef45ada0973c..6a2d257bdfb222 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -11,6 +11,14 @@ generic-y += termbits.h generic-y += termios.h header-y += a.out.h +header-y += bootinfo.h +header-y += bootinfo-amiga.h +header-y += bootinfo-apollo.h +header-y += bootinfo-atari.h +header-y += bootinfo-hp300.h +header-y += bootinfo-mac.h +header-y += bootinfo-q40.h +header-y += bootinfo-vme.h header-y += byteorder.h header-y += cachectl.h header-y += fcntl.h diff --git a/arch/m68k/include/uapi/asm/bootinfo-amiga.h b/arch/m68k/include/uapi/asm/bootinfo-amiga.h new file mode 100644 index 00000000000000..daad3c58d2da58 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-amiga.h @@ -0,0 +1,63 @@ +/* +** asm/bootinfo-amiga.h -- Amiga-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_AMIGA_H +#define _UAPI_ASM_M68K_BOOTINFO_AMIGA_H + + + /* + * Amiga-specific tags + */ + +#define BI_AMIGA_MODEL 0x8000 /* model (__be32) */ +#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ + /* (AmigaOS struct ConfigDev) */ +#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (__be32) */ +#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (__u8) */ +#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (__u8) */ +#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (__be32) */ +#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (__be32) */ +#define BI_AMIGA_SERPER 0x8007 /* serial port period (__be16) */ + + + /* + * Amiga models (BI_AMIGA_MODEL) + */ + +#define AMI_UNKNOWN 0 +#define AMI_500 1 +#define AMI_500PLUS 2 +#define AMI_600 3 +#define AMI_1000 4 +#define AMI_1200 5 +#define AMI_2000 6 +#define AMI_2500 7 +#define AMI_3000 8 +#define AMI_3000T 9 +#define AMI_3000PLUS 10 +#define AMI_4000 11 +#define AMI_4000T 12 +#define AMI_CDTV 13 +#define AMI_CD32 14 +#define AMI_DRACO 15 + + + /* + * Amiga chipsets (BI_AMIGA_CHIPSET) + */ + +#define CS_STONEAGE 0 +#define CS_OCS 1 +#define CS_ECS 2 +#define CS_AGA 3 + + + /* + * Latest Amiga bootinfo version + */ + +#define AMIGA_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_AMIGA_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-apollo.h b/arch/m68k/include/uapi/asm/bootinfo-apollo.h new file mode 100644 index 00000000000000..a93e0af1c6fe11 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-apollo.h @@ -0,0 +1,28 @@ +/* +** asm/bootinfo-apollo.h -- Apollo-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_APOLLO_H +#define _UAPI_ASM_M68K_BOOTINFO_APOLLO_H + + + /* + * Apollo-specific tags + */ + +#define BI_APOLLO_MODEL 0x8000 /* model (__be32) */ + + + /* + * Apollo models (BI_APOLLO_MODEL) + */ + +#define APOLLO_UNKNOWN 0 +#define APOLLO_DN3000 1 +#define APOLLO_DN3010 2 +#define APOLLO_DN3500 3 +#define APOLLO_DN4000 4 +#define APOLLO_DN4500 5 + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_APOLLO_H */ 
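For illustration only (not part of this patch): a machine-specific consumer of the BI_APOLLO_MODEL tag above would follow the same pattern as the hp300_parse_bootinfo() hunk earlier in this patch, converting the big-endian tag and payload with be16_to_cpu()/be32_to_cpup(). The function name and its wiring into the Apollo platform setup code are assumptions made for this sketch.

#include <linux/init.h>
#include <asm/byteorder.h>
#include <asm/bootinfo.h>
#include <asm/bootinfo-apollo.h>
#include <asm/apollohw.h>

/* Sketch of an Apollo bootinfo parser; mirrors the hp300 handler above. */
int __init apollo_parse_bootinfo(const struct bi_record *record)
{
	int unknown = 0;
	const void *data = record->data;

	switch (be16_to_cpu(record->tag)) {
	case BI_APOLLO_MODEL:
		/* payload is a single big-endian 32-bit model number */
		apollo_model = be32_to_cpup(data);
		break;
	default:
		unknown = 1;
		break;
	}

	return unknown;
}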
diff --git a/arch/m68k/include/uapi/asm/bootinfo-atari.h b/arch/m68k/include/uapi/asm/bootinfo-atari.h new file mode 100644 index 00000000000000..a817854049bb56 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-atari.h @@ -0,0 +1,44 @@ +/* +** asm/bootinfo-atari.h -- Atari-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_ATARI_H +#define _UAPI_ASM_M68K_BOOTINFO_ATARI_H + + + /* + * Atari-specific tags + */ + +#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (__be32) */ +#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (__be32) */ + + + /* + * mch_cookie values (upper word of BI_ATARI_MCH_COOKIE) + */ + +#define ATARI_MCH_ST 0 +#define ATARI_MCH_STE 1 +#define ATARI_MCH_TT 2 +#define ATARI_MCH_FALCON 3 + + + /* + * Atari machine types (BI_ATARI_MCH_TYPE) + */ + +#define ATARI_MACH_NORMAL 0 /* no special machine type */ +#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ +#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ +#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ + + + /* + * Latest Atari bootinfo version + */ + +#define ATARI_BOOTI_VERSION MK_BI_VERSION(2, 1) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_ATARI_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-hp300.h b/arch/m68k/include/uapi/asm/bootinfo-hp300.h new file mode 100644 index 00000000000000..c90cb71ed89a95 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-hp300.h @@ -0,0 +1,50 @@ +/* +** asm/bootinfo-hp300.h -- HP9000/300-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_HP300_H +#define _UAPI_ASM_M68K_BOOTINFO_HP300_H + + + /* + * HP9000/300-specific tags + */ + +#define BI_HP300_MODEL 0x8000 /* model (__be32) */ +#define BI_HP300_UART_SCODE 0x8001 /* UART select code (__be32) */ +#define BI_HP300_UART_ADDR 0x8002 /* phys. 
addr of UART (__be32) */ + + + /* + * HP9000/300 and /400 models (BI_HP300_MODEL) + * + * This information was taken from NetBSD + */ + +#define HP_320 0 /* 16MHz 68020+HP MMU+16K external cache */ +#define HP_330 1 /* 16MHz 68020+68851 MMU */ +#define HP_340 2 /* 16MHz 68030 */ +#define HP_345 3 /* 50MHz 68030+32K external cache */ +#define HP_350 4 /* 25MHz 68020+HP MMU+32K external cache */ +#define HP_360 5 /* 25MHz 68030 */ +#define HP_370 6 /* 33MHz 68030+64K external cache */ +#define HP_375 7 /* 50MHz 68030+32K external cache */ +#define HP_380 8 /* 25MHz 68040 */ +#define HP_385 9 /* 33MHz 68040 */ + +#define HP_400 10 /* 50MHz 68030+32K external cache */ +#define HP_425T 11 /* 25MHz 68040 - model 425t */ +#define HP_425S 12 /* 25MHz 68040 - model 425s */ +#define HP_425E 13 /* 25MHz 68040 - model 425e */ +#define HP_433T 14 /* 33MHz 68040 - model 433t */ +#define HP_433S 15 /* 33MHz 68040 - model 433s */ + + + /* + * Latest HP9000/300 bootinfo version + */ + +#define HP300_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_HP300_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-mac.h b/arch/m68k/include/uapi/asm/bootinfo-mac.h new file mode 100644 index 00000000000000..b44ff73898a9cb --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-mac.h @@ -0,0 +1,119 @@ +/* +** asm/bootinfo-mac.h -- Macintosh-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_MAC_H +#define _UAPI_ASM_M68K_BOOTINFO_MAC_H + + + /* + * Macintosh-specific tags (all __be32) + */ + +#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ +#define BI_MAC_VADDR 0x8001 /* Mac video base address */ +#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ +#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ +#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ +#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ +#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ +#define BI_MAC_BTIME 0x8007 /* Mac boot time */ +#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ +#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ +#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ +#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ + + + /* + * Macintosh hardware profile data - unused, see macintosh.h for + * reasonable type values + */ + +#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ +#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ +#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ +#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ +#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ +#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ +#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ +#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ +#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ +#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ +#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ +#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ +#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ +#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */ +#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ +#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ +#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ +#define 
BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */ + + + /* + * Macintosh Gestalt numbers (BI_MAC_MODEL) + */ + +#define MAC_MODEL_II 6 +#define MAC_MODEL_IIX 7 +#define MAC_MODEL_IICX 8 +#define MAC_MODEL_SE30 9 +#define MAC_MODEL_IICI 11 +#define MAC_MODEL_IIFX 13 /* And well numbered it is too */ +#define MAC_MODEL_IISI 18 +#define MAC_MODEL_LC 19 +#define MAC_MODEL_Q900 20 +#define MAC_MODEL_PB170 21 +#define MAC_MODEL_Q700 22 +#define MAC_MODEL_CLII 23 /* aka: P200 */ +#define MAC_MODEL_PB140 25 +#define MAC_MODEL_Q950 26 /* aka: WGS95 */ +#define MAC_MODEL_LCIII 27 /* aka: P450 */ +#define MAC_MODEL_PB210 29 +#define MAC_MODEL_C650 30 +#define MAC_MODEL_PB230 32 +#define MAC_MODEL_PB180 33 +#define MAC_MODEL_PB160 34 +#define MAC_MODEL_Q800 35 /* aka: WGS80 */ +#define MAC_MODEL_Q650 36 +#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ +#define MAC_MODEL_PB250 38 +#define MAC_MODEL_IIVI 44 +#define MAC_MODEL_P600 45 /* aka: P600CD */ +#define MAC_MODEL_IIVX 48 +#define MAC_MODEL_CCL 49 /* aka: P250 */ +#define MAC_MODEL_PB165C 50 +#define MAC_MODEL_C610 52 /* aka: WGS60 */ +#define MAC_MODEL_Q610 53 +#define MAC_MODEL_PB145 54 /* aka: PB145B */ +#define MAC_MODEL_P520 56 /* aka: LC520 */ +#define MAC_MODEL_C660 60 +#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ +#define MAC_MODEL_PB180C 71 +#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ +#define MAC_MODEL_PB270C 77 +#define MAC_MODEL_Q840 78 +#define MAC_MODEL_P550 80 /* aka: LC550, P560 */ +#define MAC_MODEL_CCLII 83 /* aka: P275 */ +#define MAC_MODEL_PB165 84 +#define MAC_MODEL_PB190 85 /* aka: PB190CS */ +#define MAC_MODEL_TV 88 +#define MAC_MODEL_P475 89 /* aka: LC475, P476 */ +#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ +#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ +#define MAC_MODEL_Q605 94 +#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ +#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ +#define MAC_MODEL_P588 99 /* aka: LC580, P580 */ +#define MAC_MODEL_PB280 102 +#define MAC_MODEL_PB280C 103 +#define MAC_MODEL_PB150 115 + + + /* + * Latest Macintosh bootinfo version + */ + +#define MAC_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-q40.h b/arch/m68k/include/uapi/asm/bootinfo-q40.h new file mode 100644 index 00000000000000..c79fea7e555bc0 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-q40.h @@ -0,0 +1,16 @@ +/* +** asm/bootinfo-q40.h -- Q40-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_Q40_H +#define _UAPI_ASM_M68K_BOOTINFO_Q40_H + + + /* + * Latest Q40 bootinfo version + */ + +#define Q40_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_Q40_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-vme.h b/arch/m68k/include/uapi/asm/bootinfo-vme.h new file mode 100644 index 00000000000000..a135eb41d6725d --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-vme.h @@ -0,0 +1,70 @@ +/* +** asm/bootinfo-vme.h -- VME-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_VME_H +#define _UAPI_ASM_M68K_BOOTINFO_VME_H + + +#include + + + /* + * VME-specific tags + */ + +#define BI_VME_TYPE 0x8000 /* VME sub-architecture (__be32) */ +#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ + + + /* + * VME models (BI_VME_TYPE) + */ + +#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ +#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ +#define VME_TYPE_MVME162 0x0162 
/* Motorola MVME162 */ +#define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ +#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ +#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ +#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ +#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ +#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ + + +#ifndef __ASSEMBLY__ + +/* + * Board ID data structure - pointer to this retrieved from Bug by head.S + * + * BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on + * Motorola VME boards. Contains board number, Bug version, board + * configuration options, etc. + * + * Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) + */ + +typedef struct { + char bdid[4]; + __u8 rev, mth, day, yr; + __be16 size, reserved; + __be16 brdno; + char brdsuffix[2]; + __be32 options; + __be16 clun, dlun, ctype, dnum; + __be32 option2; +} t_bdid, *p_bdid; + +#endif /* __ASSEMBLY__ */ + + + /* + * Latest VME bootinfo versions + */ + +#define MVME147_BOOTI_VERSION MK_BI_VERSION(2, 0) +#define MVME16x_BOOTI_VERSION MK_BI_VERSION(2, 0) +#define BVME6000_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_VME_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h new file mode 100644 index 00000000000000..cdeb26a015b09c --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo.h @@ -0,0 +1,174 @@ +/* + * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure + * + * Copyright 1992 by Greg Harp + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_H +#define _UAPI_ASM_M68K_BOOTINFO_H + + +#include + + +#ifndef __ASSEMBLY__ + + /* + * Bootinfo definitions + * + * This is an easily parsable and extendable structure containing all + * information to be passed from the bootstrap to the kernel. + * + * This way I hope to keep all future changes back/forewards compatible. + * Thus, keep your fingers crossed... + * + * This structure is copied right after the kernel by the bootstrap + * routine. 
+ */ + +struct bi_record { + __be16 tag; /* tag ID */ + __be16 size; /* size of record (in bytes) */ + __be32 data[0]; /* data */ +}; + + +struct mem_info { + __be32 addr; /* physical address of memory chunk */ + __be32 size; /* length of memory chunk (in bytes) */ +}; + +#endif /* __ASSEMBLY__ */ + + + /* + * Tag Definitions + * + * Machine independent tags start counting from 0x0000 + * Machine dependent tags start counting from 0x8000 + */ + +#define BI_LAST 0x0000 /* last record (sentinel) */ +#define BI_MACHTYPE 0x0001 /* machine type (__be32) */ +#define BI_CPUTYPE 0x0002 /* cpu type (__be32) */ +#define BI_FPUTYPE 0x0003 /* fpu type (__be32) */ +#define BI_MMUTYPE 0x0004 /* mmu type (__be32) */ +#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ + /* (struct mem_info) */ +#define BI_RAMDISK 0x0006 /* ramdisk address and size */ + /* (struct mem_info) */ +#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ + /* (string) */ + + + /* + * Linux/m68k Architectures (BI_MACHTYPE) + */ + +#define MACH_AMIGA 1 +#define MACH_ATARI 2 +#define MACH_MAC 3 +#define MACH_APOLLO 4 +#define MACH_SUN3 5 +#define MACH_MVME147 6 +#define MACH_MVME16x 7 +#define MACH_BVME6000 8 +#define MACH_HP300 9 +#define MACH_Q40 10 +#define MACH_SUN3X 11 +#define MACH_M54XX 12 + + + /* + * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE) + * + * Note: we may rely on the following equalities: + * + * CPU_68020 == MMU_68851 + * CPU_68030 == MMU_68030 + * CPU_68040 == FPU_68040 == MMU_68040 + * CPU_68060 == FPU_68060 == MMU_68060 + */ + +#define CPUB_68020 0 +#define CPUB_68030 1 +#define CPUB_68040 2 +#define CPUB_68060 3 +#define CPUB_COLDFIRE 4 + +#define CPU_68020 (1 << CPUB_68020) +#define CPU_68030 (1 << CPUB_68030) +#define CPU_68040 (1 << CPUB_68040) +#define CPU_68060 (1 << CPUB_68060) +#define CPU_COLDFIRE (1 << CPUB_COLDFIRE) + +#define FPUB_68881 0 +#define FPUB_68882 1 +#define FPUB_68040 2 /* Internal FPU */ +#define FPUB_68060 3 /* Internal FPU */ +#define FPUB_SUNFPA 4 /* Sun-3 FPA */ +#define FPUB_COLDFIRE 5 /* ColdFire FPU */ + +#define FPU_68881 (1 << FPUB_68881) +#define FPU_68882 (1 << FPUB_68882) +#define FPU_68040 (1 << FPUB_68040) +#define FPU_68060 (1 << FPUB_68060) +#define FPU_SUNFPA (1 << FPUB_SUNFPA) +#define FPU_COLDFIRE (1 << FPUB_COLDFIRE) + +#define MMUB_68851 0 +#define MMUB_68030 1 /* Internal MMU */ +#define MMUB_68040 2 /* Internal MMU */ +#define MMUB_68060 3 /* Internal MMU */ +#define MMUB_APOLLO 4 /* Custom Apollo */ +#define MMUB_SUN3 5 /* Custom Sun-3 */ +#define MMUB_COLDFIRE 6 /* Internal MMU */ + +#define MMU_68851 (1 << MMUB_68851) +#define MMU_68030 (1 << MMUB_68030) +#define MMU_68040 (1 << MMUB_68040) +#define MMU_68060 (1 << MMUB_68060) +#define MMU_SUN3 (1 << MMUB_SUN3) +#define MMU_APOLLO (1 << MMUB_APOLLO) +#define MMU_COLDFIRE (1 << MMUB_COLDFIRE) + + + /* + * Stuff for bootinfo interface versioning + * + * At the start of kernel code, a 'struct bootversion' is located. + * bootstrap checks for a matching version of the interface before booting + * a kernel, to avoid user confusion if kernel and bootstrap don't work + * together :-) + * + * If incompatible changes are made to the bootinfo interface, the major + * number below should be stepped (and the minor reset to 0) for the + * appropriate machine. If a change is backward-compatible, the minor + * should be stepped. "Backwards-compatible" means that booting will work, + * but certain features may not. 
+ */ + +#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ +#define MK_BI_VERSION(major, minor) (((major) << 16) + (minor)) +#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) +#define BI_VERSION_MINOR(v) ((v) & 0xffff) + +#ifndef __ASSEMBLY__ + +struct bootversion { + __be16 branch; + __be32 magic; + struct { + __be32 machtype; + __be32 version; + } machversions[0]; +} __packed; + +#endif /* __ASSEMBLY__ */ + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_H */ diff --git a/arch/m68k/include/uapi/asm/setup.h b/arch/m68k/include/uapi/asm/setup.h index 85579bff455ccb..6a6dc636761eaa 100644 --- a/arch/m68k/include/uapi/asm/setup.h +++ b/arch/m68k/include/uapi/asm/setup.h @@ -6,98 +6,11 @@ ** This file is subject to the terms and conditions of the GNU General Public ** License. See the file COPYING in the main directory of this archive ** for more details. -** -** Created 09/29/92 by Greg Harp -** -** 5/2/94 Roman Hodek: -** Added bi_atari part of the machine dependent union bi_un; for now it -** contains just a model field to distinguish between TT and Falcon. -** 26/7/96 Roman Zippel: -** Renamed to setup.h; added some useful macros to allow gcc some -** optimizations if possible. -** 5/10/96 Geert Uytterhoeven: -** Redesign of the boot information structure; moved boot information -** structure to bootinfo.h */ #ifndef _UAPI_M68K_SETUP_H #define _UAPI_M68K_SETUP_H - - - /* - * Linux/m68k Architectures - */ - -#define MACH_AMIGA 1 -#define MACH_ATARI 2 -#define MACH_MAC 3 -#define MACH_APOLLO 4 -#define MACH_SUN3 5 -#define MACH_MVME147 6 -#define MACH_MVME16x 7 -#define MACH_BVME6000 8 -#define MACH_HP300 9 -#define MACH_Q40 10 -#define MACH_SUN3X 11 -#define MACH_M54XX 12 - #define COMMAND_LINE_SIZE 256 - - - /* - * CPU, FPU and MMU types - * - * Note: we may rely on the following equalities: - * - * CPU_68020 == MMU_68851 - * CPU_68030 == MMU_68030 - * CPU_68040 == FPU_68040 == MMU_68040 - * CPU_68060 == FPU_68060 == MMU_68060 - */ - -#define CPUB_68020 0 -#define CPUB_68030 1 -#define CPUB_68040 2 -#define CPUB_68060 3 -#define CPUB_COLDFIRE 4 - -#define CPU_68020 (1< +#include +#include +#include +#include +#include + +#include +#include + + +static char bootinfo_tmp[1536] __initdata; + +static void *bootinfo_copy; +static size_t bootinfo_size; + +static ssize_t bootinfo_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, bootinfo_copy, + bootinfo_size); +} + +static const struct file_operations bootinfo_fops = { + .read = bootinfo_read, + .llseek = default_llseek, +}; + +void __init save_bootinfo(const struct bi_record *bi) +{ + const void *start = bi; + size_t size = sizeof(bi->tag); + + while (be16_to_cpu(bi->tag) != BI_LAST) { + uint16_t n = be16_to_cpu(bi->size); + size += n; + bi = (struct bi_record *)((unsigned long)bi + n); + } + + if (size > sizeof(bootinfo_tmp)) { + pr_err("Cannot save %zu bytes of bootinfo\n", size); + return; + } + + pr_info("Saving %zu bytes of bootinfo\n", size); + memcpy(bootinfo_tmp, start, size); + bootinfo_size = size; +} + +static int __init init_bootinfo_procfs(void) +{ + /* + * This cannot go into save_bootinfo() because kmalloc and proc don't + * work yet when it is called. 
+ */ + struct proc_dir_entry *pde; + + if (!bootinfo_size) + return -EINVAL; + + bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL); + if (!bootinfo_copy) + return -ENOMEM; + + memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size); + + pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL); + if (!pde) { + kfree(bootinfo_copy); + return -ENOMEM; + } + + return 0; +} + +arch_initcall(init_bootinfo_procfs); diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index ac85f16534af92..4c99bab7e6647c 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S @@ -23,7 +23,7 @@ ** 98/04/25 Phil Blundell: added HP300 support ** 1998/08/30 David Kilzer: Added support for font_desc structures ** for linux-2.1.115 -** 9/02/11 Richard Zidlicky: added Q40 support (initial vesion 99/01/01) +** 1999/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01) ** 2004/05/13 Kars de Jong: Finalised HP300 support ** ** This file is subject to the terms and conditions of the GNU General Public @@ -257,6 +257,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include #include #include @@ -1532,7 +1538,7 @@ L(cache_done): /* * Find a tag record in the bootinfo structure - * The bootinfo structure is located right after the kernel bss + * The bootinfo structure is located right after the kernel * Returns: d0: size (-1 if not found) * a0: data pointer (end-of-records if not found) */ @@ -2909,7 +2915,9 @@ func_start serial_init,%d0/%d1/%a0/%a1 #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) movel %pc@(L(mac_sccbase)),%a0 - /* Reset SCC device */ + /* Reset SCC register pointer */ + moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0 + /* Reset SCC device: write register pointer then register value */ moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ @@ -3896,8 +3904,6 @@ BVME_SCC_DATA_A = 0xffb0000f #endif #if defined(CONFIG_MAC) -L(mac_booter_data): - .long 0 L(mac_videobase): .long 0 L(mac_videodepth): diff --git a/arch/m68k/kernel/machine_kexec.c b/arch/m68k/kernel/machine_kexec.c new file mode 100644 index 00000000000000..d4affc917d9da1 --- /dev/null +++ b/arch/m68k/kernel/machine_kexec.c @@ -0,0 +1,58 @@ +/* + * machine_kexec.c - handle transition of Linux booting another kernel + */ +#include +#include +#include +#include + +#include +#include +#include + +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +int machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_shutdown(void) +{ +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ +} + +typedef void (*relocate_kernel_t)(unsigned long ptr, + unsigned long start, + unsigned long cpu_mmu_flags) __noreturn; + +void machine_kexec(struct kimage *image) +{ + void *reboot_code_buffer; + unsigned long cpu_mmu_flags; + + reboot_code_buffer = page_address(image->control_code_page); + + memcpy(reboot_code_buffer, relocate_new_kernel, + relocate_new_kernel_size); + + /* + * we do not want to be bothered. + */ + local_irq_disable(); + + pr_info("Will call new kernel at 0x%08lx. 
Bye...\n", image->start); + __flush_cache_all(); + cpu_mmu_flags = m68k_cputype | m68k_mmutype << 8; + ((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK, + image->start, + cpu_mmu_flags); +} diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S new file mode 100644 index 00000000000000..3e09a89067add5 --- /dev/null +++ b/arch/m68k/kernel/relocate_kernel.S @@ -0,0 +1,159 @@ +#include + +#include +#include +#include + + +#define MMU_BASE 8 /* MMU flags base in cpu_mmu_flags */ + +.text + +ENTRY(relocate_new_kernel) + movel %sp@(4),%a0 /* a0 = ptr */ + movel %sp@(8),%a1 /* a1 = start */ + movel %sp@(12),%d1 /* d1 = cpu_mmu_flags */ + movew #PAGE_MASK,%d2 /* d2 = PAGE_MASK */ + + /* Disable MMU */ + + btst #MMU_BASE + MMUB_68851,%d1 + jeq 3f + +1: /* 68851 or 68030 */ + + lea %pc@(.Lcopy),%a4 +2: addl #0x00000000,%a4 /* virt_to_phys() */ + + .section ".m68k_fixup","aw" + .long M68K_FIXUP_MEMOFFSET, 2b+2 + .previous + + .chip 68030 + pmove %tc,%d0 /* Disable MMU */ + bclr #7,%d0 + pmove %d0,%tc + jmp %a4@ /* Jump to physical .Lcopy */ + .chip 68k + +3: + btst #MMU_BASE + MMUB_68030,%d1 + jne 1b + + btst #MMU_BASE + MMUB_68040,%d1 + jeq 6f + +4: /* 68040 or 68060 */ + + lea %pc@(.Lcont040),%a4 +5: addl #0x00000000,%a4 /* virt_to_phys() */ + + .section ".m68k_fixup","aw" + .long M68K_FIXUP_MEMOFFSET, 5b+2 + .previous + + movel %a4,%d0 + andl #0xff000000,%d0 + orw #0xe020,%d0 /* Map 16 MiB, enable, cacheable */ + .chip 68040 + movec %d0,%itt0 + movec %d0,%dtt0 + .chip 68k + jmp %a4@ /* Jump to physical .Lcont040 */ + +.Lcont040: + moveq #0,%d0 + .chip 68040 + movec %d0,%tc /* Disable MMU */ + movec %d0,%itt0 + movec %d0,%itt1 + movec %d0,%dtt0 + movec %d0,%dtt1 + .chip 68k + jra .Lcopy + +6: + btst #MMU_BASE + MMUB_68060,%d1 + jne 4b + +.Lcopy: + movel %a0@+,%d0 /* d0 = entry = *ptr */ + jeq .Lflush + + btst #2,%d0 /* entry & IND_DONE? */ + jne .Lflush + + btst #1,%d0 /* entry & IND_INDIRECTION? */ + jeq 1f + andw %d2,%d0 + movel %d0,%a0 /* ptr = entry & PAGE_MASK */ + jra .Lcopy + +1: + btst #0,%d0 /* entry & IND_DESTINATION? */ + jeq 2f + andw %d2,%d0 + movel %d0,%a2 /* a2 = dst = entry & PAGE_MASK */ + jra .Lcopy + +2: + btst #3,%d0 /* entry & IND_SOURCE? 
*/ + jeq .Lcopy + + andw %d2,%d0 + movel %d0,%a3 /* a3 = src = entry & PAGE_MASK */ + movew #PAGE_SIZE/32 - 1,%d0 /* d0 = PAGE_SIZE/32 - 1 */ +3: + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + dbf %d0, 3b + jra .Lcopy + +.Lflush: + /* Flush all caches */ + + btst #CPUB_68020,%d1 + jeq 2f + +1: /* 68020 or 68030 */ + .chip 68030 + movec %cacr,%d0 + orw #0x808,%d0 + movec %d0,%cacr + .chip 68k + jra .Lreincarnate + +2: + btst #CPUB_68030,%d1 + jne 1b + + btst #CPUB_68040,%d1 + jeq 4f + +3: /* 68040 or 68060 */ + .chip 68040 + nop + cpusha %bc + nop + cinva %bc + nop + .chip 68k + jra .Lreincarnate + +4: + btst #CPUB_68060,%d1 + jne 3b + +.Lreincarnate: + jmp %a1@ + +relocate_new_kernel_end: + +ENTRY(relocate_new_kernel_size) + .long relocate_new_kernel_end - relocate_new_kernel diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index e67e53159573a8..5b8ec4d5f8e8d7 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -71,12 +72,12 @@ EXPORT_SYMBOL(m68k_num_memory); int m68k_realnum_memory; EXPORT_SYMBOL(m68k_realnum_memory); unsigned long m68k_memoffset; -struct mem_info m68k_memory[NUM_MEMINFO]; +struct m68k_mem_info m68k_memory[NUM_MEMINFO]; EXPORT_SYMBOL(m68k_memory); -struct mem_info m68k_ramdisk; +static struct m68k_mem_info m68k_ramdisk __initdata; -static char m68k_command_line[CL_SIZE]; +static char m68k_command_line[CL_SIZE] __initdata; void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL; /* machine dependent irq functions */ @@ -143,11 +144,16 @@ extern void paging_init(void); static void __init m68k_parse_bootinfo(const struct bi_record *record) { - while (record->tag != BI_LAST) { + uint16_t tag; + + save_bootinfo(record); + + while ((tag = be16_to_cpu(record->tag)) != BI_LAST) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; + uint16_t size = be16_to_cpu(record->size); - switch (record->tag) { + switch (tag) { case BI_MACHTYPE: case BI_CPUTYPE: case BI_FPUTYPE: @@ -157,20 +163,27 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) case BI_MEMCHUNK: if (m68k_num_memory < NUM_MEMINFO) { - m68k_memory[m68k_num_memory].addr = data[0]; - m68k_memory[m68k_num_memory].size = data[1]; + const struct mem_info *m = data; + m68k_memory[m68k_num_memory].addr = + be32_to_cpu(m->addr); + m68k_memory[m68k_num_memory].size = + be32_to_cpu(m->size); m68k_num_memory++; } else - printk("m68k_parse_bootinfo: too many memory chunks\n"); + pr_warn("%s: too many memory chunks\n", + __func__); break; case BI_RAMDISK: - m68k_ramdisk.addr = data[0]; - m68k_ramdisk.size = data[1]; + { + const struct mem_info *m = data; + m68k_ramdisk.addr = be32_to_cpu(m->addr); + m68k_ramdisk.size = be32_to_cpu(m->size); + } break; case BI_COMMAND_LINE: - strlcpy(m68k_command_line, (const char *)data, + strlcpy(m68k_command_line, data, sizeof(m68k_command_line)); break; @@ -197,17 +210,16 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) unknown = 1; } if (unknown) - printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n", - record->tag); - record = (struct bi_record *)((unsigned long)record + - record->size); 
+ pr_warn("%s: unknown tag 0x%04x ignored\n", __func__, + tag); + record = (struct bi_record *)((unsigned long)record + size); } m68k_realnum_memory = m68k_num_memory; #ifdef CONFIG_SINGLE_MEMORY_CHUNK if (m68k_num_memory > 1) { - printk("Ignoring last %i chunks of physical memory\n", - (m68k_num_memory - 1)); + pr_warn("%s: ignoring last %i chunks of physical memory\n", + __func__, (m68k_num_memory - 1)); m68k_num_memory = 1; } #endif @@ -219,7 +231,7 @@ void __init setup_arch(char **cmdline_p) int i; #endif - /* The bootinfo is located right after the kernel bss */ + /* The bootinfo is located right after the kernel */ if (!CPU_IS_COLDFIRE) m68k_parse_bootinfo((const struct bi_record *)_end); @@ -247,7 +259,7 @@ void __init setup_arch(char **cmdline_p) asm (".chip 68060; movec %%pcr,%0; .chip 68k" : "=d" (pcr)); if (((pcr >> 8) & 0xff) <= 5) { - printk("Enabling workaround for errata I14\n"); + pr_warn("Enabling workaround for errata I14\n"); asm (".chip 68060; movec %0,%%pcr; .chip 68k" : : "d" (pcr | 0x20)); } @@ -336,12 +348,12 @@ void __init setup_arch(char **cmdline_p) panic("No configuration setup"); } + paging_init(); + #ifdef CONFIG_NATFEAT nf_init(); #endif - paging_init(); - #ifndef CONFIG_SUN3 for (i = 1; i < m68k_num_memory; i++) free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr, @@ -353,7 +365,7 @@ void __init setup_arch(char **cmdline_p) BOOTMEM_DEFAULT); initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); initrd_end = initrd_start + m68k_ramdisk.size; - printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end); + pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end); } #endif @@ -538,9 +550,9 @@ void check_bugs(void) { #ifndef CONFIG_M68KFPU_EMU if (m68k_fputype == 0) { - printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, " + pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, " "WHICH IS REQUIRED BY LINUX/M68K ***\n"); - printk(KERN_EMERG "Upgrade your hardware or join the FPU " + pr_emerg("Upgrade your hardware or join the FPU " "emulation project\n"); panic("no FPU"); } diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 7eb9792009f899..958f1adb9d0c42 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -28,6 +28,10 @@ #include #include + +unsigned long (*mach_random_get_entropy)(void); + + /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "xtime_update()" routine every clocktick diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 88fcd8c70e7bed..6c9ca24830e982 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -133,9 +133,7 @@ static inline void access_error060 (struct frame *fp) { unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */ -#ifdef DEBUG - printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); -#endif + pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); if (fslw & MMU060_BPE) { /* branch prediction error -> clear branch cache */ @@ -162,9 +160,7 @@ static inline void access_error060 (struct frame *fp) } if (fslw & MMU060_W) errorcode |= 2; -#ifdef DEBUG - printk("errorcode = %d\n", errorcode ); -#endif + pr_debug("errorcode = %ld\n", errorcode); do_page_fault(&fp->ptregs, addr, errorcode); } else if (fslw & (MMU060_SEE)){ /* Software Emulation Error. 
@@ -173,8 +169,9 @@ static inline void access_error060 (struct frame *fp) send_fault_sig(&fp->ptregs); } else if (!(fslw & (MMU060_RE|MMU060_WE)) || send_fault_sig(&fp->ptregs) > 0) { - printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr); - printk( "68060 access error, fslw=%lx\n", fslw ); + pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, + fp->un.fmt4.effaddr); + pr_err("68060 access error, fslw=%lx\n", fslw); trap_c( fp ); } } @@ -225,9 +222,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba, set_fs(old_fs); -#ifdef DEBUG - printk("do_040writeback1, res=%d\n",res); -#endif + pr_debug("do_040writeback1, res=%d\n", res); return res; } @@ -249,7 +244,7 @@ static inline void do_040writebacks(struct frame *fp) int res = 0; #if 0 if (fp->un.fmt7.wb1s & WBV_040) - printk("access_error040: cannot handle 1st writeback. oops.\n"); + pr_err("access_error040: cannot handle 1st writeback. oops.\n"); #endif if ((fp->un.fmt7.wb2s & WBV_040) && @@ -302,14 +297,12 @@ static inline void access_error040(struct frame *fp) unsigned short ssw = fp->un.fmt7.ssw; unsigned long mmusr; -#ifdef DEBUG - printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); - printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, + pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); + pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, fp->un.fmt7.wb2s, fp->un.fmt7.wb3s); - printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", + pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", fp->un.fmt7.wb2a, fp->un.fmt7.wb3a, fp->un.fmt7.wb2d, fp->un.fmt7.wb3d); -#endif if (ssw & ATC_040) { unsigned long addr = fp->un.fmt7.faddr; @@ -324,9 +317,7 @@ static inline void access_error040(struct frame *fp) /* MMU error, get the MMUSR info for this access */ mmusr = probe040(!(ssw & RW_040), addr, ssw); -#ifdef DEBUG - printk("mmusr = %lx\n", mmusr); -#endif + pr_debug("mmusr = %lx\n", mmusr); errorcode = 1; if (!(mmusr & MMU_R_040)) { /* clear the invalid atc entry */ @@ -340,14 +331,10 @@ static inline void access_error040(struct frame *fp) errorcode |= 2; if (do_page_fault(&fp->ptregs, addr, errorcode)) { -#ifdef DEBUG - printk("do_page_fault() !=0\n"); -#endif + pr_debug("do_page_fault() !=0\n"); if (user_mode(&fp->ptregs)){ /* delay writebacks after signal delivery */ -#ifdef DEBUG - printk(".. was usermode - return\n"); -#endif + pr_debug(".. was usermode - return\n"); return; } /* disable writeback into user space from kernel @@ -355,9 +342,7 @@ static inline void access_error040(struct frame *fp) * the writeback won't do good) */ disable_wb: -#ifdef DEBUG - printk(".. disabling wb2\n"); -#endif + pr_debug(".. disabling wb2\n"); if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) fp->un.fmt7.wb2s &= ~WBV_040; if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) @@ -371,7 +356,7 @@ static inline void access_error040(struct frame *fp) current->thread.signo = SIGBUS; current->thread.faddr = fp->un.fmt7.faddr; if (send_fault_sig(&fp->ptregs) >= 0) - printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, + pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, fp->un.fmt7.faddr); goto disable_wb; } @@ -394,19 +379,17 @@ static inline void bus_error030 (struct frame *fp) unsigned short ssw = fp->un.fmtb.ssw; extern unsigned long _sun3_map_test_start, _sun3_map_test_end; -#ifdef DEBUG if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_debug("Instruction fault at %#010lx\n", ssw & FC ? fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 : fp->ptregs.format == 0xa ? 
fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); -#endif /* * Check if this page should be demand-mapped. This needs to go before @@ -429,7 +412,7 @@ static inline void bus_error030 (struct frame *fp) return; /* instruction fault or kernel data fault! */ if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); if (ssw & DF) { /* was this fault incurred testing bus mappings? */ @@ -439,12 +422,12 @@ static inline void bus_error030 (struct frame *fp) return; } - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); } - printk ("BAD KERNEL BUSERR\n"); + pr_err("BAD KERNEL BUSERR\n"); die_if_kernel("Oops", &fp->ptregs,0); force_sig(SIGKILL, current); @@ -473,12 +456,11 @@ static inline void bus_error030 (struct frame *fp) else if (buserr_type & SUN3_BUSERR_INVALID) errorcode = 0x00; else { -#ifdef DEBUG - printk ("*** unexpected busfault type=%#04x\n", buserr_type); - printk ("invalid %s access at %#lx from pc %#lx\n", - !(ssw & RW) ? "write" : "read", addr, - fp->ptregs.pc); -#endif + pr_debug("*** unexpected busfault type=%#04x\n", + buserr_type); + pr_debug("invalid %s access at %#lx from pc %#lx\n", + !(ssw & RW) ? "write" : "read", addr, + fp->ptregs.pc); die_if_kernel ("Oops", &fp->ptregs, buserr_type); force_sig (SIGBUS, current); return; @@ -509,9 +491,7 @@ static inline void bus_error030 (struct frame *fp) if (!mmu_emu_handle_fault(addr, 1, 0)) do_page_fault (&fp->ptregs, addr, 0); } else { -#ifdef DEBUG - printk ("protection fault on insn access (segv).\n"); -#endif + pr_debug("protection fault on insn access (segv).\n"); force_sig (SIGSEGV, current); } } @@ -525,22 +505,22 @@ static inline void bus_error030 (struct frame *fp) unsigned short ssw = fp->un.fmtb.ssw; #ifdef DEBUG unsigned long desc; +#endif - printk ("pid = %x ", current->pid); - printk ("SSW=%#06x ", ssw); + pr_debug("pid = %x ", current->pid); + pr_debug("SSW=%#06x ", ssw); if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_debug("Instruction fault at %#010lx\n", ssw & FC ? fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 : fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); -#endif /* ++andreas: If a data fault and an instruction fault happen at the same time map in both pages. 
*/ @@ -554,27 +534,23 @@ static inline void bus_error030 (struct frame *fp) "pmove %%psr,%1" : "=a&" (desc), "=m" (temp) : "a" (addr), "d" (ssw)); + pr_debug("mmusr is %#x for addr %#lx in task %p\n", + temp, addr, current); + pr_debug("descriptor address is 0x%p, contents %#lx\n", + __va(desc), *(unsigned long *)__va(desc)); #else asm volatile ("ptestr %2,%1@,#7\n\t" "pmove %%psr,%0" : "=m" (temp) : "a" (addr), "d" (ssw)); #endif mmusr = temp; - -#ifdef DEBUG - printk("mmusr is %#x for addr %#lx in task %p\n", - mmusr, addr, current); - printk("descriptor address is %#lx, contents %#lx\n", - __va(desc), *(unsigned long *)__va(desc)); -#endif - errorcode = (mmusr & MMU_I) ? 0 : 1; if (!(ssw & RW) || (ssw & RM)) errorcode |= 2; if (mmusr & (MMU_I | MMU_WP)) { if (ssw & 4) { - printk("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); @@ -587,9 +563,10 @@ static inline void bus_error030 (struct frame *fp) } else if (!(mmusr & MMU_I)) { /* probably a 020 cas fault */ if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0) - printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr); + pr_err("unexpected bus error (%#x,%#x)\n", ssw, + mmusr); } else if (mmusr & (MMU_B|MMU_L|MMU_S)) { - printk("invalid %s access at %#lx from pc %#lx\n", + pr_err("invalid %s access at %#lx from pc %#lx\n", !(ssw & RW) ? "write" : "read", addr, fp->ptregs.pc); die_if_kernel("Oops",&fp->ptregs,mmusr); @@ -600,7 +577,7 @@ static inline void bus_error030 (struct frame *fp) static volatile long tlong; #endif - printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", + pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", !(ssw & RW) ? "write" : "read", addr, fp->ptregs.pc, ssw); asm volatile ("ptestr #1,%1@,#0\n\t" @@ -609,18 +586,16 @@ static inline void bus_error030 (struct frame *fp) : "a" (addr)); mmusr = temp; - printk ("level 0 mmusr is %#x\n", mmusr); + pr_err("level 0 mmusr is %#x\n", mmusr); #if 0 asm volatile ("pmove %%tt0,%0" : "=m" (tlong)); - printk("tt0 is %#lx, ", tlong); + pr_debug("tt0 is %#lx, ", tlong); asm volatile ("pmove %%tt1,%0" : "=m" (tlong)); - printk("tt1 is %#lx\n", tlong); -#endif -#ifdef DEBUG - printk("Unknown SIGSEGV - 1\n"); + pr_debug("tt1 is %#lx\n", tlong); #endif + pr_debug("Unknown SIGSEGV - 1\n"); die_if_kernel("Oops",&fp->ptregs,mmusr); force_sig(SIGSEGV, current); return; @@ -641,10 +616,9 @@ static inline void bus_error030 (struct frame *fp) return; if (fp->ptregs.sr & PS_S) { - printk("Instruction fault at %#010lx\n", - fp->ptregs.pc); + pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); buserr: - printk ("BAD KERNEL BUSERR\n"); + pr_err("BAD KERNEL BUSERR\n"); die_if_kernel("Oops",&fp->ptregs,0); force_sig(SIGKILL, current); return; @@ -668,28 +642,22 @@ static inline void bus_error030 (struct frame *fp) "pmove %%psr,%1" : "=a&" (desc), "=m" (temp) : "a" (addr)); + pr_debug("mmusr is %#x for addr %#lx in task %p\n", + temp, addr, current); + pr_debug("descriptor address is 0x%p, contents %#lx\n", + __va(desc), *(unsigned long *)__va(desc)); #else asm volatile ("ptestr #1,%1@,#7\n\t" "pmove %%psr,%0" : "=m" (temp) : "a" (addr)); #endif mmusr = temp; - -#ifdef DEBUG - printk ("mmusr is %#x for addr %#lx in task %p\n", - mmusr, addr, current); - printk ("descriptor address is %#lx, contents %#lx\n", - __va(desc), *(unsigned long *)__va(desc)); -#endif - if (mmusr & MMU_I) do_page_fault (&fp->ptregs, addr, 0); else if (mmusr & 
(MMU_B|MMU_L|MMU_S)) { - printk ("invalid insn access at %#lx from pc %#lx\n", + pr_err("invalid insn access at %#lx from pc %#lx\n", addr, fp->ptregs.pc); -#ifdef DEBUG - printk("Unknown SIGSEGV - 2\n"); -#endif + pr_debug("Unknown SIGSEGV - 2\n"); die_if_kernel("Oops",&fp->ptregs,mmusr); force_sig(SIGSEGV, current); return; @@ -791,9 +759,7 @@ asmlinkage void buserr_c(struct frame *fp) if (user_mode(&fp->ptregs)) current->thread.esp0 = (unsigned long) fp; -#ifdef DEBUG - printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format); -#endif + pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format); #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) if (CPU_IS_COLDFIRE) { @@ -836,9 +802,7 @@ asmlinkage void buserr_c(struct frame *fp) #endif default: die_if_kernel("bad frame format",&fp->ptregs,0); -#ifdef DEBUG - printk("Unknown SIGSEGV - 4\n"); -#endif + pr_debug("Unknown SIGSEGV - 4\n"); force_sig(SIGSEGV, current); } } @@ -852,7 +816,7 @@ void show_trace(unsigned long *stack) unsigned long addr; int i; - printk("Call Trace:"); + pr_info("Call Trace:"); addr = (unsigned long)stack + THREAD_SIZE - 1; endstack = (unsigned long *)(addr & -THREAD_SIZE); i = 0; @@ -869,13 +833,13 @@ void show_trace(unsigned long *stack) if (__kernel_text_address(addr)) { #ifndef CONFIG_KALLSYMS if (i % 5 == 0) - printk("\n "); + pr_cont("\n "); #endif - printk(" [<%08lx>] %pS\n", addr, (void *)addr); + pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr); i++; } } - printk("\n"); + pr_cont("\n"); } void show_registers(struct pt_regs *regs) @@ -887,81 +851,87 @@ void show_registers(struct pt_regs *regs) int i; print_modules(); - printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); - printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); - printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", + pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); + pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); + pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", regs->d0, regs->d1, regs->d2, regs->d3); - printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", + pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", regs->d4, regs->d5, regs->a0, regs->a1); - printk("Process %s (pid: %d, task=%p)\n", + pr_info("Process %s (pid: %d, task=%p)\n", current->comm, task_pid_nr(current), current); addr = (unsigned long)&fp->un; - printk("Frame format=%X ", regs->format); + pr_info("Frame format=%X ", regs->format); switch (regs->format) { case 0x2: - printk("instr addr=%08lx\n", fp->un.fmt2.iaddr); + pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr); addr += sizeof(fp->un.fmt2); break; case 0x3: - printk("eff addr=%08lx\n", fp->un.fmt3.effaddr); + pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr); addr += sizeof(fp->un.fmt3); break; case 0x4: - printk((CPU_IS_060 ? 
"fault addr=%08lx fslw=%08lx\n" - : "eff addr=%08lx pc=%08lx\n"), - fp->un.fmt4.effaddr, fp->un.fmt4.pc); + if (CPU_IS_060) + pr_cont("fault addr=%08lx fslw=%08lx\n", + fp->un.fmt4.effaddr, fp->un.fmt4.pc); + else + pr_cont("eff addr=%08lx pc=%08lx\n", + fp->un.fmt4.effaddr, fp->un.fmt4.pc); addr += sizeof(fp->un.fmt4); break; case 0x7: - printk("eff addr=%08lx ssw=%04x faddr=%08lx\n", + pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n", fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); - printk("wb 1 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); - printk("wb 2 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); - printk("wb 3 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); - printk("push data: %08lx %08lx %08lx %08lx\n", + pr_info("push data: %08lx %08lx %08lx %08lx\n", fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, fp->un.fmt7.pd3); addr += sizeof(fp->un.fmt7); break; case 0x9: - printk("instr addr=%08lx\n", fp->un.fmt9.iaddr); + pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr); addr += sizeof(fp->un.fmt9); break; case 0xa: - printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", + pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, fp->un.fmta.daddr, fp->un.fmta.dobuf); addr += sizeof(fp->un.fmta); break; case 0xb: - printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", + pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, fp->un.fmtb.daddr, fp->un.fmtb.dobuf); - printk("baddr=%08lx dibuf=%08lx ver=%x\n", + pr_info("baddr=%08lx dibuf=%08lx ver=%x\n", fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); addr += sizeof(fp->un.fmtb); break; default: - printk("\n"); + pr_cont("\n"); } show_stack(NULL, (unsigned long *)addr); - printk("Code:"); + pr_info("Code:"); set_fs(KERNEL_DS); cp = (u16 *)regs->pc; for (i = -8; i < 16; i++) { if (get_user(c, cp + i) && i >= 0) { - printk(" Bad PC value."); + pr_cont(" Bad PC value."); break; } - printk(i ? 
" %04x" : " <%04x>", c); + if (i) + pr_cont(" %04x", c); + else + pr_cont(" <%04x>", c); } set_fs(old_fs); - printk ("\n"); + pr_cont("\n"); } void show_stack(struct task_struct *task, unsigned long *stack) @@ -978,16 +948,16 @@ void show_stack(struct task_struct *task, unsigned long *stack) } endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); - printk("Stack from %08lx:", (unsigned long)stack); + pr_info("Stack from %08lx:", (unsigned long)stack); p = stack; for (i = 0; i < kstack_depth_to_print; i++) { if (p + 1 > endstack) break; if (i % 8 == 0) - printk("\n "); - printk(" %08lx", *p++); + pr_cont("\n "); + pr_cont(" %08lx", *p++); } - printk("\n"); + pr_cont("\n"); show_trace(stack); } @@ -1005,32 +975,32 @@ void bad_super_trap (struct frame *fp) console_verbose(); if (vector < ARRAY_SIZE(vec_names)) - printk ("*** %s *** FORMAT=%X\n", + pr_err("*** %s *** FORMAT=%X\n", vec_names[vector], fp->ptregs.format); else - printk ("*** Exception %d *** FORMAT=%X\n", + pr_err("*** Exception %d *** FORMAT=%X\n", vector, fp->ptregs.format); if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) { unsigned short ssw = fp->un.fmtb.ssw; - printk ("SSW=%#06x ", ssw); + pr_err("SSW=%#06x ", ssw); if (ssw & RC) - printk ("Pipe stage C instruction fault at %#010lx\n", + pr_err("Pipe stage C instruction fault at %#010lx\n", (fp->ptregs.format) == 0xA ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); if (ssw & RB) - printk ("Pipe stage B instruction fault at %#010lx\n", + pr_err("Pipe stage B instruction fault at %#010lx\n", (fp->ptregs.format) == 0xA ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); } - printk ("Current process id is %d\n", task_pid_nr(current)); + pr_err("Current process id is %d\n", task_pid_nr(current)); die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); } @@ -1162,7 +1132,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) return; console_verbose(); - printk("%s: %08x\n",str,nr); + pr_crit("%s: %08x\n", str, nr); show_registers(fp); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); do_exit(SIGSEGV); diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index afb95d5fb26b3b..982c3fe73c4a45 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -26,9 +26,10 @@ #include #include -#define BOOTINFO_COMPAT_1_0 #include #include +#include +#include #include #include @@ -107,45 +108,46 @@ static void __init mac_sched_init(irq_handler_t vector) int __init mac_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const u_long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_MAC_MODEL: - mac_bi_data.id = *data; + mac_bi_data.id = be32_to_cpup(data); break; case BI_MAC_VADDR: - mac_bi_data.videoaddr = *data; + mac_bi_data.videoaddr = be32_to_cpup(data); break; case BI_MAC_VDEPTH: - mac_bi_data.videodepth = *data; + mac_bi_data.videodepth = be32_to_cpup(data); break; case BI_MAC_VROW: - mac_bi_data.videorow = *data; + mac_bi_data.videorow = be32_to_cpup(data); break; case BI_MAC_VDIM: - mac_bi_data.dimensions = *data; + mac_bi_data.dimensions = be32_to_cpup(data); break; case BI_MAC_VLOGICAL: - mac_bi_data.videological = VIDEOMEMBASE + (*data & ~VIDEOMEMMASK); - mac_orig_videoaddr = *data; + mac_orig_videoaddr = be32_to_cpup(data); + 
mac_bi_data.videological = + VIDEOMEMBASE + (mac_orig_videoaddr & ~VIDEOMEMMASK); break; case BI_MAC_SCCBASE: - mac_bi_data.sccbase = *data; + mac_bi_data.sccbase = be32_to_cpup(data); break; case BI_MAC_BTIME: - mac_bi_data.boottime = *data; + mac_bi_data.boottime = be32_to_cpup(data); break; case BI_MAC_GMTBIAS: - mac_bi_data.gmtbias = *data; + mac_bi_data.gmtbias = be32_to_cpup(data); break; case BI_MAC_MEMSIZE: - mac_bi_data.memsize = *data; + mac_bi_data.memsize = be32_to_cpup(data); break; case BI_MAC_CPUID: - mac_bi_data.cpuid = *data; + mac_bi_data.cpuid = be32_to_cpup(data); break; case BI_MAC_ROMBASE: - mac_bi_data.rombase = *data; + mac_bi_data.rombase = be32_to_cpup(data); break; default: unknown = 1; diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 7d8d46127ad9fc..4d2adfb32a2ab4 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -111,16 +111,15 @@ #include #include -#include #include #include #include /*#define DEBUG_IOP*/ -/* Set to non-zero if the IOPs are present. Set by iop_init() */ +/* Non-zero if the IOPs are present */ -int iop_scc_present,iop_ism_present; +int iop_scc_present, iop_ism_present; /* structure for tracking channel listeners */ diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 5e085554ac7f2b..707b61aea2030c 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -25,8 +25,6 @@ #include #include -#define BOOTINFO_COMPAT_1_0 -#include #include /* Offset between Unix time (1970-based) and Mac time (1904-based) */ diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 6c4c882c126e82..54037125ebf8c0 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c @@ -21,7 +21,6 @@ #include #include -#include #include #include #include diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 6f026fc302fab3..835fa04511c85a 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c @@ -21,7 +21,6 @@ #include #include -#include #include #include #include @@ -54,7 +53,7 @@ static void psc_debug_dump(void) * expanded to cover what I think are the other 7 channels. */ -static void psc_dma_die_die_die(void) +static __init void psc_dma_die_die_die(void) { int i; diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 5d1458bb871b8b..e198dec868e472 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -30,7 +30,6 @@ #include #include -#include #include #include #include diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index eb1d61f6872549..2bd7487440c455 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -25,9 +25,8 @@ int send_fault_sig(struct pt_regs *regs) siginfo.si_signo = current->thread.signo; siginfo.si_code = current->thread.code; siginfo.si_addr = (void *)current->thread.faddr; -#ifdef DEBUG - printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code); -#endif + pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, + siginfo.si_signo, siginfo.si_code); if (user_mode(regs)) { force_sig_info(siginfo.si_signo, @@ -45,10 +44,10 @@ int send_fault_sig(struct pt_regs *regs) * terminate things with extreme prejudice. 
*/ if ((unsigned long)siginfo.si_addr < PAGE_SIZE) - printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + pr_alert("Unable to handle kernel NULL pointer dereference"); else - printk(KERN_ALERT "Unable to handle kernel access"); - printk(" at virtual address %p\n", siginfo.si_addr); + pr_alert("Unable to handle kernel access"); + pr_cont(" at virtual address %p\n", siginfo.si_addr); die_if_kernel("Oops", regs, 0 /*error_code*/); do_exit(SIGKILL); } @@ -75,11 +74,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; -#ifdef DEBUG - printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", - regs->sr, regs->pc, address, error_code, - current->mm->pgd); -#endif + pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", + regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL); /* * If we're in an interrupt or have no user @@ -118,9 +114,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * we can handle it.. */ good_area: -#ifdef DEBUG - printk("do_page_fault: good_area\n"); -#endif + pr_debug("do_page_fault: good_area\n"); switch (error_code & 3) { default: /* 3: write, present */ /* fall through */ @@ -143,9 +137,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, */ fault = handle_mm_fault(mm, vma, address, flags); -#ifdef DEBUG - printk("handle_mm_fault returns %d\n",fault); -#endif + pr_debug("handle_mm_fault returns %d\n", fault); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return 0; diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 6b4baa6e4d31d8..acaff6a49e357e 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -59,7 +59,7 @@ EXPORT_SYMBOL(pg_data_table); void __init m68k_setup_node(int node) { #ifndef CONFIG_SINGLE_MEMORY_CHUNK - struct mem_info *info = m68k_memory + node; + struct m68k_mem_info *info = m68k_memory + node; int i, end; i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 568cfad3ceb8da..6e4955bc542bfc 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -27,9 +27,9 @@ /* * For 040/060 we can use the virtual memory area like other architectures, - * but for 020/030 we want to use early termination page descriptor and we + * but for 020/030 we want to use early termination page descriptors and we * can't mix this with normal page descriptors, so we have to copy that code - * (mm/vmalloc.c) and return appriorate aligned addresses. + * (mm/vmalloc.c) and return appropriately aligned addresses. */ #ifdef CPU_M68040_OR_M68060_ONLY @@ -224,7 +224,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla EXPORT_SYMBOL(__ioremap); /* - * Unmap a ioremap()ed region again + * Unmap an ioremap()ed region again */ void iounmap(void __iomem *addr) { @@ -241,8 +241,8 @@ EXPORT_SYMBOL(iounmap); /* * __iounmap unmaps nearly everything, so be careful - * it doesn't free currently pointer/page tables anymore but it - * wans't used anyway and might be added later. + * Currently it doesn't free pointer/page tables anymore but this + * wasn't used anyway and might be added later. 
*/ void __iounmap(void *addr, unsigned long size) { diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 251c5437787be6..7d4024432163ff 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -233,7 +233,7 @@ void __init paging_init(void) printk("Fix your bootloader or use a memfile to make use of this area!\n"); m68k_num_memory--; memmove(m68k_memory + i, m68k_memory + i + 1, - (m68k_num_memory - i) * sizeof(struct mem_info)); + (m68k_num_memory - i) * sizeof(struct m68k_mem_info)); continue; } addr = m68k_memory[i].addr + m68k_memory[i].size; diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 1c6262803b9455..1bb3ce6634d366 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -26,6 +26,8 @@ #include #include +#include +#include #include #include #include @@ -51,9 +53,10 @@ static int bcd2int (unsigned char b); irq_handler_t tick_handler; -int mvme147_parse_bootinfo(const struct bi_record *bi) +int __init mvme147_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) + uint16_t tag = be16_to_cpu(bi->tag); + if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) return 0; else return 1; diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 080a342458a193..eab7d342757ef6 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -29,6 +29,8 @@ #include #include +#include +#include #include #include #include @@ -60,9 +62,10 @@ unsigned short mvme16x_config; EXPORT_SYMBOL(mvme16x_config); -int mvme16x_parse_bootinfo(const struct bi_record *bi) +int __init mvme16x_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) + uint16_t tag = be16_to_cpu(bi->tag); + if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) return 0; else return 1; @@ -87,15 +90,15 @@ static void mvme16x_get_model(char *model) suf[3] = '\0'; suf[0] = suf[1] ? '-' : '\0'; - sprintf(model, "Motorola MVME%x%s", p->brdno, suf); + sprintf(model, "Motorola MVME%x%s", be16_to_cpu(p->brdno), suf); } static void mvme16x_get_hardware_list(struct seq_file *m) { - p_bdid p = &mvme_bdid; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); - if (p->brdno == 0x0162 || p->brdno == 0x0172) + if (brdno == 0x0162 || brdno == 0x0172) { unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; @@ -285,6 +288,7 @@ void __init config_mvme16x(void) { p_bdid p = &mvme_bdid; char id[40]; + uint16_t brdno = be16_to_cpu(p->brdno); mach_max_dma_address = 0xffffffff; mach_sched_init = mvme16x_sched_init; @@ -306,18 +310,18 @@ void __init config_mvme16x(void) } /* Board type is only set by newer versions of vmelilo/tftplilo */ if (vme_brdtype == 0) - vme_brdtype = p->brdno; + vme_brdtype = brdno; mvme16x_get_model(id); printk ("\nBRD_ID: %s BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4, p->rev&0xf, p->yr, p->mth, p->day); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) { unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA; - printk ("MVME%x Hardware status:\n", p->brdno); + printk ("MVME%x Hardware status:\n", brdno); printk (" CPU Type 68%s040\n", rev & MVME16x_CONFIG_GOT_FPU ? 
"" : "LC"); printk (" CPU clock %dMHz\n", @@ -347,12 +351,12 @@ void __init config_mvme16x(void) static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) { - p_bdid p = &mvme_bdid; unsigned long *new = (unsigned long *)vectors; unsigned long *old = (unsigned long *)0xffe00000; volatile unsigned char uc, *ucp; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) { ucp = (volatile unsigned char *)0xfff42043; uc = *ucp | 8; @@ -366,7 +370,7 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) *(new+9) = *(old+9); /* Trace */ *(new+47) = *(old+47); /* Trap #15 */ - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) *(new+0x5e) = *(old+0x5e); /* ABORT switch */ else *(new+0x6e) = *(old+0x6e); /* ABORT switch */ @@ -381,7 +385,7 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id) void mvme16x_sched_init (irq_handler_t timer_routine) { - p_bdid p = &mvme_bdid; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); int irq; tick_handler = timer_routine; @@ -394,7 +398,7 @@ void mvme16x_sched_init (irq_handler_t timer_routine) "timer", mvme16x_timer_int)) panic ("Couldn't register timer int"); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) irq = MVME162_IRQ_ABORT; else irq = MVME167_IRQ_ABORT; diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 078bb744b5fe11..e90fe903613ead 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -154,7 +154,7 @@ static unsigned int serports[] = 0x3f8,0x2f8,0x3e8,0x2e8,0 }; -static void q40_disable_irqs(void) +static void __init q40_disable_irqs(void) { unsigned i, j; @@ -198,7 +198,7 @@ void __init config_q40(void) } -int q40_parse_bootinfo(const struct bi_record *rec) +int __init q40_parse_bootinfo(const struct bi_record *rec) { return 1; } diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c index d522eaab45510a..d95506e06c2ac4 100644 --- a/arch/m68k/sun3/dvma.c +++ b/arch/m68k/sun3/dvma.c @@ -7,6 +7,7 @@ * */ +#include #include #include #include @@ -62,10 +63,7 @@ int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, } -void sun3_dvma_init(void) +void __init sun3_dvma_init(void) { - memset(ptelist, 0, sizeof(ptelist)); - - } diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 8edc510a21be66..3f258e230ba5d0 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -6,6 +6,7 @@ ** Started 1/16/98 @ 2:22 am */ +#include #include #include #include @@ -122,7 +123,7 @@ void print_pte_vaddr (unsigned long vaddr) /* * Initialise the MMU emulator. */ -void mmu_emu_init(unsigned long bootmem_end) +void __init mmu_emu_init(unsigned long bootmem_end) { unsigned long seg, num; int i,j; diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index cab54482ca34b9..b37521a5259ddb 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c @@ -6,6 +6,8 @@ * Contains common routines for sun3/sun3x DVMA management. 
*/ +#include +#include #include #include #include @@ -30,7 +32,7 @@ static inline void dvma_unmap_iommu(unsigned long a, int b) extern void sun3_dvma_init(void); #endif -static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES]; +static unsigned long *iommu_use; #define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT) @@ -245,7 +247,7 @@ static inline int free_baddr(unsigned long baddr) } -void dvma_init(void) +void __init dvma_init(void) { struct hole *hole; @@ -265,7 +267,7 @@ void dvma_init(void) list_add(&(hole->list), &hole_list); - memset(iommu_use, 0, sizeof(iommu_use)); + iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long)); dvma_unmap_iommu(DVMA_START, DVMA_SIZE); diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c index a7b7e818d62794..0898c3f8150851 100644 --- a/arch/m68k/sun3x/prom.c +++ b/arch/m68k/sun3x/prom.c @@ -10,7 +10,6 @@ #include #include -#include #include #include #include diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index c90bfc6bf64892..5d6b4b407ddab2 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -82,4 +82,19 @@ static inline void fence(void) #define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #endif /* _ASM_METAG_BARRIER_H */ diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h index e0373f81a1172f..1d7e770f7a54bf 100644 --- a/arch/metag/include/asm/smp.h +++ b/arch/metag/include/asm/smp.h @@ -7,13 +7,11 @@ enum ipi_msg_type { IPI_CALL_FUNC, - IPI_CALL_FUNC_SINGLE, IPI_RESCHEDULE, }; extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask asmlinkage void secondary_start_kernel(void); diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index db589ad5dbc41e..c700d625067a96 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -399,11 +399,6 @@ static int __init dma_alloc_init(void) pgd = pgd_offset(&init_mm, CONSISTENT_START); pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); - if (!pmd) { - pr_err("%s: no pmd tables\n", __func__); - ret = -ENOMEM; - break; - } WARN_ON(!pmd_none(*pmd)); pte = pte_alloc_kernel(pmd, CONSISTENT_START); diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index 7c01131429817a..f006d2276f40ab 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running); /* * "thread" is assumed to be a valid Meta hardware thread ID. 
*/ -int boot_secondary(unsigned int thread, struct task_struct *idle) +static int boot_secondary(unsigned int thread, struct task_struct *idle) { u32 val; @@ -491,7 +491,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) void arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } void show_ipi_list(struct seq_file *p) @@ -517,11 +517,10 @@ static DEFINE_SPINLOCK(stop_lock); * * Bit 0 - Inter-processor function call */ -static int do_IPI(struct pt_regs *regs) +static int do_IPI(void) { unsigned int cpu = smp_processor_id(); struct ipi_data *ipi = &per_cpu(ipi_data, cpu); - struct pt_regs *old_regs = set_irq_regs(regs); unsigned long msgs, nextmsg; int handled = 0; @@ -546,10 +545,6 @@ static int do_IPI(struct pt_regs *regs) generic_smp_call_function_interrupt(); break; - case IPI_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; - default: pr_crit("CPU%u: Unknown IPI message 0x%lx\n", cpu, nextmsg); @@ -557,8 +552,6 @@ static int do_IPI(struct pt_regs *regs) } } - set_irq_regs(old_regs); - return handled; } @@ -624,7 +617,7 @@ static void kick_raise_softirq(cpumask_t callmap, unsigned int irq) static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI, int *handled) { - *handled = do_IPI((struct pt_regs *)State.Sig.pCtx); + *handled = do_IPI(); return State; } diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c index bec3dec4922e68..4ba595701f7d8c 100644 --- a/arch/metag/kernel/topology.c +++ b/arch/metag/kernel/topology.c @@ -19,6 +19,7 @@ DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data); cpumask_t cpu_core_map[NR_CPUS]; +EXPORT_SYMBOL(cpu_core_map); static cpumask_t cpu_coregroup_map(unsigned int cpu) { diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index ce0bbf8f5640e7..a82426589fffb2 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h deleted file mode 100644 index df5be3e87044ae..00000000000000 --- a/arch/microblaze/include/asm/barrier.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#ifndef _ASM_MICROBLAZE_BARRIER_H -#define _ASM_MICROBLAZE_BARRIER_H - -#define nop() asm volatile ("nop") - -#define smp_read_barrier_depends() do {} while (0) -#define read_barrier_depends() do {} while (0) - -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() - -#endif /* _ASM_MICROBLAZE_BARRIER_H */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 650de3976e7a5a..c93d92beb3d62c 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -47,6 +47,7 @@ config MIPS select MODULES_USE_ELF_RELA if MODULES && 64BIT select CLONE_BACKWARDS select HAVE_DEBUG_STACKOVERFLOW + select HAVE_CC_STACKPROTECTOR menu "Machine selection" @@ -2322,19 +2323,6 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. 
-config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above. - config USE_OF bool select OF diff --git a/arch/mips/Makefile b/arch/mips/Makefile index de300b9936076f..efe50787cd897c 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -232,10 +232,6 @@ bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ LDFLAGS += -m $(ld-emul) -ifdef CONFIG_CC_STACKPROTECTOR - KBUILD_CFLAGS += -fstack-protector -endif - ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c index 9a357fffcfbe0a..820b7a313d9bfd 100644 --- a/arch/mips/ar7/setup.c +++ b/arch/mips/ar7/setup.c @@ -92,7 +92,6 @@ void __init plat_mem_setup(void) _machine_restart = ar7_machine_restart; _machine_halt = ar7_machine_halt; pm_power_off = ar7_machine_power_off; - panic_timeout = 3; io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000); if (!io_base) diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c index d71005835c007a..9100122e5cef89 100644 --- a/arch/mips/emma/markeins/setup.c +++ b/arch/mips/emma/markeins/setup.c @@ -111,9 +111,6 @@ void __init plat_mem_setup(void) iomem_resource.start = EMMA2RH_IO_BASE; iomem_resource.end = EMMA2RH_ROM_BASE - 1; - /* Reboot on panic */ - panic_timeout = 180; - markeins_sio_setup(); } diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index f26d8e1bf3c375..e1aa4e4c298423 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -180,4 +180,19 @@ #define nudge_writes() mb() #endif +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h index c75025f27c201f..06b9bc7ea14b1d 100644 --- a/arch/mips/include/asm/cacheops.h +++ b/arch/mips/include/asm/cacheops.h @@ -83,6 +83,6 @@ /* * Loongson2-specific cacheops */ -#define Hit_Invalidate_I_Loongson23 0x00 +#define Hit_Invalidate_I_Loongson2 0x00 #endif /* __ASM_CACHEOPS_H */ diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 34d1a19171257f..c84caddb8bdede 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr) __iflush_prologue switch (boot_cpu_type()) { case CPU_LOONGSON2: - cache_op(Hit_Invalidate_I_Loongson23, addr); + cache_op(Hit_Invalidate_I_Loongson2, addr); break; default: @@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr) { switch (boot_cpu_type()) { case CPU_LOONGSON2: - protected_cache_op(Hit_Invalidate_I_Loongson23, addr); + protected_cache_op(Hit_Invalidate_I_Loongson2, addr); break; default: @@ -357,8 
+357,8 @@ static inline void invalidate_tcache_page(unsigned long addr) "i" (op)); /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ -#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \ -static inline void blast_##pfx##cache##lsize(void) \ +#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \ +static inline void extra##blast_##pfx##cache##lsize(void) \ { \ unsigned long start = INDEX_BASE; \ unsigned long end = start + current_cpu_data.desc.waysize; \ @@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \ __##pfx##flush_epilogue \ } \ \ -static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ +static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ { \ unsigned long start = page; \ unsigned long end = page + PAGE_SIZE; \ @@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ __##pfx##flush_epilogue \ } \ \ -static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ +static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ { \ unsigned long indexmask = current_cpu_data.desc.waysize - 1; \ unsigned long start = INDEX_BASE + (page & indexmask); \ @@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) __##pfx##flush_epilogue \ } -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) - -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16) -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, 
Hit_Writeback_Inv_SD, 128, ) + +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) /* build blast_xxx_range, protected_blast_xxx_range */ #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ @@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) -__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \ - protected_, loongson23_) +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ + protected_, loongson2_) __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) /* blast_inv_dcache_range */ diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 62ffd20ea86909..49e572d879e134 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void) r4k_blast_icache_page = (void *)cache_noop; else if (ic_lsize == 16) r4k_blast_icache_page = blast_icache16_page; + else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache_page = loongson2_blast_icache32_page; else if (ic_lsize == 32) r4k_blast_icache_page = blast_icache32_page; else if (ic_lsize == 64) @@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void) else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache_page_indexed = tx49_blast_icache32_page_indexed; + else if (current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache_page_indexed = + loongson2_blast_icache32_page_indexed; else r4k_blast_icache_page_indexed = blast_icache32_page_indexed; @@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void) r4k_blast_icache = blast_r4600_v1_icache32; else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache = tx49_blast_icache32; + else if (current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache = loongson2_blast_icache32; else r4k_blast_icache = blast_icache32; } else if (ic_lsize == 64) @@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo else { switch (boot_cpu_type()) { case CPU_LOONGSON2: - protected_blast_icache_range(start, end); + protected_loongson2_blast_icache_range(start, end); break; default: - protected_loongson23_blast_icache_range(start, end); + protected_blast_icache_range(start, end); break; } } diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 6d981bb337ecd8..54e75c77184b88 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -92,7 +92,6 @@ static void __init xlp_init_mem_from_bars(void) void __init plat_mem_setup(void) { - panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 214d123b79faf7..921be5f7779770 100644 --- a/arch/mips/netlogic/xlr/setup.c 
+++ b/arch/mips/netlogic/xlr/setup.c @@ -92,7 +92,6 @@ static void nlm_linux_exit(void) void __init plat_mem_setup(void) { - panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 41707a245dea61..3462c831d0ea5f 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -134,8 +134,6 @@ void __init plat_mem_setup(void) #error invalid SiByte board configuration #endif - panic_timeout = 5; /* For debug. */ - board_be_handler = swarm_be_handler; if (xicor_probe()) diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 74742dc6a3daab..032143ec232451 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h deleted file mode 100644 index 2bd97a5c8af714..00000000000000 --- a/arch/mn10300/include/asm/barrier.h +++ /dev/null @@ -1,37 +0,0 @@ -/* MN10300 memory barrier definitions - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ -#ifndef _ASM_BARRIER_H -#define _ASM_BARRIER_H - -#define nop() asm volatile ("nop") - -#define mb() asm volatile ("": : :"memory") -#define rmb() mb() -#define wmb() asm volatile ("": : :"memory") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { xchg(&var, value); } while (0) -#else /* CONFIG_SMP */ -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#endif /* CONFIG_SMP */ - -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#endif /* _ASM_BARRIER_H */ diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index a603b9ebe54ce3..34b0be4ca52d77 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \ segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \ div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \ diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h deleted file mode 100644 index e77d834aa803e0..00000000000000 --- a/arch/parisc/include/asm/barrier.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __PARISC_BARRIER_H -#define __PARISC_BARRIER_H - -/* -** This is simply the barrier() macro from linux/kernel.h but when serial.c -** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h -** hasn't yet been included yet so it fails, thus repeating the macro here. -** -** PA-RISC architecture allows for weakly ordered memory accesses although -** none of the processors use it. There is a strong ordered bit that is -** set in the O-bit of the page directory entry. 
Operating systems that -** can not tolerate out of order accesses should set this bit when mapping -** pages. The O-bit of the PSW should also be set to 1 (I don't believe any -** of the processor implemented the PSW O-bit). The PCX-W ERS states that -** the TLB O-bit is not implemented so the page directory does not need to -** have the O-bit set when mapping pages (section 3.1). This section also -** states that the PSW Y, Z, G, and O bits are not implemented. -** So it looks like nothing needs to be done for parisc-linux (yet). -** (thanks to chada for the above comment -ggg) -** -** The __asm__ op below simple prevents gcc/ld from reordering -** instructions across the mb() "call". -*/ -#define mb() __asm__ __volatile__("":::"memory") /* barrier() */ -#define rmb() mb() -#define wmb() mb() -#define smp_mb() mb() -#define smp_rmb() mb() -#define smp_wmb() mb() -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#endif /* __PARISC_BARRIER_H */ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index f0e2784e7ccacd..2f9b751878ba86 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma void mark_rodata_ro(void); #endif -#ifdef CONFIG_PA8X00 -/* Only pa8800, pa8900 needs this */ - #include #define ARCH_HAS_KMAP -void kunmap_parisc(void *addr); - static inline void *kmap(struct page *page) { might_sleep(); + flush_dcache_page(page); return page_address(page); } static inline void kunmap(struct page *page) { - kunmap_parisc(page_address(page)); + flush_kernel_dcache_page_addr(page_address(page)); } static inline void *kmap_atomic(struct page *page) { pagefault_disable(); + flush_dcache_page(page); return page_address(page); } static inline void __kunmap_atomic(void *addr) { - kunmap_parisc(addr); + flush_kernel_dcache_page_addr(addr); pagefault_enable(); } #define kmap_atomic_prot(page, prot) kmap_atomic(page) #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) -#endif #endif /* _PARISC_CACHEFLUSH_H */ diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index b7adb2ac049c0e..c53fc63149e831 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -28,9 +28,8 @@ struct page; void clear_page_asm(void *page); void copy_page_asm(void *to, void *from); -void clear_user_page(void *vto, unsigned long vaddr, struct page *pg); -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg); +#define clear_user_page(vto, vaddr, page) clear_page_asm(vto) +#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom) /* #define CONFIG_PARISC_TMPALIAS */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index f33113a6141e75..70b3674dac4e39 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -75,6 +75,6 @@ #define SO_BUSY_POLL 0x4027 -#define SO_MAX_PACING_RATE 0x4048 +#define SO_MAX_PACING_RATE 0x4028 #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index c035673209f732..a72545554a3154 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr) } 
EXPORT_SYMBOL(flush_kernel_dcache_page_addr); -void clear_user_page(void *vto, unsigned long vaddr, struct page *page) -{ - clear_page_asm(vto); - if (!parisc_requires_coherency()) - flush_kernel_dcache_page_asm(vto); -} -EXPORT_SYMBOL(clear_user_page); - -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg) -{ - /* Copy using kernel mapping. No coherency is needed - (all in kmap/kunmap) on machines that don't support - non-equivalent aliasing. However, the `from' page - needs to be flushed before it can be accessed through - the kernel mapping. */ - preempt_disable(); - flush_dcache_page_asm(__pa(vfrom), vaddr); - preempt_enable(); - copy_page_asm(vto, vfrom); - if (!parisc_requires_coherency()) - flush_kernel_dcache_page_asm(vto); -} -EXPORT_SYMBOL(copy_user_page); - -#ifdef CONFIG_PA8X00 - -void kunmap_parisc(void *addr) -{ - if (parisc_requires_coherency()) - flush_kernel_dcache_page_addr(addr); -} -EXPORT_SYMBOL(kunmap_parisc); -#endif - void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) { unsigned long flags; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b44b52c0a8f07d..b2be8e8cb5c714 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -147,6 +147,10 @@ config EARLY_PRINTK bool default y +config PANIC_TIMEOUT + int + default 180 + config COMPAT bool default y if PPC64 diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts index 4177b62240c244..a618dfc13e4c8f 100644 --- a/arch/powerpc/boot/dts/mpc5125twr.dts +++ b/arch/powerpc/boot/dts/mpc5125twr.dts @@ -58,7 +58,6 @@ compatible = "fsl,mpc5121-immr"; #address-cells = <1>; #size-cells = <1>; - #interrupt-cells = <2>; ranges = <0x0 0x80000000 0x400000>; reg = <0x80000000 0x400000>; bus-frequency = <66000000>; // 66 MHz ips bus @@ -189,6 +188,10 @@ reg = <0xA000 0x1000>; }; + // disable USB1 port + // TODO: + // correct pinmux config and fix USB3320 ulpi dependency + // before re-enabling it usb@3000 { compatible = "fsl,mpc5121-usb2-dr"; reg = <0x3000 0x400>; @@ -197,6 +200,7 @@ interrupts = <43 0x8>; dr_mode = "host"; phy_type = "ulpi"; + status = "disabled"; }; // 5125 PSCs are not 52xx or 5121 PSC compatible diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ae782254e731bb..f89da808ce310e 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -45,11 +45,15 @@ # define SMPWMB eieio #endif +#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") + #define smp_mb() mb() -#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_rmb() __lwsync() #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") #define smp_read_barrier_depends() read_barrier_depends() #else +#define __lwsync() barrier() + #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() @@ -65,4 +69,19 @@ #define data_barrier(x) \ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ + ___p1; \ +}) + #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 894662a5d4d5c5..243ce69ad685ff 100644 --- 
a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -284,7 +284,7 @@ do_kvm_##n: \ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ beq- 1f; \ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ -1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ +1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \ blt+ cr1,3f; /* abort if it is */ \ li r1,(n); /* will be reloaded later */ \ sth r1,PACA_TRAP_SAVE(r13); \ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 703a8412dac28e..11ba86e176315e 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -26,6 +26,7 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void do_init_bootmem(void); void setup_panic(void); +#define ARCH_PANIC_TIMEOUT 180 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 5f54a744dcc5e2..f6e78d63fb6acc 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,6 +28,8 @@ #include #include +#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ + #define arch_spin_is_locked(x) ((x)->slock != 0) #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3c21374d..8296381ae43294 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h @@ -4,13 +4,18 @@ #ifdef __KERNEL__ /* - * The PowerPC can do unaligned accesses itself in big endian mode. + * The PowerPC can do unaligned accesses itself based on its endian mode. */ #include #include +#ifdef __LITTLE_ENDIAN__ +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le +#else #define get_unaligned __get_unaligned_be #define put_unaligned __put_unaligned_be +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UNALIGNED_H */ diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 75c6ecdb8f3728..7422a999a39a5b 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t; struct arch_uprobe { union { - u8 insn[MAX_UINSN_BYTES]; - u8 ixol[MAX_UINSN_BYTES]; - u32 ainsn; + u32 insn; + u32 ixol; }; }; diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 2ae41aba40530f..4f0946de2d5c91 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1) * of the function that the cpu should jump to to continue * initialization. 
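[Editor's note, not part of the patch] The barrier.h hunk above introduces smp_store_release()/smp_load_acquire() for powerpc on top of lwsync. A minimal usage sketch of the message-passing pattern this pair is meant for (generic kernel C, not tied to any file in this patch):

/* Illustrative only: one-shot publication of a payload to another CPU. */
static int payload;
static int ready;

/* producer */
static void publish(int v)
{
	payload = v;			/* plain store                          */
	smp_store_release(&ready, 1);	/* orders the payload before 'ready'    */
}

/* consumer */
static int try_consume(int *out)
{
	if (!smp_load_acquire(&ready))	/* pairs with the release above         */
		return 0;
	*out = payload;			/* guaranteed to observe publish()'s store */
	return 1;
}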
*/ + .balign 8 .globl __secondary_hold_spinloop __secondary_hold_spinloop: .llong 0x0 @@ -470,6 +471,7 @@ _STATIC(__after_prom_start) mtctr r8 bctr +.balign 8 p_end: .llong _end - _stext 4: /* Now copy the rest of the kernel up to _end */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cb64a6e1dc5186..078145acf7fb86 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void) /* Get the full OF pathname of the stdout device */ memset(path, 0, 256); call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); - stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); - val = cpu_to_be32(stdout_node); - prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", - &val, sizeof(val)); prom_printf("OF stdout device is: %s\n", of_stdout_device); prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", path, strlen(path) + 1); - /* If it's a display, note it */ - memset(type, 0, sizeof(type)); - prom_getprop(stdout_node, "device_type", type, sizeof(type)); - if (strcmp(type, "display") == 0) - prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); + /* instance-to-package fails on PA-Semi */ + stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); + if (stdout_node != PROM_ERROR) { + val = cpu_to_be32(stdout_node); + prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", + &val, sizeof(val)); + + /* If it's a display, note it */ + memset(type, 0, sizeof(type)); + prom_getprop(stdout_node, "device_type", type, sizeof(type)); + if (strcmp(type, "display") == 0) + prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); + } } static int __init prom_find_machine_type(void) diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index b903dc5cf944ae..2b0da27eaee424 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -296,9 +296,6 @@ void __init setup_arch(char **cmdline_p) if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) ucache_bsize = icache_bsize = dcache_bsize; - /* reboot on panic */ - panic_timeout = 180; - if (ppc_md.panic) setup_panic(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4085aaa9478fd9..856dd4e99bfe45 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -588,9 +588,6 @@ void __init setup_arch(char **cmdline_p) dcache_bsize = ppc64_caches.dline_size; icache_bsize = ppc64_caches.iline_size; - /* reboot on panic */ - panic_timeout = 180; - if (ppc_md.panic) setup_panic(); diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 59f419b935f250..003b20964ea0df 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) * emulate_step() returns 1 if the insn was successfully emulated. * For all other cases, we need to single-step in hardware. */ - ret = emulate_step(regs, auprobe->ainsn); + ret = emulate_step(regs, auprobe->insn); if (ret > 0) return true; diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index d73a5901490018..596a285c07554d 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -9,6 +9,14 @@ #include #include +#ifdef __BIG_ENDIAN__ +#define sLd sld /* Shift towards low-numbered address. */ +#define sHd srd /* Shift towards high-numbered address. 
*/ +#else +#define sLd srd /* Shift towards low-numbered address. */ +#define sHd sld /* Shift towards high-numbered address. */ +#endif + .align 7 _GLOBAL(__copy_tofrom_user) BEGIN_FTR_SECTION @@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ 25: ld r0,8(r4) - sld r6,r9,r10 + sLd r6,r9,r10 26: ldu r9,16(r4) - srd r7,r0,r11 - sld r8,r0,r10 + sHd r7,r0,r11 + sLd r8,r0,r10 or r7,r7,r6 blt cr6,79f 27: ld r0,8(r4) @@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ 29: ldu r9,8(r4) - sld r8,r0,r10 + sLd r8,r0,r10 addi r3,r3,-8 blt cr6,5f 30: ld r0,8(r4) - srd r12,r9,r11 - sld r6,r9,r10 + sHd r12,r9,r11 + sLd r6,r9,r10 31: ldu r9,16(r4) or r12,r8,r12 - srd r7,r0,r11 - sld r8,r0,r10 + sHd r7,r0,r11 + sLd r8,r0,r10 addi r3,r3,16 beq cr6,78f 1: or r7,r7,r6 32: ld r0,8(r4) 76: std r12,8(r3) -2: srd r12,r9,r11 - sld r6,r9,r10 +2: sHd r12,r9,r11 + sLd r6,r9,r10 33: ldu r9,16(r4) or r12,r8,r12 77: stdu r7,16(r3) - srd r7,r0,r11 - sld r8,r0,r10 + sHd r7,r0,r11 + sLd r8,r0,r10 bdnz 1b 78: std r12,8(r3) or r7,r7,r6 79: std r7,16(r3) -5: srd r12,r9,r11 +5: sHd r12,r9,r11 or r12,r8,r12 80: std r12,24(r3) bne 6f @@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) blr 6: cmpwi cr1,r5,8 addi r3,r3,32 - sld r9,r9,r10 + sLd r9,r9,r10 ble cr1,7f 34: ld r0,8(r4) - srd r7,r0,r11 + sHd r7,r0,r11 or r9,r7,r9 7: bf cr7*4+1,1f +#ifdef __BIG_ENDIAN__ rotldi r9,r9,32 +#endif 94: stw r9,0(r3) +#ifdef __LITTLE_ENDIAN__ + rotrdi r9,r9,32 +#endif addi r3,r3,4 1: bf cr7*4+2,2f +#ifdef __BIG_ENDIAN__ rotldi r9,r9,16 +#endif 95: sth r9,0(r3) +#ifdef __LITTLE_ENDIAN__ + rotrdi r9,r9,16 +#endif addi r3,r3,2 2: bf cr7*4+3,3f +#ifdef __BIG_ENDIAN__ rotldi r9,r9,8 +#endif 96: stb r9,0(r3) +#ifdef __LITTLE_ENDIAN__ + rotrdi r9,r9,8 +#endif 3: li r3,0 blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index ac3c2a10dafda9..555034f8505e8d 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, } PPC_DIVWU(r_A, r_A, r_X); break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; PPC_LI32(r_scratch1, K); - /* Top 32 bits of 64bit result -> A */ - PPC_MULHWU(r_A, r_A, r_scratch1); + PPC_DIVWU(r_A, r_A, r_scratch1); break; case BPF_S_ALU_AND_X: ctx->seen |= SEEN_XREG; diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 02245cee781838..d7ddcee7feb8bc 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -36,7 +36,6 @@ #include "powernv.h" #include "pci.h" -static char *hub_diag = NULL; static int ioda_eeh_nb_init = 0; static int ioda_eeh_event(struct notifier_block *nb, @@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose) ioda_eeh_nb_init = 1; } - /* We needn't HUB diag-data on PHB3 */ - if (phb->type == PNV_PHB_IODA1 && !hub_diag) { - hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - if (!hub_diag) { - pr_err("%s: Out of memory !\n", __func__); - return -ENOMEM; - } - } - #ifdef CONFIG_DEBUG_FS if (phb->dbgfs) { debugfs_create_file("err_injct_outbound", 0600, @@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) static void ioda_eeh_hub_diag(struct pci_controller *hose) { struct pnv_phb *phb = 
hose->private_data; - struct OpalIoP7IOCErrorData *data; + struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; long rc; - data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; - rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE); + rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); if (rc != OPAL_SUCCESS) { pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", __func__, phb->hub_id, rc); @@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose) struct OpalIoPhbErrorCommon *common; long rc; - common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; - rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); + rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, + PNV_PCI_DIAG_BUF_SIZE); if (rc != OPAL_SUCCESS) { pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", __func__, hose->global_number, rc); return; } + common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; switch (common->ioType) { case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: ioda_eeh_p7ioc_phb_diag(hose, common); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 911c24ef033e0a..1ed8d5f40f5ad1 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -172,11 +172,13 @@ struct pnv_phb { } ioda; }; - /* PHB status structure */ + /* PHB and hub status structure */ union { unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; struct OpalIoP7IOCPhbErrorData p7ioc; + struct OpalIoP7IOCErrorData hub_diag; } diag; + }; extern struct pci_ops pnv_pci_ops; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index c1f1908587011d..6f76ae417f47e5 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -470,7 +470,7 @@ static long pseries_little_endian_exceptions(void) static void __init pSeries_setup_arch(void) { - panic_timeout = 10; + set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); /* Discover PIC type and setup ppc_md accordingly */ pseries_discover_pic(); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1e1a03d2d19fbb..e9f3125325266f 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -135,7 +135,6 @@ config S390 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 if 32BIT select HAVE_VIRT_CPU_ACCOUNTING - select INIT_ALL_POSSIBLE select KTIME_SCALAR if 32BIT select MODULES_USE_ELF_RELA select OLD_SIGACTION diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 16760eeb79b09e..578680f6207acb 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -32,4 +32,19 @@ #define set_mb(var, value) do { var = value; mb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 4bf9da03591e7a..5d7e8cf83bd6c7 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -38,7 +38,8 @@ #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ - PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME) + PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ + PSW32_ASC_PRIMARY) #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE 
"s390\0\0\0\0" diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index c879fad404c8d3..cb700d54bd832a 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -56,6 +56,96 @@ struct cpumf_ctr_info { u32 reserved2[12]; } __packed; +/* QUERY SAMPLING INFORMATION block */ +struct hws_qsi_info_block { /* Bit(s) */ + unsigned int b0_13:14; /* 0-13: zeros */ + unsigned int as:1; /* 14: basic-sampling authorization */ + unsigned int ad:1; /* 15: diag-sampling authorization */ + unsigned int b16_21:6; /* 16-21: zeros */ + unsigned int es:1; /* 22: basic-sampling enable control */ + unsigned int ed:1; /* 23: diag-sampling enable control */ + unsigned int b24_29:6; /* 24-29: zeros */ + unsigned int cs:1; /* 30: basic-sampling activation control */ + unsigned int cd:1; /* 31: diag-sampling activation control */ + unsigned int bsdes:16; /* 4-5: size of basic sampling entry */ + unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */ + unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ + unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ + unsigned long tear; /* 24-31: TEAR contents */ + unsigned long dear; /* 32-39: DEAR contents */ + unsigned int rsvrd0; /* 40-43: reserved */ + unsigned int cpu_speed; /* 44-47: CPU speed */ + unsigned long long rsvrd1; /* 48-55: reserved */ + unsigned long long rsvrd2; /* 56-63: reserved */ +} __packed; + +/* SET SAMPLING CONTROLS request block */ +struct hws_lsctl_request_block { + unsigned int s:1; /* 0: maximum buffer indicator */ + unsigned int h:1; /* 1: part. level reserved for VM use*/ + unsigned long long b2_53:52;/* 2-53: zeros */ + unsigned int es:1; /* 54: basic-sampling enable control */ + unsigned int ed:1; /* 55: diag-sampling enable control */ + unsigned int b56_61:6; /* 56-61: - zeros */ + unsigned int cs:1; /* 62: basic-sampling activation control */ + unsigned int cd:1; /* 63: diag-sampling activation control */ + unsigned long interval; /* 8-15: sampling interval */ + unsigned long tear; /* 16-23: TEAR contents */ + unsigned long dear; /* 24-31: DEAR contents */ + /* 32-63: */ + unsigned long rsvrd1; /* reserved */ + unsigned long rsvrd2; /* reserved */ + unsigned long rsvrd3; /* reserved */ + unsigned long rsvrd4; /* reserved */ +} __packed; + +struct hws_basic_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:4; /* 16-19 reserved */ + unsigned int U:4; /* 20-23 Number of unique instruct. 
*/ + unsigned int z:2; /* zeros */ + unsigned int T:1; /* 26 PSW DAT mode */ + unsigned int W:1; /* 27 PSW wait state */ + unsigned int P:1; /* 28 PSW Problem state */ + unsigned int AS:2; /* 29-30 PSW address-space control */ + unsigned int I:1; /* 31 entry valid or invalid */ + unsigned int:16; + unsigned int prim_asn:16; /* primary ASN */ + unsigned long long ia; /* Instruction Address */ + unsigned long long gpp; /* Guest Program Parameter */ + unsigned long long hpp; /* Host Program Parameter */ +} __packed; + +struct hws_diag_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:14; /* 16-19 and 20-30 reserved */ + unsigned int I:1; /* 31 entry valid or invalid */ + u8 data[]; /* Machine-dependent sample data */ +} __packed; + +struct hws_combined_entry { + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ +} __packed; + +struct hws_trailer_entry { + union { + struct { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ + unsigned long long:61; /* 3 - 63: Reserved */ + }; + unsigned long long flags; /* 0 - 63: All indicators */ + }; + unsigned long long overflow; /* 64 - sample Overflow count */ + unsigned char timestamp[16]; /* 16 - 31 timestamp */ + unsigned long long reserved1; /* 32 -Reserved */ + unsigned long long reserved2; /* */ + unsigned long long progusage1; /* 48 - reserved for programming use */ + unsigned long long progusage2; /* */ +} __packed; + /* Query counter information */ static inline int qctri(struct cpumf_ctr_info *info) { @@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val) return cc; } +/* Query sampling information */ +static inline int qsi(struct hws_qsi_info_block *info) +{ + int cc; + cc = 1; + + asm volatile( + "0: .insn s,0xb2860000,0(%1)\n" + "1: lhi %0,0\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "=d" (cc), "+a" (info) + : "m" (*info) + : "cc", "memory"); + + return cc ? -EINVAL : 0; +} + +/* Load sampling controls */ +static inline int lsctl(struct hws_lsctl_request_block *req) +{ + int cc; + + cc = 1; + asm volatile( + "0: .insn s,0xb2870000,0(%1)\n" + "1: ipm %0\n" + " srl %0,28\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "+d" (cc), "+a" (req) + : "m" (*req) + : "cc", "memory"); + + return cc ? 
-EINVAL : 0; +} + +/* Sampling control helper functions */ + +#include + +static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi, + unsigned long freq) +{ + return (USEC_PER_SEC / freq) * qsi->cpu_speed; +} + +static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi, + unsigned long rate) +{ + return USEC_PER_SEC * qsi->cpu_speed / rate; +} + +#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL +#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL + +/* Return TOD timestamp contained in an trailer entry */ +static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) +{ + /* TOD in STCKE format */ + if (te->t) + return *((unsigned long long *) &te->timestamp[1]); + + /* TOD in STCK format */ + return *((unsigned long long *) &te->timestamp[0]); +} + +/* Return pointer to trailer entry of an sample data block */ +static inline unsigned long *trailer_entry_ptr(unsigned long v) +{ + void *ret; + + ret = (void *) v; + ret += PAGE_SIZE; + ret -= sizeof(struct hws_trailer_entry); + + return (unsigned long *) ret; +} + +/* Return if the entry in the sample data block table (sdbt) + * is a link to the next sdbt */ +static inline int is_link_entry(unsigned long *s) +{ + return *s & 0x1ul ? 1 : 0; +} + +/* Return pointer to the linked sdbt */ +static inline unsigned long *get_next_sdbt(unsigned long *s) +{ + return (unsigned long *) (*s & ~0x1ul); +} #endif /* _ASM_S390_CPU_MF_H */ diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index 7e1c917bbba2ac..09d1dd46bd5777 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h @@ -29,6 +29,8 @@ struct css_general_char { u32 fcx : 1; /* bit 88 */ u32 : 19; u32 alt_ssi : 1; /* bit 108 */ + u32:1; + u32 narf:1; /* bit 110 */ } __packed; extern struct css_general_char css_general_characteristics; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index c129ab2ac731c9..2583466f576b61 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -144,6 +144,7 @@ int clp_disable_fh(struct zpci_dev *); void zpci_event_error(void *); void zpci_event_availability(void *); void zpci_rescan(void); +bool zpci_is_enabled(void); #else /* CONFIG_PCI */ static inline void zpci_event_error(void *e) {} static inline void zpci_event_availability(void *e) {} diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 1141fb3e7b21e6..159a8ec6da9afc 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -1,21 +1,40 @@ /* * Performance event support - s390 specific definitions. * - * Copyright IBM Corp. 2009, 2012 + * Copyright IBM Corp. 
2009, 2013 * Author(s): Martin Schwidefsky * Hendrik Brueckner */ -#include +#ifndef _ASM_S390_PERF_EVENT_H +#define _ASM_S390_PERF_EVENT_H -/* CPU-measurement counter facility */ -#define PERF_CPUM_CF_MAX_CTR 256 +#ifdef CONFIG_64BIT + +#include +#include +#include /* Per-CPU flags for PMU states */ #define PMU_F_RESERVED 0x1000 #define PMU_F_ENABLED 0x2000 +#define PMU_F_IN_USE 0x4000 +#define PMU_F_ERR_IBE 0x0100 +#define PMU_F_ERR_LSDA 0x0200 +#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA) + +/* Perf defintions for PMU event attributes in sysfs */ +extern __init const struct attribute_group **cpumf_cf_event_group(void); +extern ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page); +#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name +#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr) + +#define CPUMF_EVENT_ATTR(cat, name, id) \ + PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show) +#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name) -#ifdef CONFIG_64BIT /* Perf callbacks */ struct pt_regs; @@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) +/* Perf pt_regs extension for sample-data-entry indicators */ +struct perf_sf_sde_regs { + unsigned char in_guest:1; /* guest sample */ + unsigned long reserved:63; /* reserved */ +}; + +/* Perf PMU definitions for the counter facility */ +#define PERF_CPUM_CF_MAX_CTR 256 + +/* Perf PMU definitions for the sampling facility */ +#define PERF_CPUM_SF_MAX_CTR 2 +#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ +#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ +#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ +#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ +#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ + PERF_CPUM_SF_DIAG_MODE) +#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */ + +#define REG_NONE 0 +#define REG_OVERFLOW 1 +#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) +#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) +#define RAWSAMPLE_REG(hwc) ((hwc)->config) +#define TEAR_REG(hwc) ((hwc)->last_tag) +#define SAMPL_RATE(hwc) ((hwc)->event_base) +#define SAMPL_FLAGS(hwc) ((hwc)->config_base) +#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) +#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS) + +/* Structure for sampling data entries to be passed as perf raw sample data + * to user space. Note that raw sample data must be aligned and, thus, might + * be padded with zeros. 
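[Editor's note, not part of the patch] PERF_EVENT_CPUM_SF (0xB0000) defined above is the raw event code user space uses to request basic-mode sampling, and the sf_raw_sample layout that follows is what arrives in the perf raw-sample payload. A hedged user-space sketch, assuming the sampling PMU accepts this raw code through the ordinary perf_event_open() path and that PERF_SAMPLE_RAW is requested to receive the sf_raw_sample data:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative only: open a basic-sampling event on the calling task. */
static int open_cpum_sf_basic(unsigned long long sample_freq)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size        = sizeof(attr);
	attr.type        = PERF_TYPE_RAW;
	attr.config      = 0xB0000;	/* PERF_EVENT_CPUM_SF (basic mode) */
	attr.freq        = 1;		/* sample_freq is a frequency      */
	attr.sample_freq = sample_freq;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_RAW;

	/* pid = 0, cpu = -1: this task, on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}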
+ */ +struct sf_raw_sample { +#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE +#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE + u64 format; + u32 size; /* Size of sf_raw_sample */ + u16 bsdes; /* Basic-sampling data entry size */ + u16 dsdes; /* Diagnostic-sampling data entry size */ + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ + u8 padding[]; /* Padding to next multiple of 8 */ +} __packed; + +/* Perf hardware reserve and release functions */ +int perf_reserve_sampling(void); +void perf_release_sampling(void); + #endif /* CONFIG_64BIT */ +#endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 57d0d7e794b18a..d786c634e05206 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 /** - * struct qdio_initialize - qdio initalization data + * struct qdio_initialize - qdio initialization data * @cdev: associated ccw device * @q_format: queue format * @adapter_name: name for the adapter @@ -378,6 +378,34 @@ struct qdio_initialize { struct qdio_outbuf_state *output_sbal_state_array; }; +/** + * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc() + * @l3_ipv6_addr: entry contains IPv6 address + * @l3_ipv4_addr: entry contains IPv4 address + * @l2_addr_lnid: entry contains MAC address and VLAN ID + */ +enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid}; + +/** + * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc() + * @nit: Network interface token + * @addr: Address of one of the three types + * + * The struct is passed to the callback function by qdio_brinfo_desc() + */ +struct qdio_brinfo_entry_l3_ipv6 { + u64 nit; + struct { unsigned char _s6_addr[16]; } addr; +} __packed; +struct qdio_brinfo_entry_l3_ipv4 { + u64 nit; + struct { uint32_t _s_addr; } addr; +} __packed; +struct qdio_brinfo_entry_l2 { + u64 nit; + struct { u8 mac[6]; u16 lnid; } addr_lnid; +} __packed; + #define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ #define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */ #define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */ @@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *); extern int qdio_shutdown(struct ccw_device *, int); extern int qdio_free(struct ccw_device *); extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); +extern int qdio_pnso_brinfo(struct subchannel_id schid, + int cnc, u16 *response, + void (*cb)(void *priv, enum qdio_brinfo_entry_type type, + void *entry), + void *priv); #endif /* __QDIO_H__ */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 2f390956c7c1c9..220e171413f857 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -52,8 +52,8 @@ int sclp_chp_configure(struct chp_id chpid); int sclp_chp_deconfigure(struct chp_id chpid); int sclp_chp_read_info(struct sclp_chp_info *info); void sclp_get_ipl_info(struct sclp_ipl_info *info); -bool sclp_has_linemode(void); -bool sclp_has_vt220(void); +bool __init sclp_has_linemode(void); +bool __init sclp_has_vt220(void); int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); diff --git a/arch/s390/include/asm/smp.h 
b/arch/s390/include/asm/smp.h index ac9bed8e103fa7..16077939409622 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -31,6 +31,7 @@ extern void smp_yield(void); extern void smp_stop_cpu(void); extern void smp_cpu_set_polarization(int cpu, int val); extern int smp_cpu_get_polarization(int cpu); +extern void smp_fill_possible_mask(void); #else /* CONFIG_SMP */ @@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; } static inline void smp_yield_cpu(int cpu) { } static inline void smp_yield(void) { } static inline void smp_stop_cpu(void) { } +static inline void smp_fill_possible_mask(void) { } #endif /* CONFIG_SMP */ diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index e83fc116f5bf2c..f2b18eacaca80c 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -154,6 +154,67 @@ struct ica_xcRB { unsigned short priority_window; unsigned int status; } __attribute__((packed)); + +/** + * struct ep11_cprb - EP11 connectivity programming request block + * @cprb_len: CPRB header length [0x0020] + * @cprb_ver_id: CPRB version id. [0x04] + * @pad_000: Alignment pad bytes + * @flags: Admin cmd [0x80] or functional cmd [0x00] + * @func_id: Function id / subtype [0x5434] + * @source_id: Source id [originator id] + * @target_id: Target id [usage/ctrl domain id] + * @ret_code: Return code + * @reserved1: Reserved + * @reserved2: Reserved + * @payload_len: Payload length + */ +struct ep11_cprb { + uint16_t cprb_len; + unsigned char cprb_ver_id; + unsigned char pad_000[2]; + unsigned char flags; + unsigned char func_id[2]; + uint32_t source_id; + uint32_t target_id; + uint32_t ret_code; + uint32_t reserved1; + uint32_t reserved2; + uint32_t payload_len; +} __attribute__((packed)); + +/** + * struct ep11_target_dev - EP11 target device list + * @ap_id: AP device id + * @dom_id: Usage domain id + */ +struct ep11_target_dev { + uint16_t ap_id; + uint16_t dom_id; +}; + +/** + * struct ep11_urb - EP11 user request block + * @targets_num: Number of target adapters + * @targets: Addr to target adapter list + * @weight: Level of request priority + * @req_no: Request id/number + * @req_len: Request length + * @req: Addr to request block + * @resp_len: Response length + * @resp: Addr to response block + */ +struct ep11_urb { + uint16_t targets_num; + uint64_t targets; + uint64_t weight; + uint64_t req_no; + uint64_t req_len; + uint64_t req; + uint64_t resp_len; + uint64_t resp; +} __attribute__((packed)); + #define AUTOSELECT ((unsigned int)0xFFFFFFFF) #define ZCRYPT_IOCTL_MAGIC 'z' @@ -183,6 +244,9 @@ struct ica_xcRB { * ZSECSENDCPRB * Send an arbitrary CPRB to a crypto card. * + * ZSENDEP11CPRB + * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card. + * * Z90STAT_STATUS_MASK * Return an 64 element array of unsigned chars for the status of * all devices. 
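[Editor's note, not part of the patch] The new ep11_urb/ep11_target_dev structures describe the request user space hands to the ZSENDEP11CPRB ioctl added in the next hunk. A hedged sketch of how an application might issue one request; the /dev/z90crypt node name and the adapter/domain IDs are assumptions for illustration, and the CPRB payload itself is device specific:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>

/* Illustrative only: send one EP11 CPRB to AP adapter 5, usage domain 13. */
static long send_ep11_cprb(void *req, size_t req_len,
			   void *resp, size_t resp_len)
{
	struct ep11_target_dev target = { .ap_id = 5, .dom_id = 13 };
	struct ep11_urb urb;
	long rc;
	int fd;

	memset(&urb, 0, sizeof(urb));
	urb.targets_num = 1;
	urb.targets	= (uint64_t)(unsigned long) &target;
	urb.req_len	= req_len;
	urb.req		= (uint64_t)(unsigned long) req;
	urb.resp_len	= resp_len;
	urb.resp	= (uint64_t)(unsigned long) resp;

	fd = open("/dev/z90crypt", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, ZSENDEP11CPRB, &urb);
	close(fd);
	return rc;
}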
@@ -256,6 +320,7 @@ struct ica_xcRB { #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) +#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) /* New status calls */ #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 2403303cfed708..1b3ac09c11b6df 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -60,7 +60,8 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o ifdef CONFIG_64BIT -obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ + perf_cpum_cf_events.o obj-y += runtime_instr.o cache.o endif diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 95e7ba0fbb7eb1..8b84bc373e945b 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -412,8 +412,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; } else { regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; - err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, - (u16 __force __user *)(frame->retcode)); + if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, + (u16 __force __user *)(frame->retcode))) + goto give_sigsegv; } /* Set up backchain. */ diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index e5b43c97a8340a..384e609b47110d 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -74,7 +74,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ .endm .macro LPP newpp -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP jz .+8 .insn s,0xb2800000,\newpp @@ -82,7 +82,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ .endm .macro HANDLE_SIE_INTERCEPT scratch,reason -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) tmhh %r8,0x0001 # interrupting from user ? jnz .+62 lgr \scratch,%r9 @@ -946,7 +946,7 @@ cleanup_idle_insn: .quad __critical_end - __critical_start -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) /* * sie64a calling convention: * %r2 pointer to sie control block @@ -975,7 +975,7 @@ sie_done: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce # some program checks are suppressing. C code (e.g. do_protection_exception) # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions beween sie64a and sie_done should not cause program +# instructions between sie64a and sie_done should not cause program # interrupts. So lets use a nop (47 00 00 00) as a landing pad. 
# See also HANDLE_SIE_INTERCEPT rewind_pad: diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 1105502bf6e976..f51214c0485884 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -680,6 +680,7 @@ static int __init cpumf_pmu_init(void) goto out; } + cpumf_pmu.attr_groups = cpumf_cf_event_group(); rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); if (rc) { pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c new file mode 100644 index 00000000000000..4554a4bae39e5d --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -0,0 +1,322 @@ +/* + * Perf PMU sysfs events attributes for available CPU-measurement counters + * + */ + +#include +#include + + +/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */ + +CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000); +CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001); +CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002); +CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025); +CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004); +CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005); +CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040); +CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043); +CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044); +CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047); +CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048); +CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b); +CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c); +CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f); +CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092); +CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082); +CPUMF_EVENT_ATTR(cf_z196, 
ITLB1_MISSES, 0x0083); +CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080); +CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_zec12, ITLB1_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); + +static struct attribute *cpumcf_pmu_event_attr[] = { + CPUMF_EVENT_PTR(cf, CPU_CYCLES), + CPUMF_EVENT_PTR(cf, INSTRUCTIONS), + 
CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, AES_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_CYCLES), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES), + NULL, +}; + +static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT), + NULL, +}; + +static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES), + NULL, +}; + +static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_zec12, 
ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL), + NULL, +}; + +/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */ + +static struct attribute_group cpumsf_pmu_events_group = { + .name = "events", + .attrs = cpumcf_pmu_event_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cpumsf_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cpumsf_pmu_format_group = { + .name = "format", + .attrs = cpumsf_pmu_format_attr, +}; + +static const struct attribute_group *cpumsf_pmu_attr_groups[] = { + &cpumsf_pmu_events_group, + &cpumsf_pmu_format_group, + NULL, +}; + + +static __init struct attribute **merge_attr(struct attribute **a, + struct attribute **b) +{ + struct attribute **new; + int j, i; + + for (j = 0; a[j]; j++) + ; + for (i = 0; b[i]; i++) + j++; + j++; + + new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); + if (!new) + return NULL; + j = 0; + for (i = 0; a[i]; i++) + new[j++] = a[i]; + for (i = 0; b[i]; i++) + new[j++] = b[i]; + new[j] = NULL; + + return new; +} + +__init const struct attribute_group **cpumf_cf_event_group(void) +{ + struct attribute **combined, **model; + struct cpuid cpu_id; + + get_cpu_id(&cpu_id); + switch (cpu_id.machine) { + case 0x2097: + case 0x2098: + model = cpumcf_z10_pmu_event_attr; + break; + case 0x2817: + case 0x2818: + model = cpumcf_z196_pmu_event_attr; + break; + case 0x2827: + case 0x2828: + model = cpumcf_zec12_pmu_event_attr; + break; + default: + model = NULL; + break; + }; + + if (!model) + goto out; + + combined = merge_attr(cpumcf_pmu_event_attr, model); + if (combined) + cpumsf_pmu_events_group.attrs = combined; +out: + return cpumsf_pmu_attr_groups; +} diff --git a/arch/s390/kernel/perf_cpum_sf.c 
b/arch/s390/kernel/perf_cpum_sf.c new file mode 100644 index 00000000000000..6c0d29827cb620 --- /dev/null +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -0,0 +1,1641 @@ +/* + * Performance event support for the System z CPU-measurement Sampling Facility + * + * Copyright IBM Corp. 2013 + * Author(s): Hendrik Brueckner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#define KMSG_COMPONENT "cpum_sf" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Minimum number of sample-data-block-tables: + * At least one table is required for the sampling buffer structure. + * A single table contains up to 511 pointers to sample-data-blocks. + */ +#define CPUM_SF_MIN_SDBT 1 + +/* Number of sample-data-blocks per sample-data-block-table (SDBT): + * A table contains SDB pointers (8 bytes) and one table-link entry + * that points to the origin of the next SDBT. + */ +#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8) + +/* Maximum page offset for an SDBT table-link entry: + * If this page offset is reached, a table-link entry to the next SDBT + * must be added. + */ +#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) +static inline int require_table_link(const void *sdbt) +{ + return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; +} + +/* Minimum and maximum sampling buffer sizes: + * + * This number represents the maximum size of the sampling buffer taking + * the number of sample-data-block-tables into account. Note that these + * numbers apply to the basic-sampling function only. + * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if + * the diagnostic-sampling function is active. 
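[Editor's note, not part of the patch] With 4KB pages, the constants above work out to CPUM_SF_SDB_PER_TABLE = (4096 - 8) / 8 = 511 SDB pointers per table and CPUM_SF_SDBT_TL_OFFSET = 511 * 8 = 4088, i.e. the last 8-byte slot of every SDBT page is the table-link entry, tagged by bit 0 (see is_link_entry()/get_next_sdbt() in the asm/cpu_mf.h hunk earlier in this patch). A rough sketch of walking the resulting ring of tables; the driver's own traversal is free_sampling_buffer() below:

/* Illustrative only: visit every sample-data-block linked into an SDBT ring
 * that starts (and, via the last table-link entry, wraps back to) sdbt_origin. */
static void for_each_sdb(unsigned long *sdbt_origin,
			 void (*fn)(unsigned long sdb))
{
	unsigned long *entry = sdbt_origin;

	while (*entry) {
		if (is_link_entry(entry)) {		/* follow table link  */
			entry = get_next_sdbt(entry);
			if (entry == sdbt_origin)	/* wrapped: done      */
				break;
		} else {
			fn(*entry++);			/* plain SDB pointer  */
		}
	}
}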
+ * + * Sampling buffer size Buffer characteristics + * --------------------------------------------------- + * 64KB == 16 pages (4KB per page) + * 1 page for SDB-tables + * 15 pages for SDBs + * + * 32MB == 8192 pages (4KB per page) + * 16 pages for SDB-tables + * 8176 pages for SDBs + */ +static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; +static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; +static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; + +struct sf_buffer { + unsigned long *sdbt; /* Sample-data-block-table origin */ + /* buffer characteristics (required for buffer increments) */ + unsigned long num_sdb; /* Number of sample-data-blocks */ + unsigned long num_sdbt; /* Number of sample-data-block-tables */ + unsigned long *tail; /* last sample-data-block-table */ +}; + +struct cpu_hw_sf { + /* CPU-measurement sampling information block */ + struct hws_qsi_info_block qsi; + /* CPU-measurement sampling control block */ + struct hws_lsctl_request_block lsctl; + struct sf_buffer sfb; /* Sampling buffer */ + unsigned int flags; /* Status flags */ + struct perf_event *event; /* Scheduled perf event */ +}; +static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); + +/* Debug feature */ +static debug_info_t *sfdbg; + +/* + * sf_disable() - Switch off sampling facility + */ +static int sf_disable(void) +{ + struct hws_lsctl_request_block sreq; + + memset(&sreq, 0, sizeof(sreq)); + return lsctl(&sreq); +} + +/* + * sf_buffer_available() - Check for an allocated sampling buffer + */ +static int sf_buffer_available(struct cpu_hw_sf *cpuhw) +{ + return !!cpuhw->sfb.sdbt; +} + +/* + * deallocate sampling facility buffer + */ +static void free_sampling_buffer(struct sf_buffer *sfb) +{ + unsigned long *sdbt, *curr; + + if (!sfb->sdbt) + return; + + sdbt = sfb->sdbt; + curr = sdbt; + + /* Free the SDBT after all SDBs are processed... */ + while (1) { + if (!*curr || !sdbt) + break; + + /* Process table-link entries */ + if (is_link_entry(curr)) { + curr = get_next_sdbt(curr); + if (sdbt) + free_page((unsigned long) sdbt); + + /* If the origin is reached, sampling buffer is freed */ + if (curr == sfb->sdbt) + break; + else + sdbt = curr; + } else { + /* Process SDB pointer */ + if (*curr) { + free_page(*curr); + curr++; + } + } + } + + debug_sprintf_event(sfdbg, 5, + "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt); + memset(sfb, 0, sizeof(*sfb)); +} + +static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) +{ + unsigned long sdb, *trailer; + + /* Allocate and initialize sample-data-block */ + sdb = get_zeroed_page(gfp_flags); + if (!sdb) + return -ENOMEM; + trailer = trailer_entry_ptr(sdb); + *trailer = SDB_TE_ALERT_REQ_MASK; + + /* Link SDB into the sample-data-block-table */ + *sdbt = sdb; + + return 0; +} + +/* + * realloc_sampling_buffer() - extend sampler memory + * + * Allocates new sample-data-blocks and adds them to the specified sampling + * buffer memory. + * + * Important: This modifies the sampling buffer and must be called when the + * sampling facility is disabled. + * + * Returns zero on success, non-zero otherwise. + */ +static int realloc_sampling_buffer(struct sf_buffer *sfb, + unsigned long num_sdb, gfp_t gfp_flags) +{ + int i, rc; + unsigned long *new, *tail; + + if (!sfb->sdbt || !sfb->tail) + return -EINVAL; + + if (!is_link_entry(sfb->tail)) + return -EINVAL; + + /* Append to the existing sampling buffer, overwriting the table-link + * register. 
+ * The tail variables always points to the "tail" (last and table-link) + * entry in an SDB-table. + */ + tail = sfb->tail; + + /* Do a sanity check whether the table-link entry points to + * the sampling buffer origin. + */ + if (sfb->sdbt != get_next_sdbt(tail)) { + debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: " + "sampling buffer is not linked: origin=%p" + "tail=%p\n", + (void *) sfb->sdbt, (void *) tail); + return -EINVAL; + } + + /* Allocate remaining SDBs */ + rc = 0; + for (i = 0; i < num_sdb; i++) { + /* Allocate a new SDB-table if it is full. */ + if (require_table_link(tail)) { + new = (unsigned long *) get_zeroed_page(gfp_flags); + if (!new) { + rc = -ENOMEM; + break; + } + sfb->num_sdbt++; + /* Link current page to tail of chain */ + *tail = (unsigned long)(void *) new + 1; + tail = new; + } + + /* Allocate a new sample-data-block. + * If there is not enough memory, stop the realloc process + * and simply use what was allocated. If this is a temporary + * issue, a new realloc call (if required) might succeed. + */ + rc = alloc_sample_data_block(tail, gfp_flags); + if (rc) + break; + sfb->num_sdb++; + tail++; + } + + /* Link sampling buffer to its origin */ + *tail = (unsigned long) sfb->sdbt + 1; + sfb->tail = tail; + + debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer" + " settings: sdbt=%lu sdb=%lu\n", + sfb->num_sdbt, sfb->num_sdb); + return rc; +} + +/* + * allocate_sampling_buffer() - allocate sampler memory + * + * Allocates and initializes a sampling buffer structure using the + * specified number of sample-data-blocks (SDB). For each allocation, + * a 4K page is used. The number of sample-data-block-tables (SDBT) + * are calculated from SDBs. + * Also set the ALERT_REQ mask in each SDBs trailer. + * + * Returns zero on success, non-zero otherwise. + */ +static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) +{ + int rc; + + if (sfb->sdbt) + return -EINVAL; + + /* Allocate the sample-data-block-table origin */ + sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); + if (!sfb->sdbt) + return -ENOMEM; + sfb->num_sdb = 0; + sfb->num_sdbt = 1; + + /* Link the table origin to point to itself to prepare for + * realloc_sampling_buffer() invocation. + */ + sfb->tail = sfb->sdbt; + *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; + + /* Allocate requested number of sample-data-blocks */ + rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); + if (rc) { + free_sampling_buffer(sfb); + debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: " + "realloc_sampling_buffer failed with rc=%i\n", rc); + } else + debug_sprintf_event(sfdbg, 4, + "alloc_sampling_buffer: tear=%p dear=%p\n", + sfb->sdbt, (void *) *sfb->sdbt); + return rc; +} + +static void sfb_set_limits(unsigned long min, unsigned long max) +{ + struct hws_qsi_info_block si; + + CPUM_SF_MIN_SDB = min; + CPUM_SF_MAX_SDB = max; + + memset(&si, 0, sizeof(si)); + if (!qsi(&si)) + CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); +} + +static unsigned long sfb_max_limit(struct hw_perf_event *hwc) +{ + return SAMPL_DIAG_MODE(hwc) ? 
CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR + : CPUM_SF_MAX_SDB; +} + +static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + if (!sfb->sdbt) + return SFB_ALLOC_REG(hwc); + if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) + return SFB_ALLOC_REG(hwc) - sfb->num_sdb; + return 0; +} + +static int sfb_has_pending_allocs(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + return sfb_pending_allocs(sfb, hwc) > 0; +} + +static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) +{ + /* Limit the number of SDBs to not exceed the maximum */ + num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc)); + if (num) + SFB_ALLOC_REG(hwc) += num; +} + +static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc) +{ + SFB_ALLOC_REG(hwc) = 0; + sfb_account_allocs(num, hwc); +} + +static size_t event_sample_size(struct hw_perf_event *hwc) +{ + struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); + size_t sample_size; + + /* The sample size depends on the sampling function: The basic-sampling + * function must be always enabled, diagnostic-sampling function is + * optional. + */ + sample_size = sfr->bsdes; + if (SAMPL_DIAG_MODE(hwc)) + sample_size += sfr->dsdes; + + return sample_size; +} + +static void deallocate_buffers(struct cpu_hw_sf *cpuhw) +{ + if (cpuhw->sfb.sdbt) + free_sampling_buffer(&cpuhw->sfb); +} + +static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) +{ + unsigned long n_sdb, freq, factor; + size_t sfr_size, sample_size; + struct sf_raw_sample *sfr; + + /* Allocate raw sample buffer + * + * The raw sample buffer is used to temporarily store sampling data + * entries for perf raw sample processing. The buffer size mainly + * depends on the size of diagnostic-sampling data entries which is + * machine-specific. The exact size calculation includes: + * 1. The first 4 bytes of diagnostic-sampling data entries are + * already reflected in the sf_raw_sample structure. Subtract + * these bytes. + * 2. The perf raw sample data must be 8-byte aligned (u64) and + * perf's internal data size must be considered too. So add + * an additional u32 for correct alignment and subtract before + * allocating the buffer. + * 3. Store the raw sample buffer pointer in the perf event + * hardware structure. + */ + sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) + + sizeof(u32), sizeof(u64)); + sfr_size -= sizeof(u32); + sfr = kzalloc(sfr_size, GFP_KERNEL); + if (!sfr) + return -ENOMEM; + sfr->size = sfr_size; + sfr->bsdes = cpuhw->qsi.bsdes; + sfr->dsdes = cpuhw->qsi.dsdes; + RAWSAMPLE_REG(hwc) = (unsigned long) sfr; + + /* Calculate sampling buffers using 4K pages + * + * 1. Determine the sample data size which depends on the used + * sampling functions, for example, basic-sampling or + * basic-sampling with diagnostic-sampling. + * + * 2. Use the sampling frequency as input. The sampling buffer is + * designed for almost one second. This can be adjusted through + * the "factor" variable. + * In any case, alloc_sampling_buffer() sets the Alert Request + * Control indicator to trigger a measurement-alert to harvest + * sample-data-blocks (sdb). + * + * 3. Compute the number of sample-data-blocks and ensure a minimum + * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not + * exceed a "calculated" maximum. The symbolic maximum is + * designed for basic-sampling only and needs to be increased if + * diagnostic-sampling is active. 
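+	 *	 As a worked example with purely illustrative numbers (not
+	 *	 taken from this patch): at freq = 100000 samples per second,
+	 *	 a basic-sampling entry size of 32 bytes and 4KB pages, one
+	 *	 SDB holds (4096 - 64) / 32 = 126 entries, so
+	 *	 n_sdb = DIV_ROUND_UP(100000, 126) = 794 and the
+	 *	 CPUM_SF_MIN_SDB minimum of 15 does not apply.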
+ * See also the remarks for these symbolic constants. + * + * 4. Compute the number of sample-data-block-tables (SDBT) and + * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up + * to 511 SDBs). + */ + sample_size = event_sample_size(hwc); + freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); + factor = 1; + n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); + if (n_sdb < CPUM_SF_MIN_SDB) + n_sdb = CPUM_SF_MIN_SDB; + + /* If there is already a sampling buffer allocated, it is very likely + * that the sampling facility is enabled too. If the event to be + * initialized requires a greater sampling buffer, the allocation must + * be postponed. Changing the sampling buffer requires the sampling + * facility to be in the disabled state. So, account the number of + * required SDBs and let cpumsf_pmu_enable() resize the buffer just + * before the event is started. + */ + sfb_init_allocs(n_sdb, hwc); + if (sf_buffer_available(cpuhw)) + return 0; + + debug_sprintf_event(sfdbg, 3, + "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu" + " sample_size=%lu cpuhw=%p\n", + SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), + sample_size, cpuhw); + + return alloc_sampling_buffer(&cpuhw->sfb, + sfb_pending_allocs(&cpuhw->sfb, hwc)); +} + +static unsigned long min_percent(unsigned int percent, unsigned long base, + unsigned long min) +{ + return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); +} + +static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) +{ + /* Use a percentage-based approach to extend the sampling facility + * buffer. Accept up to 5% sample data loss. + * Vary the extents between 1% to 5% of the current number of + * sample-data-blocks. + */ + if (ratio <= 5) + return 0; + if (ratio <= 25) + return min_percent(1, base, 1); + if (ratio <= 50) + return min_percent(1, base, 1); + if (ratio <= 75) + return min_percent(2, base, 2); + if (ratio <= 100) + return min_percent(3, base, 3); + if (ratio <= 250) + return min_percent(4, base, 4); + + return min_percent(5, base, 8); +} + +static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, + struct hw_perf_event *hwc) +{ + unsigned long ratio, num; + + if (!OVERFLOW_REG(hwc)) + return; + + /* The sample_overflow contains the average number of sample data + * that has been lost because sample-data-blocks were full. + * + * Calculate the total number of sample data entries that has been + * discarded. Then calculate the ratio of lost samples to total samples + * per second in percent. + */ + ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, + sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); + + /* Compute number of sample-data-blocks */ + num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); + if (num) + sfb_account_allocs(num, hwc); + + debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu" + " num=%lu\n", OVERFLOW_REG(hwc), ratio, num); + OVERFLOW_REG(hwc) = 0; +} + +/* extend_sampling_buffer() - Extend sampling buffer + * @sfb: Sampling buffer structure (for local CPU) + * @hwc: Perf event hardware structure + * + * Use this function to extend the sampling buffer based on the overflow counter + * and postponed allocation extents stored in the specified Perf event hardware. + * + * Important: This function disables the sampling facility in order to safely + * change the sampling buffer structure. Do not call this function + * when the PMU is active. 
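+ *
+ * For illustration (hypothetical numbers): if SFB_ALLOC_REG(hwc) accounts for
+ * 812 SDBs after overflow accounting while sfb->num_sdb is still 794,
+ * sfb_pending_allocs() reports 18, and realloc_sampling_buffer() is asked for
+ * 18 more SDBs with GFP_ATOMIC; a partial allocation is tolerated and simply
+ * retried on a later cpumsf_pmu_enable() invocation.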
+ */ +static void extend_sampling_buffer(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + unsigned long num, num_old; + int rc; + + num = sfb_pending_allocs(sfb, hwc); + if (!num) + return; + num_old = sfb->num_sdb; + + /* Disable the sampling facility to reset any states and also + * clear pending measurement alerts. + */ + sf_disable(); + + /* Extend the sampling buffer. + * This memory allocation typically happens in an atomic context when + * called by perf. Because this is a reallocation, it is fine if the + * new SDB-request cannot be satisfied immediately. + */ + rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); + if (rc) + debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc " + "failed with rc=%i\n", rc); + + if (sfb_has_pending_allocs(sfb, hwc)) + debug_sprintf_event(sfdbg, 5, "sfb: extend: " + "req=%lu alloc=%lu remaining=%lu\n", + num, sfb->num_sdb - num_old, + sfb_pending_allocs(sfb, hwc)); +} + + +/* Number of perf events counting hardware events */ +static atomic_t num_events; +/* Used to avoid races in calling reserve/release_cpumf_hardware */ +static DEFINE_MUTEX(pmc_reserve_mutex); + +#define PMC_INIT 0 +#define PMC_RELEASE 1 +#define PMC_FAILURE 2 +static void setup_pmc_cpu(void *flags) +{ + int err; + struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf); + + err = 0; + switch (*((int *) flags)) { + case PMC_INIT: + memset(cpusf, 0, sizeof(*cpusf)); + err = qsi(&cpusf->qsi); + if (err) + break; + cpusf->flags |= PMU_F_RESERVED; + err = sf_disable(); + if (err) + pr_err("Switching off the sampling facility failed " + "with rc=%i\n", err); + debug_sprintf_event(sfdbg, 5, + "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf); + break; + case PMC_RELEASE: + cpusf->flags &= ~PMU_F_RESERVED; + err = sf_disable(); + if (err) { + pr_err("Switching off the sampling facility failed " + "with rc=%i\n", err); + } else + deallocate_buffers(cpusf); + debug_sprintf_event(sfdbg, 5, + "setup_pmc_cpu: released: cpuhw=%p\n", cpusf); + break; + } + if (err) + *((int *) flags) |= PMC_FAILURE; +} + +static void release_pmc_hardware(void) +{ + int flags = PMC_RELEASE; + + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + on_each_cpu(setup_pmc_cpu, &flags, 1); + perf_release_sampling(); +} + +static int reserve_pmc_hardware(void) +{ + int flags = PMC_INIT; + int err; + + err = perf_reserve_sampling(); + if (err) + return err; + on_each_cpu(setup_pmc_cpu, &flags, 1); + if (flags & PMC_FAILURE) { + release_pmc_hardware(); + return -ENODEV; + } + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + return 0; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Free raw sample buffer */ + if (RAWSAMPLE_REG(&event->hw)) + kfree((void *) RAWSAMPLE_REG(&event->hw)); + + /* Release PMC if this is the last perf event */ + if (!atomic_add_unless(&num_events, -1, 1)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_dec_return(&num_events) == 0) + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + +static void hw_init_period(struct hw_perf_event *hwc, u64 period) +{ + hwc->sample_period = period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); +} + +static void hw_reset_registers(struct hw_perf_event *hwc, + unsigned long *sdbt_origin) +{ + struct sf_raw_sample *sfr; + + /* (Re)set to first sample-data-block-table */ + TEAR_REG(hwc) = (unsigned long) sdbt_origin; + + /* (Re)set raw sampling buffer register */ + sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); + memset(&sfr->basic, 0, 
sizeof(sfr->basic)); + memset(&sfr->diag, 0, sfr->dsdes); +} + +static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, + unsigned long rate) +{ + return clamp_t(unsigned long, rate, + si->min_sampl_rate, si->max_sampl_rate); +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct cpu_hw_sf *cpuhw; + struct hws_qsi_info_block si; + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + unsigned long rate; + int cpu, err; + + /* Reserve CPU-measurement sampling facility */ + err = 0; + if (!atomic_inc_not_zero(&num_events)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) + err = -EBUSY; + else + atomic_inc(&num_events); + mutex_unlock(&pmc_reserve_mutex); + } + event->destroy = hw_perf_event_destroy; + + if (err) + goto out; + + /* Access per-CPU sampling information (query sampling info) */ + /* + * The event->cpu value can be -1 to count on every CPU, for example, + * when attaching to a task. If this is specified, use the query + * sampling info from the current CPU, otherwise use event->cpu to + * retrieve the per-CPU information. + * Later, cpuhw indicates whether to allocate sampling buffers for a + * particular CPU (cpuhw!=NULL) or each online CPU (cpuw==NULL). + */ + memset(&si, 0, sizeof(si)); + cpuhw = NULL; + if (event->cpu == -1) + qsi(&si); + else { + /* Event is pinned to a particular CPU, retrieve the per-CPU + * sampling structure for accessing the CPU-specific QSI. + */ + cpuhw = &per_cpu(cpu_hw_sf, event->cpu); + si = cpuhw->qsi; + } + + /* Check sampling facility authorization and, if not authorized, + * fall back to other PMUs. It is safe to check any CPU because + * the authorization is identical for all configured CPUs. + */ + if (!si.as) { + err = -ENOENT; + goto out; + } + + /* Always enable basic sampling */ + SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE; + + /* Check if diagnostic sampling is requested. Deny if the required + * sampling authorization is missing. + */ + if (attr->config == PERF_EVENT_CPUM_SF_DIAG) { + if (!si.ad) { + err = -EPERM; + goto out; + } + SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; + } + + /* Check and set other sampling flags */ + if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) + SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; + + /* The sampling information (si) contains information about the + * min/max sampling intervals and the CPU speed. So calculate the + * correct sampling interval and avoid the whole period adjust + * feedback loop. + */ + rate = 0; + if (attr->freq) { + rate = freq_to_sample_rate(&si, attr->sample_freq); + rate = hw_limit_rate(&si, rate); + attr->freq = 0; + attr->sample_period = rate; + } else { + /* The min/max sampling rates specifies the valid range + * of sample periods. If the specified sample period is + * out of range, limit the period to the range boundary. + */ + rate = hw_limit_rate(&si, hwc->sample_period); + + /* The perf core maintains a maximum sample rate that is + * configurable through the sysctl interface. Ensure the + * sampling rate does not exceed this value. This also helps + * to avoid throttling when pushing samples with + * perf_event_overflow(). 
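+		 * For illustration (hypothetical numbers): if the clamped
+		 * period still corresponds to roughly 150000 samples per
+		 * second while the perf_event_max_sample_rate sysctl is set
+		 * to 100000, the event is rejected with -EINVAL here rather
+		 * than being throttled later.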
+ */ + if (sample_rate_to_freq(&si, rate) > + sysctl_perf_event_sample_rate) { + err = -EINVAL; + debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n"); + goto out; + } + } + SAMPL_RATE(hwc) = rate; + hw_init_period(hwc, SAMPL_RATE(hwc)); + + /* Initialize sample data overflow accounting */ + hwc->extra_reg.reg = REG_OVERFLOW; + OVERFLOW_REG(hwc) = 0; + + /* Allocate the per-CPU sampling buffer using the CPU information + * from the event. If the event is not pinned to a particular + * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling + * buffers for each online CPU. + */ + if (cpuhw) + /* Event is pinned to a particular CPU */ + err = allocate_buffers(cpuhw, hwc); + else { + /* Event is not pinned, allocate sampling buffer on + * each online CPU + */ + for_each_online_cpu(cpu) { + cpuhw = &per_cpu(cpu_hw_sf, cpu); + err = allocate_buffers(cpuhw, hwc); + if (err) + break; + } + } +out: + return err; +} + +static int cpumsf_pmu_event_init(struct perf_event *event) +{ + int err; + + /* No support for taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + if ((event->attr.config != PERF_EVENT_CPUM_SF) && + (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) + return -ENOENT; + break; + case PERF_TYPE_HARDWARE: + /* Support sampling of CPU cycles in addition to the + * counter facility. However, the counter facility + * is more precise and, hence, restrict this PMU to + * sampling events only. + */ + if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) + return -ENOENT; + if (!is_sampling_event(event)) + return -ENOENT; + break; + default: + return -ENOENT; + } + + /* Check online status of the CPU to which the event is pinned */ + if (event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + /* Force reset of idle/hv excludes regardless of what the + * user requested. + */ + if (event->attr.exclude_hv) + event->attr.exclude_hv = 0; + if (event->attr.exclude_idle) + event->attr.exclude_idle = 0; + + err = __hw_perf_event_init(event); + if (unlikely(err)) + if (event->destroy) + event->destroy(event); + return err; +} + +static void cpumsf_pmu_enable(struct pmu *pmu) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + struct hw_perf_event *hwc; + int err; + + if (cpuhw->flags & PMU_F_ENABLED) + return; + + if (cpuhw->flags & PMU_F_ERR_MASK) + return; + + /* Check whether to extent the sampling buffer. + * + * Two conditions trigger an increase of the sampling buffer for a + * perf event: + * 1. Postponed buffer allocations from the event initialization. + * 2. Sampling overflows that contribute to pending allocations. + * + * Note that the extend_sampling_buffer() function disables the sampling + * facility, but it can be fully re-enabled using sampling controls that + * have been saved in cpumsf_pmu_disable(). 
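+	 * (Illustrative cross-reference: cpumsf_pmu_disable() below saves the
+	 * current TEAR/DEAR values from QSI into cpuhw->lsctl while the
+	 * facility is still enabled, so the lsctl() call below resumes
+	 * sampling at the buffer position where it stopped.)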
+ */ + if (cpuhw->event) { + hwc = &cpuhw->event->hw; + /* Account number of overflow-designated buffer extents */ + sfb_account_overflows(cpuhw, hwc); + if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) + extend_sampling_buffer(&cpuhw->sfb, hwc); + } + + /* (Re)enable the PMU and sampling facility */ + cpuhw->flags |= PMU_F_ENABLED; + barrier(); + + err = lsctl(&cpuhw->lsctl); + if (err) { + cpuhw->flags &= ~PMU_F_ENABLED; + pr_err("Loading sampling controls failed: op=%i err=%i\n", + 1, err); + return; + } + + debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i " + "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs, + cpuhw->lsctl.ed, cpuhw->lsctl.cd, + (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear); +} + +static void cpumsf_pmu_disable(struct pmu *pmu) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + struct hws_lsctl_request_block inactive; + struct hws_qsi_info_block si; + int err; + + if (!(cpuhw->flags & PMU_F_ENABLED)) + return; + + if (cpuhw->flags & PMU_F_ERR_MASK) + return; + + /* Switch off sampling activation control */ + inactive = cpuhw->lsctl; + inactive.cs = 0; + inactive.cd = 0; + + err = lsctl(&inactive); + if (err) { + pr_err("Loading sampling controls failed: op=%i err=%i\n", + 2, err); + return; + } + + /* Save state of TEAR and DEAR register contents */ + if (!qsi(&si)) { + /* TEAR/DEAR values are valid only if the sampling facility is + * enabled. Note that cpumsf_pmu_disable() might be called even + * for a disabled sampling facility because cpumsf_pmu_enable() + * controls the enable/disable state. + */ + if (si.es) { + cpuhw->lsctl.tear = si.tear; + cpuhw->lsctl.dear = si.dear; + } + } else + debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: " + "qsi() failed with err=%i\n", err); + + cpuhw->flags &= ~PMU_F_ENABLED; +} + +/* perf_exclude_event() - Filter event + * @event: The perf event + * @regs: pt_regs structure + * @sde_regs: Sample-data-entry (sde) regs structure + * + * Filter perf events according to their exclude specification. + * + * Return non-zero if the event shall be excluded. + */ +static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, + struct perf_sf_sde_regs *sde_regs) +{ + if (event->attr.exclude_user && user_mode(regs)) + return 1; + if (event->attr.exclude_kernel && !user_mode(regs)) + return 1; + if (event->attr.exclude_guest && sde_regs->in_guest) + return 1; + if (event->attr.exclude_host && !sde_regs->in_guest) + return 1; + return 0; +} + +/* perf_push_sample() - Push samples to perf + * @event: The perf event + * @sample: Hardware sample data + * + * Use the hardware sample data to create perf event sample. The sample + * is the pushed to the event subsystem and the function checks for + * possible event overflows. If an event overflow occurs, the PMU is + * stopped. + * + * Return non-zero if an event overflow occurred. + */ +static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) +{ + int overflow; + struct pt_regs regs; + struct perf_sf_sde_regs *sde_regs; + struct perf_sample_data data; + struct perf_raw_record raw; + + /* Setup perf sample */ + perf_sample_data_init(&data, 0, event->hw.last_period); + raw.size = sfr->size; + raw.data = sfr; + data.raw = &raw; + + /* Setup pt_regs to look like an CPU-measurement external interrupt + * using the Program Request Alert code. The regs.int_parm_long + * field which is unused contains additional sample-data-entry related + * indicators. 
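+	 * (Cross-reference, for illustration only: perf_misc_flags() in
+	 * perf_event.c, later in this patch, detects these synthetic regs via
+	 *	if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
+	 *		if (!regs->gprs[15])
+	 *			return perf_misc_flags_sf(regs);
+	 * and only then classifies the sample as user/kernel/guest.)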
+ */ + memset(®s, 0, sizeof(regs)); + regs.int_code = 0x1407; + regs.int_parm = CPU_MF_INT_SF_PRA; + sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long; + + regs.psw.addr = sfr->basic.ia; + if (sfr->basic.T) + regs.psw.mask |= PSW_MASK_DAT; + if (sfr->basic.W) + regs.psw.mask |= PSW_MASK_WAIT; + if (sfr->basic.P) + regs.psw.mask |= PSW_MASK_PSTATE; + switch (sfr->basic.AS) { + case 0x0: + regs.psw.mask |= PSW_ASC_PRIMARY; + break; + case 0x1: + regs.psw.mask |= PSW_ASC_ACCREG; + break; + case 0x2: + regs.psw.mask |= PSW_ASC_SECONDARY; + break; + case 0x3: + regs.psw.mask |= PSW_ASC_HOME; + break; + } + + /* The host-program-parameter (hpp) contains the sie control + * block that is set by sie64a() in entry64.S. Check if hpp + * refers to a valid control block and set sde_regs flags + * accordingly. This would allow to use hpp values for other + * purposes too. + * For now, simply use a non-zero value as guest indicator. + */ + if (sfr->basic.hpp) + sde_regs->in_guest = 1; + + overflow = 0; + if (perf_exclude_event(event, ®s, sde_regs)) + goto out; + if (perf_event_overflow(event, &data, ®s)) { + overflow = 1; + event->pmu->stop(event, 0); + } + perf_event_update_userpage(event); +out: + return overflow; +} + +static void perf_event_count_update(struct perf_event *event, u64 count) +{ + local64_add(count, &event->count); +} + +static int sample_format_is_valid(struct hws_combined_entry *sample, + unsigned int flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + /* Only basic-sampling data entries with data-entry-format + * version of 0x0001 can be processed. + */ + if (sample->basic.def != 0x0001) + return 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + /* The data-entry-format number of diagnostic-sampling data + * entries can vary. Because diagnostic data is just passed + * through, do only a sanity check on the DEF. + */ + if (sample->diag.def < 0x8001) + return 0; + return 1; +} + +static int sample_is_consistent(struct hws_combined_entry *sample, + unsigned long flags) +{ + /* This check applies only to basic-sampling data entries of potentially + * combined-sampling data entries. Invalid entries cannot be processed + * by the PMU and, thus, do not deliver an associated + * diagnostic-sampling data entry. + */ + if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE))) + return 0; + /* + * Samples are skipped, if they are invalid or for which the + * instruction address is not predictable, i.e., the wait-state bit is + * set. + */ + if (sample->basic.I || sample->basic.W) + return 0; + return 1; +} + +static void reset_sample_slot(struct hws_combined_entry *sample, + unsigned long flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + sample->basic.def = 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + sample->diag.def = 0; +} + +static void sfr_store_sample(struct sf_raw_sample *sfr, + struct hws_combined_entry *sample) +{ + if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE)) + sfr->basic = sample->basic; + if (sfr->format & PERF_CPUM_SF_DIAG_MODE) + memcpy(&sfr->diag, &sample->diag, sfr->dsdes); +} + +static void debug_sample_entry(struct hws_combined_entry *sample, + struct hws_trailer_entry *te, + unsigned long flags) +{ + debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown " + "sampling data entry: te->f=%i basic.def=%04x (%p)" + " diag.def=%04x (%p)\n", te->f, + sample->basic.def, &sample->basic, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? sample->diag.def : 0xFFFF, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? 
&sample->diag : NULL);
+}
+
+/* hw_collect_samples() - Walk through a sample-data-block and collect samples
+ * @event:	The perf event
+ * @sdbt:	Sample-data-block table
+ * @overflow:	Event overflow counter
+ *
+ * Walks through a sample-data-block and collects sampling data entries that are
+ * then pushed to the perf event subsystem.  Depending on the sampling function,
+ * there can be either basic-sampling or combined-sampling data entries.  A
+ * combined-sampling data entry consists of a basic- and a diagnostic-sampling
+ * data entry.  The sampling function is determined by the flags in the perf
+ * event hardware structure.  The function always works with a combined-sampling
+ * data entry but ignores the diagnostic portion if it is not available.
+ *
+ * Note that the implementation focuses on basic-sampling data entries and, if
+ * such an entry is not valid, the entire combined-sampling data entry is
+ * ignored.
+ *
+ * The overflow variable counts the number of samples that have been discarded
+ * due to a perf event overflow.
+ */
+static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
+			       unsigned long long *overflow)
+{
+	unsigned long flags = SAMPL_FLAGS(&event->hw);
+	struct hws_combined_entry *sample;
+	struct hws_trailer_entry *te;
+	struct sf_raw_sample *sfr;
+	size_t sample_size;
+
+	/* Prepare and initialize raw sample data */
+	sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw);
+	sfr->format = flags & PERF_CPUM_SF_MODE_MASK;
+
+	sample_size = event_sample_size(&event->hw);
+	te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
+	sample = (struct hws_combined_entry *) *sdbt;
+	while ((unsigned long *) sample < (unsigned long *) te) {
+		/* Check for an empty sample */
+		if (!sample->basic.def)
+			break;
+
+		/* Update perf event period */
+		perf_event_count_update(event, SAMPL_RATE(&event->hw));
+
+		/* Check sampling data entry */
+		if (sample_format_is_valid(sample, flags)) {
+			/* If an event overflow occurred, the PMU is stopped to
+			 * throttle event delivery.  Remaining sample data is
+			 * discarded.
+			 */
+			if (!*overflow) {
+				if (sample_is_consistent(sample, flags)) {
+					/* Deliver sample data to perf */
+					sfr_store_sample(sfr, sample);
+					*overflow = perf_push_sample(event, sfr);
+				}
+			} else
+				/* Count discarded samples */
+				*overflow += 1;
+		} else {
+			debug_sample_entry(sample, te, flags);
+			/* Sample slot is not yet written or other record.
+			 *
+			 * This condition can occur if the buffer was reused
+			 * from a combined basic- and diagnostic-sampling.
+			 * If only basic-sampling is then active, entries are
+			 * written into the larger diagnostic entries.
+			 * This is typically the case for sample-data-blocks
+			 * that are not full.  Stop processing if the first
+			 * invalid format was detected.
+			 */
+			if (!te->f)
+				break;
+		}
+
+		/* Reset sample slot and advance to next sample */
+		reset_sample_slot(sample, flags);
+		sample += sample_size;
+	}
+}
+
+/* hw_perf_event_update() - Process sampling buffer
+ * @event:	The perf event
+ * @flush_all:	Flag to also flush partially filled sample-data-blocks
+ *
+ * Processes the sampling buffer and creates perf event samples.
+ * The sampling buffer position is retrieved and saved in the TEAR_REG
+ * register of the specified perf event.
+ *
+ * Only full sample-data-blocks are processed.  Specify the flush_all flag
+ * to also walk through partially filled sample-data-blocks.  It is ignored
+ * if PERF_CPUM_SF_FULL_BLOCKS is set.
The PERF_CPUM_SF_FULL_BLOCKS flag + * enforces the processing of full sample-data-blocks only (trailer entries + * with the block-full-indicator bit set). + */ +static void hw_perf_event_update(struct perf_event *event, int flush_all) +{ + struct hw_perf_event *hwc = &event->hw; + struct hws_trailer_entry *te; + unsigned long *sdbt; + unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; + int done; + + if (flush_all && SDB_FULL_BLOCKS(hwc)) + flush_all = 0; + + sdbt = (unsigned long *) TEAR_REG(hwc); + done = event_overflow = sampl_overflow = num_sdb = 0; + while (!done) { + /* Get the trailer entry of the sample-data-block */ + te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); + + /* Leave loop if no more work to do (block full indicator) */ + if (!te->f) { + done = 1; + if (!flush_all) + break; + } + + /* Check the sample overflow count */ + if (te->overflow) + /* Account sample overflows and, if a particular limit + * is reached, extend the sampling buffer. + * For details, see sfb_account_overflows(). + */ + sampl_overflow += te->overflow; + + /* Timestamps are valid for full sample-data-blocks only */ + debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p " + "overflow=%llu timestamp=0x%llx\n", + sdbt, te->overflow, + (te->f) ? trailer_timestamp(te) : 0ULL); + + /* Collect all samples from a single sample-data-block and + * flag if an (perf) event overflow happened. If so, the PMU + * is stopped and remaining samples will be discarded. + */ + hw_collect_samples(event, sdbt, &event_overflow); + num_sdb++; + + /* Reset trailer (using compare-double-and-swap) */ + do { + te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; + te_flags |= SDB_TE_ALERT_REQ_MASK; + } while (!cmpxchg_double(&te->flags, &te->overflow, + te->flags, te->overflow, + te_flags, 0ULL)); + + /* Advance to next sample-data-block */ + sdbt++; + if (is_link_entry(sdbt)) + sdbt = get_next_sdbt(sdbt); + + /* Update event hardware registers */ + TEAR_REG(hwc) = (unsigned long) sdbt; + + /* Stop processing sample-data if all samples of the current + * sample-data-block were flushed even if it was not full. + */ + if (flush_all && done) + break; + + /* If an event overflow happened, discard samples by + * processing any remaining sample-data-blocks. + */ + if (event_overflow) + flush_all = 1; + } + + /* Account sample overflows in the event hardware structure */ + if (sampl_overflow) + OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + + sampl_overflow, 1 + num_sdb); + if (sampl_overflow || event_overflow) + debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " + "overflow stats: sample=%llu event=%llu\n", + sampl_overflow, event_overflow); +} + +static void cpumsf_pmu_read(struct perf_event *event) +{ + /* Nothing to do ... updates are interrupt-driven */ +} + +/* Activate sampling control. + * Next call of pmu_enable() starts sampling. + */ +static void cpumsf_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + perf_pmu_disable(event->pmu); + event->hw.state = 0; + cpuhw->lsctl.cs = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.cd = 1; + perf_pmu_enable(event->pmu); +} + +/* Deactivate sampling control. + * Next call of pmu_enable() stops sampling. 
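+ *
+ * (Illustrative summary, no new behaviour: cpumsf_pmu_add()/del() toggle the
+ * enable controls lsctl.es/lsctl.ed, while cpumsf_pmu_start()/stop() only
+ * toggle the activation controls lsctl.cs/lsctl.cd; either way the controls
+ * take effect when cpumsf_pmu_enable() loads them with lsctl().)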
+ */ +static void cpumsf_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (event->hw.state & PERF_HES_STOPPED) + return; + + perf_pmu_disable(event->pmu); + cpuhw->lsctl.cs = 0; + cpuhw->lsctl.cd = 0; + event->hw.state |= PERF_HES_STOPPED; + + if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { + hw_perf_event_update(event, 1); + event->hw.state |= PERF_HES_UPTODATE; + } + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + int err; + + if (cpuhw->flags & PMU_F_IN_USE) + return -EAGAIN; + + if (!cpuhw->sfb.sdbt) + return -EINVAL; + + err = 0; + perf_pmu_disable(event->pmu); + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + /* Set up sampling controls. Always program the sampling register + * using the SDB-table start. Reset TEAR_REG event hardware register + * that is used by hw_perf_event_update() to store the sampling buffer + * position after samples have been flushed. + */ + cpuhw->lsctl.s = 0; + cpuhw->lsctl.h = 1; + cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; + cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; + cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); + hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); + + /* Ensure sampling functions are in the disabled state. If disabled, + * switch on sampling enable control. */ + if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { + err = -EAGAIN; + goto out; + } + cpuhw->lsctl.es = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.ed = 1; + + /* Set in_use flag and store event */ + event->hw.idx = 0; /* only one sampling event per CPU supported */ + cpuhw->event = event; + cpuhw->flags |= PMU_F_IN_USE; + + if (flags & PERF_EF_START) + cpumsf_pmu_start(event, PERF_EF_RELOAD); +out: + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); + return err; +} + +static void cpumsf_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + perf_pmu_disable(event->pmu); + cpumsf_pmu_stop(event, PERF_EF_UPDATE); + + cpuhw->lsctl.es = 0; + cpuhw->lsctl.ed = 0; + cpuhw->flags &= ~PMU_F_IN_USE; + cpuhw->event = NULL; + + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_event_idx(struct perf_event *event) +{ + return event->hw.idx; +} + +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); + +static struct attribute *cpumsf_pmu_events_attr[] = { + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), + NULL, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cpumsf_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cpumsf_pmu_events_group = { + .name = "events", + .attrs = cpumsf_pmu_events_attr, +}; +static struct attribute_group cpumsf_pmu_format_group = { + .name = "format", + .attrs = cpumsf_pmu_format_attr, +}; +static const struct attribute_group *cpumsf_pmu_attr_groups[] = { + &cpumsf_pmu_events_group, + &cpumsf_pmu_format_group, + NULL, +}; + +static struct pmu cpumf_sampling = { + .pmu_enable = cpumsf_pmu_enable, + .pmu_disable = cpumsf_pmu_disable, + + .event_init = cpumsf_pmu_event_init, + .add = cpumsf_pmu_add, + .del = cpumsf_pmu_del, + + .start = cpumsf_pmu_start, + .stop = cpumsf_pmu_stop, + .read = cpumsf_pmu_read, + + .event_idx = 
cpumsf_pmu_event_idx, + .attr_groups = cpumsf_pmu_attr_groups, +}; + +static void cpumf_measurement_alert(struct ext_code ext_code, + unsigned int alert, unsigned long unused) +{ + struct cpu_hw_sf *cpuhw; + + if (!(alert & CPU_MF_INT_SF_MASK)) + return; + inc_irq_stat(IRQEXT_CMS); + cpuhw = &__get_cpu_var(cpu_hw_sf); + + /* Measurement alerts are shared and might happen when the PMU + * is not reserved. Ignore these alerts in this case. */ + if (!(cpuhw->flags & PMU_F_RESERVED)) + return; + + /* The processing below must take care of multiple alert events that + * might be indicated concurrently. */ + + /* Program alert request */ + if (alert & CPU_MF_INT_SF_PRA) { + if (cpuhw->flags & PMU_F_IN_USE) + hw_perf_event_update(cpuhw->event, 0); + else + WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); + } + + /* Report measurement alerts only for non-PRA codes */ + if (alert != CPU_MF_INT_SF_PRA) + debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert); + + /* Sampling authorization change request */ + if (alert & CPU_MF_INT_SF_SACA) + qsi(&cpuhw->qsi); + + /* Loss of sample data due to high-priority machine activities */ + if (alert & CPU_MF_INT_SF_LSDA) { + pr_err("Sample data was lost\n"); + cpuhw->flags |= PMU_F_ERR_LSDA; + sf_disable(); + } + + /* Invalid sampling buffer entry */ + if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) { + pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n", + alert); + cpuhw->flags |= PMU_F_ERR_IBE; + sf_disable(); + } +} + +static int cpumf_pmu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long) hcpu; + int flags; + + /* Ignore the notification if no events are scheduled on the PMU. + * This might be racy... + */ + if (!atomic_read(&num_events)) + return NOTIFY_OK; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + flags = PMC_INIT; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + case CPU_DOWN_PREPARE: + flags = PMC_RELEASE; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) +{ + if (!cpum_sf_avail()) + return -ENODEV; + return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); +} + +static int param_set_sfb_size(const char *val, const struct kernel_param *kp) +{ + int rc; + unsigned long min, max; + + if (!cpum_sf_avail()) + return -ENODEV; + if (!val || !strlen(val)) + return -EINVAL; + + /* Valid parameter values: "min,max" or "max" */ + min = CPUM_SF_MIN_SDB; + max = CPUM_SF_MAX_SDB; + if (strchr(val, ',')) + rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 
0 : -EINVAL; + else + rc = kstrtoul(val, 10, &max); + + if (min < 2 || min >= max || max > get_num_physpages()) + rc = -EINVAL; + if (rc) + return rc; + + sfb_set_limits(min, max); + pr_info("The sampling buffer limits have changed to: " + "min=%lu max=%lu (diag=x%lu)\n", + CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); + return 0; +} + +#define param_check_sfb_size(name, p) __param_check(name, p, void) +static struct kernel_param_ops param_ops_sfb_size = { + .set = param_set_sfb_size, + .get = param_get_sfb_size, +}; + +#define RS_INIT_FAILURE_QSI 0x0001 +#define RS_INIT_FAILURE_BSDES 0x0002 +#define RS_INIT_FAILURE_ALRT 0x0003 +#define RS_INIT_FAILURE_PERF 0x0004 +static void __init pr_cpumsf_err(unsigned int reason) +{ + pr_err("Sampling facility support for perf is not available: " + "reason=%04x\n", reason); +} + +static int __init init_cpum_sampling_pmu(void) +{ + struct hws_qsi_info_block si; + int err; + + if (!cpum_sf_avail()) + return -ENODEV; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) { + pr_cpumsf_err(RS_INIT_FAILURE_QSI); + return -ENODEV; + } + + if (si.bsdes != sizeof(struct hws_basic_entry)) { + pr_cpumsf_err(RS_INIT_FAILURE_BSDES); + return -EINVAL; + } + + if (si.ad) + sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); + + sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); + if (!sfdbg) + pr_err("Registering for s390dbf failed\n"); + debug_register_view(sfdbg, &debug_sprintf_view); + + err = register_external_interrupt(0x1407, cpumf_measurement_alert); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_ALRT); + goto out; + } + + err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_PERF); + unregister_external_interrupt(0x1407, cpumf_measurement_alert); + goto out; + } + perf_cpu_notifier(cpumf_pmu_notifier); +out: + return err; +} +arch_initcall(init_cpum_sampling_pmu); +core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 2343c218b8f991..5d2dfa31c4efad 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -1,7 +1,7 @@ /* * Performance event support for s390x * - * Copyright IBM Corp. 2012 + * Copyright IBM Corp. 2012, 2013 * Author(s): Hendrik Brueckner * * This program is free software; you can redistribute it and/or modify @@ -16,15 +16,19 @@ #include #include #include +#include +#include +#include #include #include #include #include +#include const char *perf_pmu_name(void) { if (cpum_cf_avail() || cpum_sf_avail()) - return "CPU-measurement facilities (CPUMF)"; + return "CPU-Measurement Facilities (CPU-MF)"; return "pmu"; } EXPORT_SYMBOL(perf_pmu_name); @@ -35,6 +39,8 @@ int perf_num_counters(void) if (cpum_cf_avail()) num += PERF_CPUM_CF_MAX_CTR; + if (cpum_sf_avail()) + num += PERF_CPUM_SF_MAX_CTR; return num; } @@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs) { if (user_mode(regs)) return false; -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) return instruction_pointer(regs) == (unsigned long) &sie_exit; #else return false; @@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs) : PERF_RECORD_MISC_GUEST_KERNEL; } +static unsigned long perf_misc_flags_sf(struct pt_regs *regs) +{ + struct perf_sf_sde_regs *sde_regs; + unsigned long flags; + + sde_regs = (struct perf_sf_sde_regs *) ®s->int_parm_long; + if (sde_regs->in_guest) + flags = user_mode(regs) ? 
PERF_RECORD_MISC_GUEST_USER + : PERF_RECORD_MISC_GUEST_KERNEL; + else + flags = user_mode(regs) ? PERF_RECORD_MISC_USER + : PERF_RECORD_MISC_KERNEL; + return flags; +} + unsigned long perf_misc_flags(struct pt_regs *regs) { + /* Check if the cpum_sf PMU has created the pt_regs structure. + * In this case, perf misc flags can be easily extracted. Otherwise, + * do regular checks on the pt_regs content. + */ + if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA) + if (!regs->gprs[15]) + return perf_misc_flags_sf(regs); + if (is_in_guest(regs)) return perf_misc_guest_flags(regs); @@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs) : PERF_RECORD_MISC_KERNEL; } -void perf_event_print_debug(void) +void print_debug_cf(void) { struct cpumf_ctr_info cf_info; - unsigned long flags; - int cpu; - - if (!cpum_cf_avail()) - return; - - local_irq_save(flags); + int cpu = smp_processor_id(); - cpu = smp_processor_id(); memset(&cf_info, 0, sizeof(cf_info)); if (!qctri(&cf_info)) pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", cpu, cf_info.cfvn, cf_info.csvn, cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); +} + +static void print_debug_sf(void) +{ + struct hws_qsi_info_block si; + int cpu = smp_processor_id(); + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n", + cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + + if (si.as) + pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i" + " bsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.as, si.es, si.cs, si.bsdes, si.tear, si.dear); + if (si.ad) + pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i" + " dsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear); +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + + local_irq_save(flags); + if (cpum_cf_avail()) + print_debug_cf(); + if (cpum_sf_avail()) + print_debug_sf(); local_irq_restore(flags); } +/* Service level infrastructure */ +static void sl_print_counter(struct seq_file *m) +{ + struct cpumf_ctr_info ci; + + memset(&ci, 0, sizeof(ci)); + if (qctri(&ci)) + return; + + seq_printf(m, "CPU-MF: Counter facility: version=%u.%u " + "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl); +} + +static void sl_print_sampling(struct seq_file *m) +{ + struct hws_qsi_info_block si; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + if (!si.as && !si.ad) + return; + + seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu" + " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + if (si.as) + seq_printf(m, "CPU-MF: Sampling facility: mode=basic" + " sample_size=%u\n", si.bsdes); + if (si.ad) + seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic" + " sample_size=%u\n", si.dsdes); +} + +static void service_level_perf_print(struct seq_file *m, + struct service_level *sl) +{ + if (cpum_cf_avail()) + sl_print_counter(m); + if (cpum_sf_avail()) + sl_print_sampling(m); +} + +static struct service_level service_level_perf = { + .seq_print = service_level_perf_print, +}; + +static int __init service_level_perf_register(void) +{ + return register_service_level(&service_level_perf); +} +arch_initcall(service_level_perf_register); + /* See also arch/s390/kernel/traps.c */ static unsigned long __store_trace(struct perf_callchain_entry *entry, unsigned long sp, @@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, 
__store_trace(entry, head, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE); } + +/* Perf defintions for PMU event attributes in sysfs */ +ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%04llx,name=%s\n", + pmu_attr->id, attr->attr.name); +} + +/* Reserve/release functions for sharing perf hardware */ +static DEFINE_SPINLOCK(perf_hw_owner_lock); +static void *perf_sampling_owner; + +int perf_reserve_sampling(void) +{ + int err; + + err = 0; + spin_lock(&perf_hw_owner_lock); + if (perf_sampling_owner) { + pr_warn("The sampling facility is already reserved by %p\n", + perf_sampling_owner); + err = -EBUSY; + } else + perf_sampling_owner = __builtin_return_address(0); + spin_unlock(&perf_hw_owner_lock); + return err; +} +EXPORT_SYMBOL(perf_reserve_sampling); + +void perf_release_sampling(void) +{ + spin_lock(&perf_hw_owner_lock); + WARN_ON(!perf_sampling_owner); + perf_sampling_owner = NULL; + spin_unlock(&perf_hw_owner_lock); +} +EXPORT_SYMBOL(perf_release_sampling); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 7ed0d4e2a43545..dd145321d2151d 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -261,20 +261,18 @@ static inline unsigned long brk_rnd(void) unsigned long arch_randomize_brk(struct mm_struct *mm) { - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); + unsigned long ret; - if (ret < mm->brk) - return mm->brk; - return ret; + ret = PAGE_ALIGN(mm->brk + brk_rnd()); + return (ret > mm->brk) ? ret : mm->brk; } unsigned long randomize_et_dyn(unsigned long base) { - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); + unsigned long ret; if (!(current->flags & PF_RANDOMIZE)) return base; - if (ret < base) - return base; - return ret; + ret = PAGE_ALIGN(base + brk_rnd()); + return (ret > base) ? ret : base; } diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index e65c91c591e8b9..f6be6087a0e98e 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -56,25 +56,26 @@ void update_cr_regs(struct task_struct *task) #ifdef CONFIG_64BIT /* Take care of the enable/disable of transactional execution. */ if (MACHINE_HAS_TE) { - unsigned long cr[3], cr_new[3]; + unsigned long cr, cr_new; - __ctl_store(cr, 0, 2); - cr_new[1] = cr[1]; + __ctl_store(cr, 0, 0); /* Set or clear transaction execution TXC bit 8. */ + cr_new = cr | (1UL << 55); if (task->thread.per_flags & PER_FLAG_NO_TE) - cr_new[0] = cr[0] & ~(1UL << 55); - else - cr_new[0] = cr[0] | (1UL << 55); + cr_new &= ~(1UL << 55); + if (cr_new != cr) + __ctl_load(cr, 0, 0); /* Set or clear transaction execution TDC bits 62 and 63. 
*/ - cr_new[2] = cr[2] & ~3UL; + __ctl_store(cr, 2, 2); + cr_new = cr & ~3UL; if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) - cr_new[2] |= 1UL; + cr_new |= 1UL; else - cr_new[2] |= 2UL; + cr_new |= 2UL; } - if (memcmp(&cr_new, &cr, sizeof(cr))) - __ctl_load(cr_new, 0, 2); + if (cr_new != cr) + __ctl_load(cr_new, 2, 2); } #endif /* Copy user specified PER registers */ @@ -107,15 +108,11 @@ void update_cr_regs(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { set_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_cr_regs(task); } void user_disable_single_step(struct task_struct *task) { clear_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_cr_regs(task); } /* diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 3bac589844a747..9f60467938d177 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -5,7 +5,7 @@ #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL(sie64a); EXPORT_SYMBOL(sie_exit); #endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4444875266ee02..09e2f468f48bc8 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -373,7 +373,7 @@ static void __init setup_lowcore(void) /* * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant - * restart data to the absolute zero lowcore. This is necesary if + * restart data to the absolute zero lowcore. This is necessary if * PSW restart is done on an offline CPU that has lowcore zero. */ lc->restart_stack = (unsigned long) restart_stack; @@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p) setup_vmcoreinfo(); setup_lowcore(); + smp_fill_possible_mask(); cpu_init(); s390_init_cpu_topology(); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index dc4a534650604a..a7125b62a9a6c1 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -59,7 +59,7 @@ enum { }; struct pcpu { - struct cpu cpu; + struct cpu *cpu; struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ unsigned long async_stack; /* async stack for the cpu */ unsigned long panic_stack; /* panic stack for the cpu */ @@ -159,9 +159,9 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) { int order; - set_bit(ec_bit, &pcpu->ec_mask); - order = pcpu_running(pcpu) ? - SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; + if (test_and_set_bit(ec_bit, &pcpu->ec_mask)) + return; + order = pcpu_running(pcpu) ? 
SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; pcpu_sigp_retry(pcpu, order, 0); } @@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) return 0; } -static int __init setup_possible_cpus(char *s) -{ - int max, cpu; +static unsigned int setup_possible_cpus __initdata; - if (kstrtoint(s, 0, &max) < 0) - return 0; - init_cpu_possible(cpumask_of(0)); - for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) - set_cpu_possible(cpu, true); +static int __init _setup_possible_cpus(char *s) +{ + get_option(&s, &setup_possible_cpus); return 0; } -early_param("possible_cpus", setup_possible_cpus); +early_param("possible_cpus", _setup_possible_cpus); #ifdef CONFIG_HOTPLUG_CPU @@ -775,6 +771,17 @@ void __noreturn cpu_die(void) #endif /* CONFIG_HOTPLUG_CPU */ +void __init smp_fill_possible_mask(void) +{ + unsigned int possible, cpu; + + possible = setup_possible_cpus; + if (!possible) + possible = MACHINE_IS_VM ? 64 : nr_cpu_ids; + for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) + set_cpu_possible(cpu, true); +} + void __init smp_prepare_cpus(unsigned int max_cpus) { /* request the 0x1201 emergency signal external interrupt */ @@ -958,7 +965,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; - struct cpu *c = &pcpu_devices[cpu].cpu; + struct cpu *c = pcpu_devices[cpu].cpu; struct device *s = &c->dev; int err = 0; @@ -975,10 +982,15 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, static int smp_add_present_cpu(int cpu) { - struct cpu *c = &pcpu_devices[cpu].cpu; - struct device *s = &c->dev; + struct device *s; + struct cpu *c; int rc; + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + pcpu_devices[cpu].cpu = c; + s = &c->dev; c->hotpluggable = 1; rc = register_cpu(c, cpu); if (rc) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 2440602e6df1e1..d101dae62771c5 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -275,7 +275,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } else { /* - * Set condition code 3 to stop the guest from issueing channel + * Set condition code 3 to stop the guest from issuing channel * I/O instructions. */ kvm_s390_set_psw_cc(vcpu, 3); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dbdab3e7a1a626..0632dc50da78b8 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -74,8 +74,8 @@ static size_t copy_in_kernel(size_t count, void __user *to, /* * Returns kernel address for user virtual address. If the returned address is - * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address - * contains the (negative) exception code. + * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the + * address contains the (negative) exception code. */ #ifdef CONFIG_64BIT diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index e794c88f699a45..3584ed9b20a183 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -293,7 +293,7 @@ static int gmap_alloc_table(struct gmap *gmap, * @addr: address in the guest address space * @len: length of the memory area to unmap * - * Returns 0 if the unmap succeded, -EINVAL if not. + * Returns 0 if the unmap succeeded, -EINVAL if not. 
*/ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) { @@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment); * @from: source address in the parent address space * @to: target address in the guest address space * - * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not. + * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not. */ int gmap_map_segment(struct gmap *gmap, unsigned long from, unsigned long to, unsigned long len) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 16871da3737164..708d60e4006676 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); /* lhi %r4,0 */ EMIT4(0xa7480000); - /* dr %r4,%r12 */ - EMIT2(0x1d4c); + /* dlr %r4,%r12 */ + EMIT4(0xb997004c); break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */ - /* m %r4,(%r13) */ - EMIT4_DISP(0x5c40d000, EMIT_CONST(K)); - /* lr %r5,%r4 */ - EMIT2(0x1854); + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; + /* lhi %r4,0 */ + EMIT4(0xa7480000); + /* dl %r4,(%r13) */ + EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); break; case BPF_S_ALU_MOD_X: /* A %= X */ jit->seen |= SEEN_XREG | SEEN_RET0; @@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); /* lhi %r4,0 */ EMIT4(0xa7480000); - /* dr %r4,%r12 */ - EMIT2(0x1d4c); + /* dlr %r4,%r12 */ + EMIT4(0xb997004c); /* lr %r5,%r4 */ EMIT2(0x1854); break; case BPF_S_ALU_MOD_K: /* A %= K */ + if (K == 1) { + /* lhi %r5,0 */ + EMIT4(0xa7580000); + break; + } /* lhi %r4,0 */ EMIT4(0xa7480000); - /* d %r4,(%r13) */ - EMIT4_DISP(0x5d40d000, EMIT_CONST(K)); + /* dl %r4,(%r13) */ + EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); /* lr %r5,%r4 */ EMIT2(0x1854); break; diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 231cecafc2f147..a32c96761eab5b 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c @@ -26,9 +26,6 @@ #define MAX_NUM_SDB 511 #define MIN_NUM_SDB 1 -#define ALERT_REQ_MASK 0x4000000000000000ul -#define BUFFER_FULL_MASK 0x8000000000000000ul - DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); struct hws_execute_parms { @@ -44,6 +41,7 @@ static DEFINE_MUTEX(hws_sem_oom); static unsigned char hws_flush_all; static unsigned int hws_oom; +static unsigned int hws_alert; static struct workqueue_struct *hws_wq; static unsigned int hws_state; @@ -65,43 +63,6 @@ static unsigned long interval; static unsigned long min_sampler_rate; static unsigned long max_sampler_rate; -static int ssctl(void *buffer) -{ - int cc; - - /* set in order to detect a program check */ - cc = 1; - - asm volatile( - "0: .insn s,0xB2870000,0(%1)\n" - "1: ipm %0\n" - " srl %0,28\n" - "2:\n" - EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "+d" (cc), "+a" (buffer) - : "m" (*((struct hws_ssctl_request_block *)buffer)) - : "cc", "memory"); - - return cc ? -EINVAL : 0 ; -} - -static int qsi(void *buffer) -{ - int cc; - cc = 1; - - asm volatile( - "0: .insn s,0xB2860000,0(%1)\n" - "1: lhi %0,0\n" - "2:\n" - EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "=d" (cc), "+a" (buffer) - : "m" (*((struct hws_qsi_info_block *)buffer)) - : "cc", "memory"); - - return cc ? 
-EINVAL : 0; -} - static void execute_qsi(void *parms) { struct hws_execute_parms *ep = parms; @@ -113,7 +74,7 @@ static void execute_ssctl(void *parms) { struct hws_execute_parms *ep = parms; - ep->rc = ssctl(ep->buffer); + ep->rc = lsctl(ep->buffer); } static int smp_ctl_ssctl_stop(int cpu) @@ -214,17 +175,6 @@ static int smp_ctl_qsi(int cpu) return ep.rc; } -static inline unsigned long *trailer_entry_ptr(unsigned long v) -{ - void *ret; - - ret = (void *)v; - ret += PAGE_SIZE; - ret -= sizeof(struct hws_trailer_entry); - - return (unsigned long *) ret; -} - static void hws_ext_handler(struct ext_code ext_code, unsigned int param32, unsigned long param64) { @@ -233,6 +183,9 @@ static void hws_ext_handler(struct ext_code ext_code, if (!(param32 & CPU_MF_INT_SF_MASK)) return; + if (!hws_alert) + return; + inc_irq_stat(IRQEXT_CMS); atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); @@ -256,16 +209,6 @@ static void init_all_cpu_buffers(void) } } -static int is_link_entry(unsigned long *s) -{ - return *s & 0x1ul ? 1 : 0; -} - -static unsigned long *get_next_sdbt(unsigned long *s) -{ - return (unsigned long *) (*s & ~0x1ul); -} - static int prepare_cpu_buffers(void) { int cpu; @@ -353,7 +296,7 @@ static int allocate_sdbt(int cpu) } *sdbt = sdb; trailer = trailer_entry_ptr(*sdbt); - *trailer = ALERT_REQ_MASK; + *trailer = SDB_TE_ALERT_REQ_MASK; sdbt++; mutex_unlock(&hws_sem_oom); } @@ -829,7 +772,7 @@ static void worker_on_interrupt(unsigned int cpu) trailer = trailer_entry_ptr(*sdbt); /* leave loop if no more work to do */ - if (!(*trailer & BUFFER_FULL_MASK)) { + if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) { done = 1; if (!hws_flush_all) continue; @@ -856,7 +799,7 @@ static void worker_on_interrupt(unsigned int cpu) static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, unsigned long *dear) { - struct hws_data_entry *sample_data_ptr; + struct hws_basic_entry *sample_data_ptr; unsigned long *trailer; trailer = trailer_entry_ptr(*sdbt); @@ -866,7 +809,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, trailer = dear; } - sample_data_ptr = (struct hws_data_entry *)(*sdbt); + sample_data_ptr = (struct hws_basic_entry *)(*sdbt); while ((unsigned long *)sample_data_ptr < trailer) { struct pt_regs *regs = NULL; @@ -1002,6 +945,7 @@ int hwsampler_deallocate(void) goto deallocate_exit; irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + hws_alert = 0; deallocate_sdbt(); hws_state = HWS_DEALLOCATED; @@ -1116,6 +1060,7 @@ int hwsampler_shutdown(void) if (hws_state == HWS_STOPPED) { irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + hws_alert = 0; deallocate_sdbt(); } if (hws_wq) { @@ -1190,6 +1135,7 @@ int hwsampler_start_all(unsigned long rate) hws_oom = 1; hws_flush_all = 0; /* now let them in, 1407 CPUMF external interrupts */ + hws_alert = 1; irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); return 0; diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h index 0022e1ebfbde90..a483d06f2fa78e 100644 --- a/arch/s390/oprofile/hwsampler.h +++ b/arch/s390/oprofile/hwsampler.h @@ -9,27 +9,7 @@ #define HWSAMPLER_H_ #include - -struct hws_qsi_info_block /* QUERY SAMPLING information block */ -{ /* Bit(s) */ - unsigned int b0_13:14; /* 0-13: zeros */ - unsigned int as:1; /* 14: sampling authorisation control*/ - unsigned int b15_21:7; /* 15-21: zeros */ - unsigned int es:1; /* 22: sampling enable control */ - unsigned int b23_29:7; /* 23-29: zeros */ - unsigned int cs:1; /* 30: sampling activation 
control */ - unsigned int:1; /* 31: reserved */ - unsigned int bsdes:16; /* 4-5: size of sampling entry */ - unsigned int:16; /* 6-7: reserved */ - unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ - unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ - unsigned long tear; /* 24-31: TEAR contents */ - unsigned long dear; /* 32-39: DEAR contents */ - unsigned int rsvrd0; /* 40-43: reserved */ - unsigned int cpu_speed; /* 44-47: CPU speed */ - unsigned long long rsvrd1; /* 48-55: reserved */ - unsigned long long rsvrd2; /* 56-63: reserved */ -}; +#include struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */ { /* bytes 0 - 7 Bit(s) */ @@ -68,36 +48,6 @@ struct hws_cpu_buffer { unsigned int stop_mode:1; }; -struct hws_data_entry { - unsigned int def:16; /* 0-15 Data Entry Format */ - unsigned int R:4; /* 16-19 reserved */ - unsigned int U:4; /* 20-23 Number of unique instruct. */ - unsigned int z:2; /* zeros */ - unsigned int T:1; /* 26 PSW DAT mode */ - unsigned int W:1; /* 27 PSW wait state */ - unsigned int P:1; /* 28 PSW Problem state */ - unsigned int AS:2; /* 29-30 PSW address-space control */ - unsigned int I:1; /* 31 entry valid or invalid */ - unsigned int:16; - unsigned int prim_asn:16; /* primary ASN */ - unsigned long long ia; /* Instruction Address */ - unsigned long long gpp; /* Guest Program Parameter */ - unsigned long long hpp; /* Host Program Parameter */ -}; - -struct hws_trailer_entry { - unsigned int f:1; /* 0 - Block Full Indicator */ - unsigned int a:1; /* 1 - Alert request control */ - unsigned long:62; /* 2 - 63: Reserved */ - unsigned long overflow; /* 64 - sample Overflow count */ - unsigned long timestamp; /* 16 - time-stamp */ - unsigned long timestamp1; /* */ - unsigned long reserved1; /* 32 -Reserved */ - unsigned long reserved2; /* */ - unsigned long progusage1; /* 48 - reserved for programming use */ - unsigned long progusage2; /* */ -}; - int hwsampler_setup(void); int hwsampler_shutdown(void); int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 04e1b6a8536297..9ffe645d59898c 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -67,6 +68,21 @@ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling" "(report cpu_type \"timer\""); +static int __oprofile_hwsampler_start(void) +{ + int retval; + + retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); + if (retval) + return retval; + + retval = hwsampler_start_all(oprofile_hw_interval); + if (retval) + hwsampler_deallocate(); + + return retval; +} + static int oprofile_hwsampler_start(void) { int retval; @@ -76,13 +92,13 @@ static int oprofile_hwsampler_start(void) if (!hwsampler_running) return timer_ops.start(); - retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); + retval = perf_reserve_sampling(); if (retval) return retval; - retval = hwsampler_start_all(oprofile_hw_interval); + retval = __oprofile_hwsampler_start(); if (retval) - hwsampler_deallocate(); + perf_release_sampling(); return retval; } @@ -96,6 +112,7 @@ static void oprofile_hwsampler_stop(void) hwsampler_stop_all(); hwsampler_deallocate(); + perf_release_sampling(); return; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index bf7c73d71eef1e..0820362c7b0f19 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ 
-919,17 +919,23 @@ static void zpci_mem_exit(void) kmem_cache_destroy(zdev_fmb_cache); } -static unsigned int s390_pci_probe; +static unsigned int s390_pci_probe = 1; +static unsigned int s390_pci_initialized; char * __init pcibios_setup(char *str) { - if (!strcmp(str, "on")) { - s390_pci_probe = 1; + if (!strcmp(str, "off")) { + s390_pci_probe = 0; return NULL; } return str; } +bool zpci_is_enabled(void) +{ + return s390_pci_initialized; +} + static int __init pci_base_init(void) { int rc; @@ -961,6 +967,7 @@ static int __init pci_base_init(void) if (rc) goto out_find; + s390_pci_initialized = 1; return 0; out_find: @@ -978,5 +985,6 @@ subsys_initcall_sync(pci_base_init); void zpci_rescan(void) { - clp_rescan_pci_devices_simple(); + if (zpci_is_enabled()) + clp_rescan_pci_devices_simple(); } diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 9b83d080902dfb..60c11a629d96d0 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -285,7 +285,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, flags |= ZPCI_TABLE_PROTECTED; if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { - atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); + atomic64_add(nr_pages, &zdev->fmb->mapped_pages); return dma_addr + (offset & ~PAGE_MASK); } @@ -313,7 +313,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, zpci_err_hex(&dma_addr, sizeof(dma_addr)); } - atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages); + atomic64_add(npages, &zdev->fmb->unmapped_pages); iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; dma_free_iommu(zdev, iommu_page_index, npages); } @@ -332,7 +332,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size, if (!page) return NULL; - atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages); pa = page_to_phys(page); memset((void *) pa, 0, size); @@ -343,6 +342,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, return NULL; } + atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages); if (dma_handle) *dma_handle = map; return (void *) pa; @@ -352,8 +352,11 @@ static void s390_dma_free(struct device *dev, size_t size, void *pa, dma_addr_t dma_handle, struct dma_attrs *attrs) { - s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size), - DMA_BIDIRECTIONAL, NULL); + struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + + size = PAGE_ALIGN(size); + atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages); + s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); free_pages((unsigned long) pa, get_order(size)); } diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 800f064b0da7c9..01e251b1da0cb8 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -43,9 +43,8 @@ struct zpci_ccdf_avail { u16 pec; /* PCI event code */ } __packed; -void zpci_event_error(void *data) +static void __zpci_event_error(struct zpci_ccdf_err *ccdf) { - struct zpci_ccdf_err *ccdf = data; struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); zpci_err("error CCDF:\n"); @@ -58,9 +57,14 @@ void zpci_event_error(void *data) pci_name(zdev->pdev), ccdf->pec, ccdf->fid); } -void zpci_event_availability(void *data) +void zpci_event_error(void *data) +{ + if (zpci_is_enabled()) + __zpci_event_error(data); +} + +static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { - struct zpci_ccdf_avail *ccdf = data; struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); struct pci_dev *pdev = zdev ? 
zdev->pdev : NULL; int ret; @@ -75,6 +79,7 @@ void zpci_event_availability(void *data) if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) break; zdev->state = ZPCI_FN_STATE_CONFIGURED; + zdev->fh = ccdf->fh; ret = zpci_enable_device(zdev); if (ret) break; @@ -98,9 +103,14 @@ void zpci_event_availability(void *data) break; case 0x0304: /* Configured -> Standby */ - if (pdev) + if (pdev) { + /* Give the driver a hint that the function is + * already unusable. */ + pdev->error_state = pci_channel_io_perm_failure; pci_stop_and_remove_bus_device(pdev); + } + zdev->fh = ccdf->fh; zpci_disable_device(zdev); zdev->state = ZPCI_FN_STATE_STANDBY; break; @@ -108,6 +118,8 @@ void zpci_event_availability(void *data) clp_rescan_pci_devices(); break; case 0x0308: /* Standby -> Reserved */ + if (!zdev) + break; pci_stop_root_bus(zdev->bus); pci_remove_root_bus(zdev->bus); break; @@ -115,3 +127,9 @@ void zpci_event_availability(void *data) break; } } + +void zpci_event_availability(void *data) +{ + if (zpci_is_enabled()) + __zpci_event_availability(data); +} diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index f3414ade77a37c..fe7471eb0167cb 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -1,6 +1,7 @@ header-y += +generic-y += barrier.h generic-y += clkdev.h generic-y += trace_clock.h generic-y += xor.h diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h deleted file mode 100644 index 0eacb6471e6dab..00000000000000 --- a/arch/score/include/asm/barrier.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _ASM_SCORE_BARRIER_H -#define _ASM_SCORE_BARRIER_H - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#define set_mb(var, value) do {var = value; wmb(); } while (0) - -#endif /* _ASM_SCORE_BARRIER_H */ diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9b0979f4df7a53..ce298317a73e5d 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -66,6 +66,7 @@ config SUPERH32 select PERF_EVENTS select ARCH_HIBERNATION_POSSIBLE if MMU select SPARSE_IRQ + select HAVE_CC_STACKPROTECTOR config SUPERH64 def_bool ARCH = "sh64" @@ -695,20 +696,6 @@ config SECCOMP If unsure, say N. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - depends on SUPERH32 - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above. 
- config SMP bool "Symmetric multi-processing support" depends on SYS_SUPPORTS_SMP diff --git a/arch/sh/Makefile b/arch/sh/Makefile index aed701c7b11bb0..d4d16e4be07c24 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -199,10 +199,6 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y) KBUILD_CFLAGS += -fasynchronous-unwind-tables endif -ifeq ($(CONFIG_CC_STACKPROTECTOR),y) - KBUILD_CFLAGS += -fstack-protector -endif - libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index 72c103dae30099..43715308b06874 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h @@ -26,29 +26,14 @@ #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) #define mb() __asm__ __volatile__ ("synco": : :"memory") #define rmb() mb() -#define wmb() __asm__ __volatile__ ("synco": : :"memory") +#define wmb() mb() #define ctrl_barrier() __icbi(PAGE_OFFSET) -#define read_barrier_depends() do { } while(0) #else -#define mb() __asm__ __volatile__ ("": : :"memory") -#define rmb() mb() -#define wmb() __asm__ __volatile__ ("": : :"memory") #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") -#define read_barrier_depends() do { } while(0) -#endif - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) #endif #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) +#include + #endif /* __ASM_SH_BARRIER_H */ diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 2a0a596ebf67d9..d77f2f6c7ff076 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(empty_zero_page); +#ifdef CONFIG_FLATMEM +/* need in pfn_valid macro */ +EXPORT_SYMBOL(min_low_pfn); +EXPORT_SYMBOL(max_low_pfn); +#endif #define DECLARE_EXPORT(name) \ extern void name(void);EXPORT_SYMBOL(name) diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h index c1b76654ee76a8..ae69eda288f417 100644 --- a/arch/sparc/include/asm/barrier_32.h +++ b/arch/sparc/include/asm/barrier_32.h @@ -1,15 +1,7 @@ #ifndef __SPARC_BARRIER_H #define __SPARC_BARRIER_H -/* XXX Change this if we ever use a PSO mode kernel. 
*/ -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(__var, __value) do { __var = __value; mb(); } while(0) -#define smp_mb() __asm__ __volatile__("":::"memory") -#define smp_rmb() __asm__ __volatile__("":::"memory") -#define smp_wmb() __asm__ __volatile__("":::"memory") -#define smp_read_barrier_depends() do { } while(0) +#include /* for nop() */ +#include #endif /* !(__SPARC_BARRIER_H) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 95d45986f908d4..b5aad964558e75 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define smp_read_barrier_depends() do { } while(0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* !(__SPARC64_BARRIER_H) */ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index e562d3caee5745..ad7e178337f12f 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long); extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); -#define __copy_to_user_inatomic ___copy_to_user -#define __copy_from_user_inatomic ___copy_from_user +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user struct pt_regs; extern unsigned long compute_effective_address(struct pt_regs *, diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 070ed141aac797..76663b019eb520 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask) return 1; #ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) + if (dev_is_pci(dev)) return pci64_dma_supported(to_pci_dev(dev), device_mask); #endif diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 2096468de9b27f..e7e215dfa86668 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops); */ int dma_supported(struct device *dev, u64 mask) { -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) + if (dev_is_pci(dev)) return 1; -#endif + return 0; } EXPORT_SYMBOL(dma_supported); diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 60b19f50c80a8f..b45fe3fb4d2cfb 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b66a5338231e96..b085311dcd0ea9 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -123,11 +123,12 @@ void smp_callin(void) rmb(); set_cpu_online(cpuid, true); - local_irq_enable(); /* idle thread is expected to have preempt disabled */ preempt_disable(); + local_irq_enable(); + cpu_startup_entry(CPUHP_ONLINE); } diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 218b6b23c378f8..01fe9946d388de 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ 
b/arch/sparc/net/bpf_jit_comp.c @@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ALU_MUL_K: /* A *= K */ emit_alu_K(MUL, K); break; - case BPF_S_ALU_DIV_K: /* A /= K */ - emit_alu_K(MUL, K); - emit_read_y(r_A); + case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ + if (K == 1) + break; + emit_write_y(G0); +#ifdef CONFIG_SPARC32 + /* The Sparc v8 architecture requires + * three instructions between a %y + * register write and the first use. + */ + emit_nop(); + emit_nop(); + emit_nop(); +#endif + emit_alu_K(DIV, K); break; case BPF_S_ALU_DIV_X: /* A /= X; */ emit_cmpi(r_X, 0); diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index a9a73da5865d4c..b5a05d050a8f6d 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h @@ -22,59 +22,6 @@ #include #include -/* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. 
- */ -#define read_barrier_depends() do { } while (0) - #define __sync() __insn_mf() #include @@ -125,20 +72,7 @@ mb_incoherent(void) #define mb() fast_mb() #define iob() fast_iob() -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#endif - -#define set_mb(var, value) \ - do { var = value; mb(); } while (0) +#include #endif /* !__ASSEMBLY__ */ #endif /* _ASM_TILE_BARRIER_H */ diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h index a6620e5336b68f..83d6a520f4bd7a 100644 --- a/arch/unicore32/include/asm/barrier.h +++ b/arch/unicore32/include/asm/barrier.h @@ -14,15 +14,6 @@ #define dsb() __asm__ __volatile__ ("" : : : "memory") #define dmb() __asm__ __volatile__ ("" : : : "memory") -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#include #endif /* __UNICORE_BARRIER_H__ */ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0952ecd60ecaf7..cd18b839340034 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -125,6 +125,7 @@ config X86 select RTC_LIB select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 + select HAVE_CC_STACKPROTECTOR config INSTRUCTION_DECODER def_bool y @@ -438,42 +439,26 @@ config X86_INTEL_CE This option compiles in support for the CE4100 SOC for settop boxes and media devices. -config X86_WANT_INTEL_MID +config X86_INTEL_MID bool "Intel MID platform support" depends on X86_32 depends on X86_EXTENDED_PLATFORM - ---help--- - Select to build a kernel capable of supporting Intel MID platform - systems which do not have the PCI legacy interfaces (Moorestown, - Medfield). If you are building for a PC class system say N here. - -if X86_WANT_INTEL_MID - -config X86_INTEL_MID - bool - -config X86_MDFLD - bool "Medfield MID platform" depends on PCI depends on PCI_GOANY depends on X86_IO_APIC - select X86_INTEL_MID select SFI + select I2C select DW_APB_TIMER select APB_TIMER - select I2C - select SPI select INTEL_SCU_IPC - select X86_PLATFORM_DEVICES select MFD_INTEL_MSIC ---help--- - Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin - Internet Device(MID) platform. - Unlike standard x86 PCs, Medfield does not have many legacy devices - nor standard legacy replacement devices/features. e.g. Medfield does - not contain i8259, i8254, HPET, legacy BIOS, most of the io ports. + Select to build a kernel capable of supporting Intel MID (Mobile + Internet Device) platform systems which do not have the PCI legacy + interfaces. If you are building for a PC class system say N here. -endif + Intel MID platforms are based on an Intel processor and chipset which + consume less power than most of the x86 derivatives. config X86_INTEL_LPSS bool "Intel Low Power Subsystem Support" @@ -1080,10 +1065,6 @@ config MICROCODE_OLD_INTERFACE def_bool y depends on MICROCODE -config MICROCODE_INTEL_LIB - def_bool y - depends on MICROCODE_INTEL - config MICROCODE_INTEL_EARLY def_bool n @@ -1617,22 +1598,6 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. 
-config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection" - ---help--- - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above, or a distribution - gcc with the feature backported. Older versions are automatically - detected and for those versions, this configuration option is - ignored. (and a warning is printed during bootup) - source kernel/Kconfig.hz config KEXEC @@ -1728,16 +1693,67 @@ config RELOCATABLE Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address it has been loaded at and the compile time physical address - (CONFIG_PHYSICAL_START) is ignored. + (CONFIG_PHYSICAL_START) is used as the minimum location. -# Relocation on x86-32 needs some additional build support +config RANDOMIZE_BASE + bool "Randomize the address of the kernel image" + depends on RELOCATABLE + depends on !HIBERNATION + default n + ---help--- + Randomizes the physical and virtual address at which the + kernel image is decompressed, as a security feature that + deters exploit attempts relying on knowledge of the location + of kernel internals. + + Entropy is generated using the RDRAND instruction if it is + supported. If RDTSC is supported, it is used as well. If + neither RDRAND nor RDTSC are supported, then randomness is + read from the i8254 timer. + + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET, + and aligned according to PHYSICAL_ALIGN. Since the kernel is + built using 2GiB addressing, and PHYSICAL_ALIGN must be at a + minimum of 2MiB, only 10 bits of entropy is theoretically + possible. At best, due to page table layouts, 64-bit can use + 9 bits of entropy and 32-bit uses 8 bits. + + If unsure, say N. + +config RANDOMIZE_BASE_MAX_OFFSET + hex "Maximum kASLR offset allowed" if EXPERT + depends on RANDOMIZE_BASE + range 0x0 0x20000000 if X86_32 + default "0x20000000" if X86_32 + range 0x0 0x40000000 if X86_64 + default "0x40000000" if X86_64 + ---help--- + The lesser of RANDOMIZE_BASE_MAX_OFFSET and available physical + memory is used to determine the maximal offset in bytes that will + be applied to the kernel when kernel Address Space Layout + Randomization (kASLR) is active. This must be a multiple of + PHYSICAL_ALIGN. + + On 32-bit this is limited to 512MiB by page table layouts. The + default is 512MiB. + + On 64-bit this is limited by how the kernel fixmap page table is + positioned, so this cannot be larger than 1GiB currently. Without + RANDOMIZE_BASE, there is a 512MiB to 1.5GiB split between kernel + and modules. When RANDOMIZE_BASE_MAX_OFFSET is above 512MiB, the + modules area will shrink to compensate, up to the current maximum + 1GiB to 1GiB split. The default is 1GiB.
+ +# Relocation on x86 needs some additional build support config X86_NEED_RELOCS def_bool y - depends on X86_32 && RELOCATABLE + depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE) config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" - default "0x1000000" + default "0x200000" range 0x2000 0x1000000 if X86_32 range 0x200000 0x1000000 if X86_64 ---help--- @@ -2393,6 +2409,14 @@ config X86_DMA_REMAP bool depends on STA2X11 +config IOSF_MBI + bool + depends on PCI + ---help--- + To be selected by modules requiring access to the Intel OnChip System + Fabric (IOSF) Sideband MailBox Interface (MBI). For MBI platforms + enumerable by PCI. + source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 57d021507120ed..13b22e0f681dbe 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -89,13 +89,11 @@ else KBUILD_CFLAGS += -maccumulate-outgoing-args endif +# Make sure compiler does not have buggy stack-protector support. ifdef CONFIG_CC_STACKPROTECTOR cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh - ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) - stackp-y := -fstack-protector - KBUILD_CFLAGS += $(stackp-y) - else - $(warning stack protector enabled but no compiler support) + ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) + $(warning stack-protector enabled but compiler support broken) endif endif diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index d9c11956fce0e9..de706691800521 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -20,7 +20,7 @@ targets := vmlinux.bin setup.bin setup.elf bzImage targets += fdimage fdimage144 fdimage288 image.iso mtools.conf subdir- := compressed -setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o +setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o setup-y += video-mode.o version.o diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S index 1dfbf64e52a2b0..d401b4a262b09a 100644 --- a/arch/x86/boot/bioscall.S +++ b/arch/x86/boot/bioscall.S @@ -1,6 +1,6 @@ /* ----------------------------------------------------------------------- * - * Copyright 2009 Intel Corporation; author H. Peter Anvin + * Copyright 2009-2014 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2 or (at your @@ -13,8 +13,8 @@ * touching registers they shouldn't be. 
*/ - .code16gcc - .text + .code16 + .section ".inittext","ax" .globl intcall .type intcall, @function intcall: diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index ef72baeff48475..50f8c5e0f37e1d 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -26,9 +26,8 @@ #include #include #include "bitops.h" -#include -#include #include "ctype.h" +#include "cpuflags.h" /* Useful macros */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) @@ -307,14 +306,7 @@ static inline int cmdline_find_option_bool(const char *option) return __cmdline_find_option_bool(cmd_line_ptr, option); } - /* cpu.c, cpucheck.c */ -struct cpu_features { - int level; /* Family, or 64 for x86-64 */ - int model; - u32 flags[NCAPINTS]; -}; -extern struct cpu_features cpu; int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); int validate_cpu(void); diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index c8a6792e78423a..0fcd9133790c56 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -28,7 +28,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ - $(obj)/piggy.o + $(obj)/piggy.o $(obj)/cpuflags.o $(obj)/aslr.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c new file mode 100644 index 00000000000000..90a21f430117d1 --- /dev/null +++ b/arch/x86/boot/compressed/aslr.c @@ -0,0 +1,316 @@ +#include "misc.h" + +#ifdef CONFIG_RANDOMIZE_BASE +#include +#include +#include + +#include +#include +#include +#include +#include + +/* Simplified build-specific string for starting entropy. */ +static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" + LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; + +#define I8254_PORT_CONTROL 0x43 +#define I8254_PORT_COUNTER0 0x40 +#define I8254_CMD_READBACK 0xC0 +#define I8254_SELECT_COUNTER0 0x02 +#define I8254_STATUS_NOTREADY 0x40 +static inline u16 i8254(void) +{ + u16 status, timer; + + do { + outb(I8254_PORT_CONTROL, + I8254_CMD_READBACK | I8254_SELECT_COUNTER0); + status = inb(I8254_PORT_COUNTER0); + timer = inb(I8254_PORT_COUNTER0); + timer |= inb(I8254_PORT_COUNTER0) << 8; + } while (status & I8254_STATUS_NOTREADY); + + return timer; +} + +static unsigned long rotate_xor(unsigned long hash, const void *area, + size_t size) +{ + size_t i; + unsigned long *ptr = (unsigned long *)area; + + for (i = 0; i < size / sizeof(hash); i++) { + /* Rotate by odd number of bits and XOR. */ + hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); + hash ^= ptr[i]; + } + + return hash; +} + +/* Attempt to create a simple but unpredictable starting entropy. 
*/ +static unsigned long get_random_boot(void) +{ + unsigned long hash = 0; + + hash = rotate_xor(hash, build_str, sizeof(build_str)); + hash = rotate_xor(hash, real_mode, sizeof(*real_mode)); + + return hash; +} + +static unsigned long get_random_long(void) +{ +#ifdef CONFIG_X86_64 + const unsigned long mix_const = 0x5d6008cbf3848dd3UL; +#else + const unsigned long mix_const = 0x3f39e593UL; +#endif + unsigned long raw, random = get_random_boot(); + bool use_i8254 = true; + + debug_putstr("KASLR using"); + + if (has_cpuflag(X86_FEATURE_RDRAND)) { + debug_putstr(" RDRAND"); + if (rdrand_long(&raw)) { + random ^= raw; + use_i8254 = false; + } + } + + if (has_cpuflag(X86_FEATURE_TSC)) { + debug_putstr(" RDTSC"); + rdtscll(raw); + + random ^= raw; + use_i8254 = false; + } + + if (use_i8254) { + debug_putstr(" i8254"); + random ^= i8254(); + } + + /* Circular multiply for better bit diffusion */ + asm("mul %3" + : "=a" (random), "=d" (raw) + : "a" (random), "rm" (mix_const)); + random += raw; + + debug_putstr("...\n"); + + return random; +} + +struct mem_vector { + unsigned long start; + unsigned long size; +}; + +#define MEM_AVOID_MAX 5 +struct mem_vector mem_avoid[MEM_AVOID_MAX]; + +static bool mem_contains(struct mem_vector *region, struct mem_vector *item) +{ + /* Item at least partially before region. */ + if (item->start < region->start) + return false; + /* Item at least partially after region. */ + if (item->start + item->size > region->start + region->size) + return false; + return true; +} + +static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two) +{ + /* Item one is entirely before item two. */ + if (one->start + one->size <= two->start) + return false; + /* Item one is entirely after item two. */ + if (one->start >= two->start + two->size) + return false; + return true; +} + +static void mem_avoid_init(unsigned long input, unsigned long input_size, + unsigned long output, unsigned long output_size) +{ + u64 initrd_start, initrd_size; + u64 cmd_line, cmd_line_size; + unsigned long unsafe, unsafe_len; + char *ptr; + + /* + * Avoid the region that is unsafe to overlap during + * decompression (see calculations at top of misc.c). + */ + unsafe_len = (output_size >> 12) + 32768 + 18; + unsafe = (unsigned long)input + input_size - unsafe_len; + mem_avoid[0].start = unsafe; + mem_avoid[0].size = unsafe_len; + + /* Avoid initrd. */ + initrd_start = (u64)real_mode->ext_ramdisk_image << 32; + initrd_start |= real_mode->hdr.ramdisk_image; + initrd_size = (u64)real_mode->ext_ramdisk_size << 32; + initrd_size |= real_mode->hdr.ramdisk_size; + mem_avoid[1].start = initrd_start; + mem_avoid[1].size = initrd_size; + + /* Avoid kernel command line. */ + cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32; + cmd_line |= real_mode->hdr.cmd_line_ptr; + /* Calculate size of cmd_line. */ + ptr = (char *)(unsigned long)cmd_line; + for (cmd_line_size = 0; ptr[cmd_line_size++]; ) + ; + mem_avoid[2].start = cmd_line; + mem_avoid[2].size = cmd_line_size; + + /* Avoid heap memory. */ + mem_avoid[3].start = (unsigned long)free_mem_ptr; + mem_avoid[3].size = BOOT_HEAP_SIZE; + + /* Avoid stack memory. */ + mem_avoid[4].start = (unsigned long)free_mem_end_ptr; + mem_avoid[4].size = BOOT_STACK_SIZE; +} + +/* Does this memory vector overlap a known avoided area? 
*/ +bool mem_avoid_overlap(struct mem_vector *img) +{ + int i; + + for (i = 0; i < MEM_AVOID_MAX; i++) { + if (mem_overlaps(img, &mem_avoid[i])) + return true; + } + + return false; +} + +unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN]; +unsigned long slot_max = 0; + +static void slots_append(unsigned long addr) +{ + /* Overflowing the slots list should be impossible. */ + if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET / + CONFIG_PHYSICAL_ALIGN) + return; + + slots[slot_max++] = addr; +} + +static unsigned long slots_fetch_random(void) +{ + /* Handle case of no slots stored. */ + if (slot_max == 0) + return 0; + + return slots[get_random_long() % slot_max]; +} + +static void process_e820_entry(struct e820entry *entry, + unsigned long minimum, + unsigned long image_size) +{ + struct mem_vector region, img; + + /* Skip non-RAM entries. */ + if (entry->type != E820_RAM) + return; + + /* Ignore entries entirely above our maximum. */ + if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET) + return; + + /* Ignore entries entirely below our minimum. */ + if (entry->addr + entry->size < minimum) + return; + + region.start = entry->addr; + region.size = entry->size; + + /* Potentially raise address to minimum location. */ + if (region.start < minimum) + region.start = minimum; + + /* Potentially raise address to meet alignment requirements. */ + region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); + + /* Did we raise the address above the bounds of this e820 region? */ + if (region.start > entry->addr + entry->size) + return; + + /* Reduce size by any delta from the original address. */ + region.size -= region.start - entry->addr; + + /* Reduce maximum size to fit end of image within maximum limit. */ + if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET) + region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start; + + /* Walk each aligned slot and check for avoided areas. */ + for (img.start = region.start, img.size = image_size ; + mem_contains(&region, &img) ; + img.start += CONFIG_PHYSICAL_ALIGN) { + if (mem_avoid_overlap(&img)) + continue; + slots_append(img.start); + } +} + +static unsigned long find_random_addr(unsigned long minimum, + unsigned long size) +{ + int i; + unsigned long addr; + + /* Make sure minimum is aligned. */ + minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); + + /* Verify potential e820 positions, appending to slots list. */ + for (i = 0; i < real_mode->e820_entries; i++) { + process_e820_entry(&real_mode->e820_map[i], minimum, size); + } + + return slots_fetch_random(); +} + +unsigned char *choose_kernel_location(unsigned char *input, + unsigned long input_size, + unsigned char *output, + unsigned long output_size) +{ + unsigned long choice = (unsigned long)output; + unsigned long random; + + if (cmdline_find_option_bool("nokaslr")) { + debug_putstr("KASLR disabled...\n"); + goto out; + } + + /* Record the various known unsafe memory ranges. */ + mem_avoid_init((unsigned long)input, input_size, + (unsigned long)output, output_size); + + /* Walk e820 and find a random address. */ + random = find_random_addr(choice, output_size); + if (!random) { + debug_putstr("KASLR could not find suitable E820 region...\n"); + goto out; + } + + /* Always enforce the minimum.
*/ + if (random < choice) + goto out; + + choice = random; +out: + return (unsigned char *)choice; +} + +#endif /* CONFIG_RANDOMIZE_BASE */ diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c index bffd73b45b1f27..b68e3033e6b9bd 100644 --- a/arch/x86/boot/compressed/cmdline.c +++ b/arch/x86/boot/compressed/cmdline.c @@ -1,6 +1,6 @@ #include "misc.h" -#ifdef CONFIG_EARLY_PRINTK +#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE static unsigned long fs; static inline void set_fs(unsigned long seg) diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c new file mode 100644 index 00000000000000..aa313466118b2d --- /dev/null +++ b/arch/x86/boot/compressed/cpuflags.c @@ -0,0 +1,12 @@ +#ifdef CONFIG_RANDOMIZE_BASE + +#include "../cpuflags.c" + +bool has_cpuflag(int flag) +{ + get_cpuflags(); + + return test_bit(flag, cpu.flags); +} + +#endif diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 5d6f6891b1881e..9116aac232c746 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -117,9 +117,11 @@ preferred_addr: addl %eax, %ebx notl %eax andl %eax, %ebx -#else - movl $LOAD_PHYSICAL_ADDR, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx + jge 1f #endif + movl $LOAD_PHYSICAL_ADDR, %ebx +1: /* Target address to relocate to for decompression */ addl $z_extract_offset, %ebx @@ -191,14 +193,14 @@ relocated: leal boot_heap(%ebx), %eax pushl %eax /* heap area */ pushl %esi /* real mode pointer */ - call decompress_kernel + call decompress_kernel /* returns kernel location in %eax */ addl $24, %esp /* * Jump to the decompressed kernel. */ xorl %ebx, %ebx - jmp *%ebp + jmp *%eax /* * Stack and heap for uncompression diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index c337422b575de1..c5c1ae0997e7b2 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -94,9 +94,11 @@ ENTRY(startup_32) addl %eax, %ebx notl %eax andl %eax, %ebx -#else - movl $LOAD_PHYSICAL_ADDR, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx + jge 1f #endif + movl $LOAD_PHYSICAL_ADDR, %ebx +1: /* Target address to relocate to for decompression */ addl $z_extract_offset, %ebx @@ -269,9 +271,11 @@ preferred_addr: addq %rax, %rbp notq %rax andq %rax, %rbp -#else - movq $LOAD_PHYSICAL_ADDR, %rbp + cmpq $LOAD_PHYSICAL_ADDR, %rbp + jge 1f #endif + movq $LOAD_PHYSICAL_ADDR, %rbp +1: /* Target address to relocate to for decompression */ leaq z_extract_offset(%rbp), %rbx @@ -339,13 +343,13 @@ relocated: movl $z_input_len, %ecx /* input_len */ movq %rbp, %r8 /* output target address */ movq $z_output_len, %r9 /* decompressed length */ - call decompress_kernel + call decompress_kernel /* returns kernel location in %rax */ popq %rsi /* * Jump to the decompressed kernel. 
*/ - jmp *%rbp + jmp *%rax .code32 no_longmode: diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 434f077d2c4d7b..196eaf373a06ad 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -112,14 +112,8 @@ struct boot_params *real_mode; /* Pointer to real-mode data */ void *memset(void *s, int c, size_t n); void *memcpy(void *dest, const void *src, size_t n); -#ifdef CONFIG_X86_64 -#define memptr long -#else -#define memptr unsigned -#endif - -static memptr free_mem_ptr; -static memptr free_mem_end_ptr; +memptr free_mem_ptr; +memptr free_mem_end_ptr; static char *vidmem; static int vidport; @@ -395,7 +389,7 @@ static void parse_elf(void *output) free(phdrs); } -asmlinkage void decompress_kernel(void *rmode, memptr heap, +asmlinkage void *decompress_kernel(void *rmode, memptr heap, unsigned char *input_data, unsigned long input_len, unsigned char *output, @@ -422,6 +416,10 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; + output = choose_kernel_location(input_data, input_len, + output, output_len); + + /* Validate memory location choices. */ if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) error("Destination address inappropriately aligned"); #ifdef CONFIG_X86_64 @@ -441,5 +439,5 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, parse_elf(output); handle_relocations(output, output_len); debug_putstr("done.\nBooting the kernel.\n"); - return; + return output; } diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 674019d8e23559..24e3e569a13ce9 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -23,7 +23,15 @@ #define BOOT_BOOT_H #include "../ctype.h" +#ifdef CONFIG_X86_64 +#define memptr long +#else +#define memptr unsigned +#endif + /* misc.c */ +extern memptr free_mem_ptr; +extern memptr free_mem_end_ptr; extern struct boot_params *real_mode; /* Pointer to real-mode data */ void __putstr(const char *s); #define error_putstr(__x) __putstr(__x) @@ -39,23 +47,40 @@ static inline void debug_putstr(const char *s) #endif -#ifdef CONFIG_EARLY_PRINTK - +#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE /* cmdline.c */ int cmdline_find_option(const char *option, char *buffer, int bufsize); int cmdline_find_option_bool(const char *option); +#endif -/* early_serial_console.c */ -extern int early_serial_base; -void console_init(void); +#if CONFIG_RANDOMIZE_BASE +/* aslr.c */ +unsigned char *choose_kernel_location(unsigned char *input, + unsigned long input_size, + unsigned char *output, + unsigned long output_size); +/* cpuflags.c */ +bool has_cpuflag(int flag); #else +static inline +unsigned char *choose_kernel_location(unsigned char *input, + unsigned long input_size, + unsigned char *output, + unsigned long output_size) +{ + return output; +} +#endif +#ifdef CONFIG_EARLY_PRINTK /* early_serial_console.c */ +extern int early_serial_base; +void console_init(void); +#else static const int early_serial_base; static inline void console_init(void) { } - #endif #endif diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S index 11f272c6f5e9e0..1eb7d298b47d54 100644 --- a/arch/x86/boot/copy.S +++ b/arch/x86/boot/copy.S @@ -14,7 +14,7 @@ * Memory copy routines */ - .code16gcc + .code16 .text GLOBAL(memcpy) @@ -30,7 +30,7 @@ GLOBAL(memcpy) rep; movsb popw %di popw %si - ret + retl ENDPROC(memcpy) GLOBAL(memset) @@ -45,25 +45,25 @@ GLOBAL(memset) andw $3, %cx rep; stosb popw %di 
- ret + retl ENDPROC(memset) GLOBAL(copy_from_fs) pushw %ds pushw %fs popw %ds - call memcpy + calll memcpy popw %ds - ret + retl ENDPROC(copy_from_fs) GLOBAL(copy_to_fs) pushw %es pushw %fs popw %es - call memcpy + calll memcpy popw %es - ret + retl ENDPROC(copy_to_fs) #if 0 /* Not currently used, but can be enabled as needed */ @@ -71,17 +71,17 @@ GLOBAL(copy_from_gs) pushw %ds pushw %gs popw %ds - call memcpy + calll memcpy popw %ds - ret + retl ENDPROC(copy_from_gs) GLOBAL(copy_to_gs) pushw %es pushw %gs popw %es - call memcpy + calll memcpy popw %es - ret + retl ENDPROC(copy_to_gs) #endif diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 4d3ff037201ffb..100a9a10076a64 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c @@ -28,8 +28,6 @@ #include #include -struct cpu_features cpu; -static u32 cpu_vendor[3]; static u32 err_flags[NCAPINTS]; static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY; @@ -69,92 +67,8 @@ static int is_transmeta(void) cpu_vendor[2] == A32('M', 'x', '8', '6'); } -static int has_fpu(void) -{ - u16 fcw = -1, fsw = -1; - u32 cr0; - - asm("movl %%cr0,%0" : "=r" (cr0)); - if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { - cr0 &= ~(X86_CR0_EM|X86_CR0_TS); - asm volatile("movl %0,%%cr0" : : "r" (cr0)); - } - - asm volatile("fninit ; fnstsw %0 ; fnstcw %1" - : "+m" (fsw), "+m" (fcw)); - - return fsw == 0 && (fcw & 0x103f) == 0x003f; -} - -static int has_eflag(u32 mask) -{ - u32 f0, f1; - - asm("pushfl ; " - "pushfl ; " - "popl %0 ; " - "movl %0,%1 ; " - "xorl %2,%1 ; " - "pushl %1 ; " - "popfl ; " - "pushfl ; " - "popl %1 ; " - "popfl" - : "=&r" (f0), "=&r" (f1) - : "ri" (mask)); - - return !!((f0^f1) & mask); -} - -static void get_flags(void) -{ - u32 max_intel_level, max_amd_level; - u32 tfms; - - if (has_fpu()) - set_bit(X86_FEATURE_FPU, cpu.flags); - - if (has_eflag(X86_EFLAGS_ID)) { - asm("cpuid" - : "=a" (max_intel_level), - "=b" (cpu_vendor[0]), - "=d" (cpu_vendor[1]), - "=c" (cpu_vendor[2]) - : "a" (0)); - - if (max_intel_level >= 0x00000001 && - max_intel_level <= 0x0000ffff) { - asm("cpuid" - : "=a" (tfms), - "=c" (cpu.flags[4]), - "=d" (cpu.flags[0]) - : "a" (0x00000001) - : "ebx"); - cpu.level = (tfms >> 8) & 15; - cpu.model = (tfms >> 4) & 15; - if (cpu.level >= 6) - cpu.model += ((tfms >> 16) & 0xf) << 4; - } - - asm("cpuid" - : "=a" (max_amd_level) - : "a" (0x80000000) - : "ebx", "ecx", "edx"); - - if (max_amd_level >= 0x80000001 && - max_amd_level <= 0x8000ffff) { - u32 eax = 0x80000001; - asm("cpuid" - : "+a" (eax), - "=c" (cpu.flags[6]), - "=d" (cpu.flags[1]) - : : "ebx"); - } - } -} - /* Returns a bitmask of which words we have error bits in */ -static int check_flags(void) +static int check_cpuflags(void) { u32 err; int i; @@ -187,8 +101,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) if (has_eflag(X86_EFLAGS_AC)) cpu.level = 4; - get_flags(); - err = check_flags(); + get_cpuflags(); + err = check_cpuflags(); if (test_bit(X86_FEATURE_LM, cpu.flags)) cpu.level = 64; @@ -207,8 +121,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) eax &= ~(1 << 15); asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - get_flags(); /* Make sure it really did something */ - err = check_flags(); + get_cpuflags(); /* Make sure it really did something */ + err = check_cpuflags(); } else if (err == 0x01 && !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) && is_centaur() && cpu.model >= 6) { @@ -223,7 +137,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) 
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); set_bit(X86_FEATURE_CX8, cpu.flags); - err = check_flags(); + err = check_cpuflags(); } else if (err == 0x01 && is_transmeta()) { /* Transmeta might have masked feature bits in word 0 */ @@ -238,7 +152,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) : : "ecx", "ebx"); asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - err = check_flags(); + err = check_cpuflags(); } if (err_flags_ptr) diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c new file mode 100644 index 00000000000000..a9fcb7cfb2411f --- /dev/null +++ b/arch/x86/boot/cpuflags.c @@ -0,0 +1,104 @@ +#include +#include "bitops.h" + +#include +#include +#include +#include "cpuflags.h" + +struct cpu_features cpu; +u32 cpu_vendor[3]; + +static bool loaded_flags; + +static int has_fpu(void) +{ + u16 fcw = -1, fsw = -1; + unsigned long cr0; + + asm volatile("mov %%cr0,%0" : "=r" (cr0)); + if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { + cr0 &= ~(X86_CR0_EM|X86_CR0_TS); + asm volatile("mov %0,%%cr0" : : "r" (cr0)); + } + + asm volatile("fninit ; fnstsw %0 ; fnstcw %1" + : "+m" (fsw), "+m" (fcw)); + + return fsw == 0 && (fcw & 0x103f) == 0x003f; +} + +int has_eflag(unsigned long mask) +{ + unsigned long f0, f1; + + asm volatile("pushf \n\t" + "pushf \n\t" + "pop %0 \n\t" + "mov %0,%1 \n\t" + "xor %2,%1 \n\t" + "push %1 \n\t" + "popf \n\t" + "pushf \n\t" + "pop %1 \n\t" + "popf" + : "=&r" (f0), "=&r" (f1) + : "ri" (mask)); + + return !!((f0^f1) & mask); +} + +/* Handle x86_32 PIC using ebx. */ +#if defined(__i386__) && defined(__PIC__) +# define EBX_REG "=r" +#else +# define EBX_REG "=b" +#endif + +static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d) +{ + asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t" + "cpuid \n\t" + ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t" + : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b) + : "a" (id) + ); +} + +void get_cpuflags(void) +{ + u32 max_intel_level, max_amd_level; + u32 tfms; + u32 ignored; + + if (loaded_flags) + return; + loaded_flags = true; + + if (has_fpu()) + set_bit(X86_FEATURE_FPU, cpu.flags); + + if (has_eflag(X86_EFLAGS_ID)) { + cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2], + &cpu_vendor[1]); + + if (max_intel_level >= 0x00000001 && + max_intel_level <= 0x0000ffff) { + cpuid(0x1, &tfms, &ignored, &cpu.flags[4], + &cpu.flags[0]); + cpu.level = (tfms >> 8) & 15; + cpu.model = (tfms >> 4) & 15; + if (cpu.level >= 6) + cpu.model += ((tfms >> 16) & 0xf) << 4; + } + + cpuid(0x80000000, &max_amd_level, &ignored, &ignored, + &ignored); + + if (max_amd_level >= 0x80000001 && + max_amd_level <= 0x8000ffff) { + cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6], + &cpu.flags[1]); + } + } +} diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h new file mode 100644 index 00000000000000..ea97697e51e40f --- /dev/null +++ b/arch/x86/boot/cpuflags.h @@ -0,0 +1,19 @@ +#ifndef BOOT_CPUFLAGS_H +#define BOOT_CPUFLAGS_H + +#include +#include + +struct cpu_features { + int level; /* Family, or 64 for x86-64 */ + int model; + u32 flags[NCAPINTS]; +}; + +extern struct cpu_features cpu; +extern u32 cpu_vendor[3]; + +int has_eflag(unsigned long mask); +void get_cpuflags(void); + +#endif diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 9ec06a1f6d61b2..ec3b8ba68096c5 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -391,7 +391,14 @@ xloadflags: #else # define XLF23 0 #endif - .word XLF0 | XLF1 | XLF23 + +#if defined(CONFIG_X86_64) && 
defined(CONFIG_EFI) && defined(CONFIG_KEXEC) +# define XLF4 XLF_EFI_KEXEC +#else +# define XLF4 0 +#endif + + .word XLF0 | XLF1 | XLF23 | XLF4 cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, #added with boot protocol diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 0d9ec770f2f877..e6a92455740e6a 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h @@ -39,6 +39,20 @@ #ifdef CONFIG_ARCH_RANDOM +/* Instead of arch_get_random_long() when alternatives haven't run. */ +static inline int rdrand_long(unsigned long *v) +{ + int ok; + asm volatile("1: " RDRAND_LONG "\n\t" + "jc 2f\n\t" + "decl %0\n\t" + "jnz 1b\n\t" + "2:" + : "=r" (ok), "=a" (*v) + : "0" (RDRAND_RETRY_LOOPS)); + return ok; +} + #define GET_RANDOM(name, type, rdrand, nop) \ static inline int name(type *v) \ { \ @@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); #endif /* CONFIG_X86_64 */ +#else + +static inline int rdrand_long(unsigned long *v) +{ + return 0; +} + #endif /* CONFIG_ARCH_RANDOM */ extern void x86_init_rdrand(struct cpuinfo_x86 *c); diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index c6cd358a1eec80..04a48903b2eb31 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -92,12 +92,53 @@ #endif #define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) -#else +#else /* !SMP */ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; barrier(); } while (0) +#endif /* SMP */ + +#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) + +/* + * For either of these options x86 doesn't have a strong TSO memory + * model and we should fall back to full barriers. 
+ */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#else /* regular x86 TSO memory ordering */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 89270b4318db8b..e099f9502acec8 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -216,6 +216,7 @@ #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */ #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 65c6e6e3a5521b..3b978c472d08b5 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -1,6 +1,24 @@ #ifndef _ASM_X86_EFI_H #define _ASM_X86_EFI_H +/* + * We map the EFI regions needed for runtime services non-contiguously, + * with preserved alignment on virtual addresses starting from -4G down + * for a total max space of 64G. This way, we provide for stable runtime + * services addresses across kernels so that a kexec'd kernel can still + * use them. + * + * This is the main reason why we're doing stable VA mappings for RT + * services. + * + * This flag is used in conjuction with a chicken bit called + * "efi=old_map" which can be used as a fallback to the old runtime + * services mapping method in case there's some b0rkage with a + * particular EFI implementation (haha, it is hard to hold up the + * sarcasm here...). + */ +#define EFI_OLD_MEMMAP EFI_ARCH_1 + #ifdef CONFIG_X86_32 #define EFI_LOADER_SIGNATURE "EL32" @@ -69,24 +87,31 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \ (u64)(a4), (u64)(a5), (u64)(a6)) +#define _efi_call_virtX(x, f, ...) 
\ +({ \ + efi_status_t __s; \ + \ + efi_sync_low_kernel_mappings(); \ + preempt_disable(); \ + __s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \ + preempt_enable(); \ + __s; \ +}) + #define efi_call_virt0(f) \ - efi_call0((efi.systab->runtime->f)) -#define efi_call_virt1(f, a1) \ - efi_call1((efi.systab->runtime->f), (u64)(a1)) -#define efi_call_virt2(f, a1, a2) \ - efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2)) -#define efi_call_virt3(f, a1, a2, a3) \ - efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3)) -#define efi_call_virt4(f, a1, a2, a3, a4) \ - efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4)) -#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ - efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4), (u64)(a5)) -#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ - efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) + _efi_call_virtX(0, f) +#define efi_call_virt1(f, a1) \ + _efi_call_virtX(1, f, (u64)(a1)) +#define efi_call_virt2(f, a1, a2) \ + _efi_call_virtX(2, f, (u64)(a1), (u64)(a2)) +#define efi_call_virt3(f, a1, a2, a3) \ + _efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3)) +#define efi_call_virt4(f, a1, a2, a3, a4) \ + _efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4)) +#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ + _efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5)) +#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ + _efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); @@ -95,12 +120,28 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, extern int add_efi_memmap; extern unsigned long x86_efi_facility; +extern struct efi_scratch efi_scratch; extern void efi_set_executable(efi_memory_desc_t *md, bool executable); extern int efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long size); +extern void __init efi_map_region(efi_memory_desc_t *md); +extern void __init efi_map_region_fixed(efi_memory_desc_t *md); +extern void efi_sync_low_kernel_mappings(void); +extern void efi_setup_page_tables(void); +extern void __init old_map_region(efi_memory_desc_t *md); + +struct efi_setup_data { + u64 fw_vendor; + u64 runtime; + u64 tables; + u64 smbios; + u64 reserved[8]; +}; + +extern u64 efi_setup; #ifdef CONFIG_EFI @@ -110,7 +151,7 @@ static inline bool efi_is_native(void) } extern struct console early_efi_console; - +extern void parse_efi_setup(u64 phys_addr, u32 data_len); #else /* * IF EFI is not configured, have the EFI calls return -ENOSYS. 
@@ -122,6 +163,7 @@ extern struct console early_efi_console; #define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS) #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) +static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} #endif /* CONFIG_EFI */ #endif /* _ASM_X86_EFI_H */ diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index c49a613c6452fc..cea1c76d49bf5b 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk) /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. "m" is a random variable that should be in L1 */ - alternative_input( - ASM_NOP8 ASM_NOP2, - "emms\n\t" /* clear stack tags */ - "fildl %P[addr]", /* set F?P to defined value */ - X86_FEATURE_FXSAVE_LEAK, - [addr] "m" (tsk->thread.fpu.has_fpu)); + if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) { + asm volatile( + "fnclex\n\t" + "emms\n\t" + "fildl %P[addr]" /* set F?P to defined value */ + : : [addr] "m" (tsk->thread.fpu.has_fpu)); + } return fpu_restore_checking(&tsk->thread.fpu); } diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index be27ba1e947a95..b4c1f545343663 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { - int ret = 0; - - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) - return -EFAULT; - - asm volatile("\t" ASM_STAC "\n" - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" - "2:\t" ASM_CLAC "\n" - "\t.section .fixup, \"ax\"\n" - "3:\tmov %3, %0\n" - "\tjmp 2b\n" - "\t.previous\n" - _ASM_EXTABLE(1b, 3b) - : "+r" (ret), "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "1" (oldval) - : "memory" - ); - - *uval = oldval; - return ret; + return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval); } #endif diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index cba45d99ac1aad..67d69b8e2d20d0 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -191,6 +191,9 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); #define trace_interrupt interrupt #endif +#define VECTOR_UNDEFINED -1 +#define VECTOR_RETRIGGERED -2 + typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); extern void setup_vector_irq(int cpu); diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 459769d39263e2..e34e097b6f9de4 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h @@ -51,10 +51,41 @@ struct devs_id { enum intel_mid_cpu_type { /* 1 was Moorestown */ INTEL_MID_CPU_CHIP_PENWELL = 2, + INTEL_MID_CPU_CHIP_CLOVERVIEW, + INTEL_MID_CPU_CHIP_TANGIER, }; extern enum intel_mid_cpu_type __intel_mid_cpu_chip; +/** + * struct intel_mid_ops - Interface between intel-mid & sub archs + * @arch_setup: arch_setup function to re-initialize platform + * structures (x86_init, x86_platform_init) + * + * This structure can be extended if any new interface is required + * between intel-mid & its sub arch files. 
+ */ +struct intel_mid_ops { + void (*arch_setup)(void); +}; + +/* Helper API's for INTEL_MID_OPS_INIT */ +#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \ + [cpuid] = get_##cpuname##_ops + +/* Maximum number of CPU ops */ +#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *)) + +/* + * For every new cpu addition, a weak get__ops() function needs be + * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h. + */ +#define INTEL_MID_OPS_INIT {\ + DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \ + DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \ + DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \ +}; + #ifdef CONFIG_X86_INTEL_MID static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) @@ -86,8 +117,21 @@ extern enum intel_mid_timer_options intel_mid_timer_options; * Penwell uses spread spectrum clock, so the freq number is not exactly * the same as reported by MSR based on SDM. */ -#define PENWELL_FSB_FREQ_83SKU 83200 -#define PENWELL_FSB_FREQ_100SKU 99840 +#define FSB_FREQ_83SKU 83200 +#define FSB_FREQ_100SKU 99840 +#define FSB_FREQ_133SKU 133000 + +#define FSB_FREQ_167SKU 167000 +#define FSB_FREQ_200SKU 200000 +#define FSB_FREQ_267SKU 267000 +#define FSB_FREQ_333SKU 333000 +#define FSB_FREQ_400SKU 400000 + +/* Bus Select SoC Fuse value */ +#define BSEL_SOC_FUSE_MASK 0x7 +#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */ +#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */ +#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */ #define SFI_MTMR_MAX_NUM 8 #define SFI_MRTC_MAX 8 diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h new file mode 100644 index 00000000000000..8e71c79417672e --- /dev/null +++ b/arch/x86/include/asm/iosf_mbi.h @@ -0,0 +1,90 @@ +/* + * iosf_mbi.h: Intel OnChip System Fabric MailBox access support + */ + +#ifndef IOSF_MBI_SYMS_H +#define IOSF_MBI_SYMS_H + +#define MBI_MCR_OFFSET 0xD0 +#define MBI_MDR_OFFSET 0xD4 +#define MBI_MCRX_OFFSET 0xD8 + +#define MBI_RD_MASK 0xFEFFFFFF +#define MBI_WR_MASK 0X01000000 + +#define MBI_MASK_HI 0xFFFFFF00 +#define MBI_MASK_LO 0x000000FF +#define MBI_ENABLE 0xF0 + +/* Baytrail available units */ +#define BT_MBI_UNIT_AUNIT 0x00 +#define BT_MBI_UNIT_SMC 0x01 +#define BT_MBI_UNIT_CPU 0x02 +#define BT_MBI_UNIT_BUNIT 0x03 +#define BT_MBI_UNIT_PMC 0x04 +#define BT_MBI_UNIT_GFX 0x06 +#define BT_MBI_UNIT_SMI 0x0C +#define BT_MBI_UNIT_USB 0x43 +#define BT_MBI_UNIT_SATA 0xA3 +#define BT_MBI_UNIT_PCIE 0xA6 + +/* Baytrail read/write opcodes */ +#define BT_MBI_AUNIT_READ 0x10 +#define BT_MBI_AUNIT_WRITE 0x11 +#define BT_MBI_SMC_READ 0x10 +#define BT_MBI_SMC_WRITE 0x11 +#define BT_MBI_CPU_READ 0x10 +#define BT_MBI_CPU_WRITE 0x11 +#define BT_MBI_BUNIT_READ 0x10 +#define BT_MBI_BUNIT_WRITE 0x11 +#define BT_MBI_PMC_READ 0x06 +#define BT_MBI_PMC_WRITE 0x07 +#define BT_MBI_GFX_READ 0x00 +#define BT_MBI_GFX_WRITE 0x01 +#define BT_MBI_SMIO_READ 0x06 +#define BT_MBI_SMIO_WRITE 0x07 +#define BT_MBI_USB_READ 0x06 +#define BT_MBI_USB_WRITE 0x07 +#define BT_MBI_SATA_READ 0x00 +#define BT_MBI_SATA_WRITE 0x01 +#define BT_MBI_PCIE_READ 0x00 +#define BT_MBI_PCIE_WRITE 0x01 + +/** + * iosf_mbi_read() - MailBox Interface read command + * @port: port indicating subunit being accessed + * @opcode: port specific read or write opcode + * @offset: register address offset + * @mdr: register data to be read + * + * Locking is handled by spinlock - cannot sleep. 
+ * Return: Nonzero on error + */ +int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr); + +/** + * iosf_mbi_write() - MailBox unmasked write command + * @port: port indicating subunit being accessed + * @opcode: port specific read or write opcode + * @offset: register address offset + * @mdr: register data to be written + * + * Locking is handled by spinlock - cannot sleep. + * Return: Nonzero on error + */ +int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr); + +/** + * iosf_mbi_modify() - MailBox masked write command + * @port: port indicating subunit being accessed + * @opcode: port specific read or write opcode + * @offset: register address offset + * @mdr: register data being modified + * @mask: mask indicating bits in mdr to be modified + * + * Locking is handled by spinlock - cannot sleep. + * Return: Nonzero on error + */ +int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask); + +#endif /* IOSF_MBI_SYMS_H */ diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 0ea10f27d61354..cb6cfcd034cf71 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu); #ifdef CONFIG_HOTPLUG_CPU #include +extern int check_irq_vectors_for_cpu_disable(void); extern void fixup_irqs(void); extern void irq_force_complete_move(int); #endif diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index c696a868756769..6e4ce2df87cf68 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -118,7 +118,6 @@ extern void mce_register_decode_chain(struct notifier_block *nb); extern void mce_unregister_decode_chain(struct notifier_block *nb); #include -#include #include extern int mce_p5_enabled; diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index f98bd662531821..b59827e7652924 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -1,6 +1,21 @@ #ifndef _ASM_X86_MICROCODE_H #define _ASM_X86_MICROCODE_H +#define native_rdmsr(msr, val1, val2) \ +do { \ + u64 __val = native_read_msr((msr)); \ + (void)((val1) = (u32)__val); \ + (void)((val2) = (u32)(__val >> 32)); \ +} while (0) + +#define native_wrmsr(msr, low, high) \ + native_write_msr(msr, low, high) + +#define native_wrmsrl(msr, val) \ + native_write_msr((msr), \ + (u32)((u64)(val)), \ + (u32)((u64)(val) >> 32)) + struct cpu_signature { unsigned int sig; unsigned int pf; diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 4c019179a57dd9..b7b10b82d3e510 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -61,11 +61,10 @@ extern int __apply_microcode_amd(struct microcode_amd *mc_amd); extern int apply_microcode_amd(int cpu); extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); +#define PATCH_MAX_SIZE PAGE_SIZE +extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; + #ifdef CONFIG_MICROCODE_AMD_EARLY -#ifdef CONFIG_X86_32 -#define MPB_MAX_SIZE PAGE_SIZE -extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; -#endif extern void __init load_ucode_amd_bsp(void); extern void load_ucode_amd_ap(void); extern int __init save_microcode_in_initrd_amd(void); diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 3142a94c7b4bf9..3e6b4920ef5de2 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_MPSPEC_H #define _ASM_X86_MPSPEC_H -#include #include #include diff --git 
a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 2f366d0ac6b469..1da25a5f96f924 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_MWAIT_H #define _ASM_X86_MWAIT_H +#include + #define MWAIT_SUBSTATE_MASK 0xf #define MWAIT_CSTATE_MASK 0xf #define MWAIT_SUBSTATE_SIZE 4 @@ -13,4 +15,45 @@ #define MWAIT_ECX_INTERRUPT_BREAK 0x1 +static inline void __monitor(const void *eax, unsigned long ecx, + unsigned long edx) +{ + /* "monitor %eax, %ecx, %edx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc8;" + :: "a" (eax), "c" (ecx), "d"(edx)); +} + +static inline void __mwait(unsigned long eax, unsigned long ecx) +{ + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +} + +/* + * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, + * which can obviate IPI to trigger checking of need_resched. + * We execute MONITOR against need_resched and enter optimized wait state + * through MWAIT. Whenever someone changes need_resched, we would be woken + * up from MWAIT (without an IPI). + * + * New with Core Duo processors, MWAIT can take some hints based on CPU + * capability. + */ +static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) +{ + if (!current_set_polling_and_test()) { + if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { + mb(); + clflush((void *)¤t_thread_info()->flags); + mb(); + } + + __monitor((void *)¤t_thread_info()->flags, 0, 0); + if (!need_resched()) + __mwait(eax, ecx); + } + current_clr_polling(); +} + #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index c87892442e53d4..775873d3be5561 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr); #include #define __HAVE_ARCH_GATE_AREA 1 +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif /* __KERNEL__ */ #endif /* _ASM_X86_PAGE_H */ diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h index 4d550d04b6099b..904f528cc8e82a 100644 --- a/arch/x86/include/asm/page_32.h +++ b/arch/x86/include/asm/page_32.h @@ -5,10 +5,6 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_HUGETLB_PAGE -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) #ifdef CONFIG_DEBUG_VIRTUAL extern unsigned long __phys_addr(unsigned long); diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 43dcd804ebd573..8de6d9cf3b954a 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -39,9 +39,18 @@ #define __VIRTUAL_MASK_SHIFT 47 /* - * Kernel image size is limited to 512 MB (see level2_kernel_pgt in - * arch/x86/kernel/head_64.S), and it is mapped here: + * Kernel image size is limited to 1GiB due to the fixmap living in the + * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use + * 512MiB by default, leaving 1.5GiB for modules once the page tables + * are fully set up. If kernel ASLR is configured, it can extend the + * kernel page table mapping, reducing the size of the modules area. 
*/ -#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) +#define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024) +#if defined(CONFIG_RANDOMIZE_BASE) && \ + CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT +#define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET +#else +#define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT +#endif #endif /* _ASM_X86_PAGE_64_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 3bf2dd0cf61fec..0d193e23464707 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) #endif +/* Bit manipulation helper on pte/pgoff entry */ +static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift, + unsigned long mask, unsigned int leftshift) +{ + return ((value >> rightshift) & mask) << leftshift; +} + #ifdef CONFIG_MEM_SOFT_DIRTY /* @@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) -#define pte_to_pgoff(pte) \ - ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ - & ((1U << PTE_FILE_BITS1) - 1))) \ - + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ - & ((1U << PTE_FILE_BITS2) - 1)) \ - << (PTE_FILE_BITS1)) \ - + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ - & ((1U << PTE_FILE_BITS3) - 1)) \ - << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ - + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ - << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) - -#define pgoff_to_pte(off) \ - ((pte_t) { .pte_low = \ - ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ - + ((((off) >> PTE_FILE_BITS1) \ - & ((1U << PTE_FILE_BITS2) - 1)) \ - << PTE_FILE_SHIFT2) \ - + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ - & ((1U << PTE_FILE_BITS3) - 1)) \ - << PTE_FILE_SHIFT3) \ - + ((((off) >> \ - (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ - << PTE_FILE_SHIFT4) \ - + _PAGE_FILE }) +#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) +#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) +#define PTE_FILE_MASK3 ((1U << PTE_FILE_BITS3) - 1) + +#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) +#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) +#define PTE_FILE_LSHIFT4 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3) + +static __always_inline pgoff_t pte_to_pgoff(pte_t pte) +{ + return (pgoff_t) + (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, PTE_FILE_MASK3, PTE_FILE_LSHIFT3) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT4, -1UL, PTE_FILE_LSHIFT4)); +} + +static __always_inline pte_t pgoff_to_pte(pgoff_t off) +{ + return (pte_t){ + .pte_low = + pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + + pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + + pte_bitop(off, PTE_FILE_LSHIFT3, PTE_FILE_MASK3, PTE_FILE_SHIFT3) + + pte_bitop(off, PTE_FILE_LSHIFT4, -1UL, PTE_FILE_SHIFT4) + + _PAGE_FILE, + }; +} #else /* CONFIG_MEM_SOFT_DIRTY */ @@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) -#define pte_to_pgoff(pte) \ - ((((pte).pte_low >> PTE_FILE_SHIFT1) \ - & ((1U << PTE_FILE_BITS1) - 1)) \ - + 
((((pte).pte_low >> PTE_FILE_SHIFT2) \ - & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \ - + (((pte).pte_low >> PTE_FILE_SHIFT3) \ - << (PTE_FILE_BITS1 + PTE_FILE_BITS2))) - -#define pgoff_to_pte(off) \ - ((pte_t) { .pte_low = \ - (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ - + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \ - << PTE_FILE_SHIFT2) \ - + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ - << PTE_FILE_SHIFT3) \ - + _PAGE_FILE }) +#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) +#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) + +#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) +#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) + +static __always_inline pgoff_t pte_to_pgoff(pte_t pte) +{ + return (pgoff_t) + (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3)); +} + +static __always_inline pte_t pgoff_to_pte(pgoff_t off) +{ + return (pte_t){ + .pte_low = + pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + + pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + + pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) + + _PAGE_FILE, + }; +} #endif /* CONFIG_MEM_SOFT_DIRTY */ diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 2d883440cb9a28..c883bf726398a2 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t; #define VMALLOC_START _AC(0xffffc90000000000, UL) #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) #define VMEMMAP_START _AC(0xffffea0000000000, UL) -#define MODULES_VADDR _AC(0xffffffffa0000000, UL) +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 0ecac257fb26cf..a83aa44bb1fb83 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -382,7 +382,8 @@ static inline void update_page_count(int level, unsigned long pages) { } */ extern pte_t *lookup_address(unsigned long address, unsigned int *level); extern phys_addr_t slow_virt_to_phys(void *__address); - +extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, + unsigned numpages, unsigned long page_flags); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_DEFS_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7b034a4057f93c..fdedd38fd0fc7f 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -27,7 +27,6 @@ struct mm_struct; #include #include #include -#include #include #include @@ -72,6 +71,7 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO]; extern u16 __read_mostly tlb_lld_4k[NR_INFO]; extern u16 __read_mostly tlb_lld_2m[NR_INFO]; extern u16 __read_mostly tlb_lld_4m[NR_INFO]; +extern u16 __read_mostly tlb_lld_1g[NR_INFO]; extern s8 __read_mostly tlb_flushall_shift; /* @@ -370,6 +370,20 @@ struct ymmh_struct { u32 ymmh_space[64]; }; +/* We don't support LWP yet: */ +struct lwp_struct { + u8 reserved[128]; +}; + +struct bndregs_struct { + u64 bndregs[8]; +} __packed; + +struct bndcsr_struct { + u64 cfg_reg_u; + u64 status_reg; +} __packed; + struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2]; @@ -380,6 +394,9 @@ 
struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; + struct lwp_struct lwp; + struct bndregs_struct bndregs; + struct bndcsr_struct bndcsr; /* new processor state extensions will go here */ } __attribute__ ((packed, aligned (64))); @@ -700,29 +717,6 @@ static inline void sync_core(void) #endif } -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) -{ - /* "monitor %eax, %ecx, %edx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc8;" - :: "a" (eax), "c" (ecx), "d"(edx)); -} - -static inline void __mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax, %ecx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); -} - -static inline void __sti_mwait(unsigned long eax, unsigned long ecx) -{ - trace_hardirqs_on(); - /* "mwait %eax, %ecx;" */ - asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); -} - extern void select_idle_routine(const struct cpuinfo_x86 *c); extern void init_amd_e400_c1e_mask(void); diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 942a08623a1a09..14fd6fd75a19b8 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -60,7 +60,6 @@ struct pt_regs { #endif /* !__i386__ */ -#include #ifdef CONFIG_PARAVIRT #include #endif diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 59bcf4e2241857..d62c9f809bc5f6 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -3,7 +3,6 @@ #include - #define COMMAND_LINE_SIZE 2048 #include @@ -29,6 +28,8 @@ #include #include +extern u64 relocated_ramdisk; + /* Interrupt control for vSMPowered x86_64 systems */ #ifdef CONFIG_X86_64 void vsmp_init(void); diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4137890e88e3ad..8cd27e08e23c47 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -2,7 +2,6 @@ #define _ASM_X86_SMP_H #ifndef __ASSEMBLY__ #include -#include #include /* diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 34baa0eb5d0c97..a04eabd43d0662 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -1,9 +1,9 @@ #ifndef _ASM_X86_TIMER_H #define _ASM_X86_TIMER_H -#include #include #include #include +#include #define TICK_SIZE (tick_nsec / 1000) @@ -12,68 +12,26 @@ extern int recalibrate_cpu_khz(void); extern int no_timer_check; -/* Accelerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) +/* + * We use the full linear equation: f(x) = a + b*x, in order to allow + * a continuous function in the face of dynamic freq changes. * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC + * Continuity means that when our frequency changes our slope (b); we want to + * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t. * - * And since SC is a constant power of two, we can convert the div - * into a shift. + * Without an offset (a) the above would not be possible. * - * We can use khz divisor instead of mhz to keep a better precision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" 
- * - * In: - * - * ns = cycles * cyc2ns_scale / SC - * - * Although we may still have enough bits to store the value of ns, - * in some cases, we may not have enough bits to store cycles * cyc2ns_scale, - * leading to an incorrect result. - * - * To avoid this, we can decompose 'cycles' into quotient and remainder - * of division by SC. Then, - * - * ns = (quot * SC + rem) * cyc2ns_scale / SC - * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC - * - * - sqazi@google.com + * See the comment near cycles_2_ns() for details on how we compute (b). */ - -DECLARE_PER_CPU(unsigned long, cyc2ns); -DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); - -#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ - -static inline unsigned long long __cycles_2_ns(unsigned long long cyc) -{ - int cpu = smp_processor_id(); - unsigned long long ns = per_cpu(cyc2ns_offset, cpu); - ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), - (1UL << CYC2NS_SCALE_FACTOR)); - return ns; -} - -static inline unsigned long long cycles_2_ns(unsigned long long cyc) -{ - unsigned long long ns; - unsigned long flags; - - local_irq_save(flags); - ns = __cycles_2_ns(cyc); - local_irq_restore(flags); - - return ns; -} +struct cyc2ns_data { + u32 cyc2ns_mul; + u32 cyc2ns_shift; + u64 cyc2ns_offset; + u32 __count; + /* u32 hole */ +}; /* 24 bytes -- do not grow */ + +extern struct cyc2ns_data *cyc2ns_read_begin(void); +extern void cyc2ns_read_end(struct cyc2ns_data *); #endif /* _ASM_X86_TIMER_H */ diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 235be70d5bb4d0..57ae63cd6ee2ec 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -65,4 +65,7 @@ extern int notsc_setup(char *); extern void tsc_save_sched_clock_state(void); extern void tsc_restore_sched_clock_state(void); +/* MSR based TSC calibration for Intel Atom SoC platforms */ +int try_msr_calibrate_tsc(unsigned long *fast_calibrate); + #endif /* _ASM_X86_TSC_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8ec57c07b125a9..0d592e0a5b84fa 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -40,22 +40,30 @@ /* * Test whether a block of memory is a valid user space address. * Returns 0 if the range is valid, nonzero otherwise. - * - * This is equivalent to the following test: - * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) - * - * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ +static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) +{ + /* + * If we have used "sizeof()" for the size, + * we know it won't overflow the limit (but + * it might overflow the 'addr', so it's + * important to subtract the size from the + * limit, not add it to the address). + */ + if (__builtin_constant_p(size)) + return addr > limit - size; + + /* Arbitrary sizes? Be careful about overflow */ + addr += size; + if (addr < size) + return true; + return addr > limit; +} #define __range_not_ok(addr, size, limit) \ ({ \ - unsigned long flag, roksum; \ __chk_user_ptr(addr); \ - asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ - : "=&r" (flag), "=r" (roksum) \ - : "1" (addr), "g" ((long)(size)), \ - "rm" (limit)); \ - flag; \ + __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ }) /** @@ -78,7 +86,7 @@ * this function, memory access functions may still return -EFAULT. 
*/ #define access_ok(type, addr, size) \ - (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) + likely(!__range_not_ok(addr, size, user_addr_max())) /* * The exception table consists of pairs of addresses relative to the @@ -525,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n); unsigned long __must_check clear_user(void __user *mem, unsigned long len); unsigned long __must_check __clear_user(void __user *mem, unsigned long len); +extern void __cmpxchg_wrong_size(void) + __compiletime_error("Bad argument size for cmpxchg"); + +#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ +({ \ + int __ret = 0; \ + __typeof__(ptr) __uval = (uval); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + switch (size) { \ + case 1: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "q" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + case 2: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + case 4: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + case 8: \ + { \ + if (!IS_ENABLED(CONFIG_X86_64)) \ + __cmpxchg_wrong_size(); \ + \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + default: \ + __cmpxchg_wrong_size(); \ + } \ + *__uval = __old; \ + __ret; \ +}) + +#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ +({ \ + access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? 
\ + __user_atomic_cmpxchg_inatomic((uval), (ptr), \ + (old), (new), sizeof(*(ptr))) : \ + -EFAULT; \ +}) + /* * movsl can be slow when source and dest are not both 8-byte aligned */ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 190413d0de57b2..12a26b979bf163 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -204,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) static __must_check __always_inline int __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) { - return __copy_from_user_nocheck(dst, (__force const void *)src, size); + return __copy_from_user_nocheck(dst, src, size); } static __must_check __always_inline int __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) { - return __copy_to_user_nocheck((__force void *)dst, src, size); + return __copy_to_user_nocheck(dst, src, size); } extern long __copy_user_nocache(void *dst, const void __user *src, diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 0415cdabb5a663..554738963b28cf 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -9,6 +9,8 @@ #define XSTATE_FP 0x1 #define XSTATE_SSE 0x2 #define XSTATE_YMM 0x4 +#define XSTATE_BNDREGS 0x8 +#define XSTATE_BNDCSR 0x10 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) @@ -20,10 +22,14 @@ #define XSAVE_YMM_SIZE 256 #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) -/* - * These are the features that the OS can handle currently. - */ -#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) +/* Supported features which support lazy state saving */ +#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) + +/* Supported features which require eager state saving */ +#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR) + +/* All currently supported features */ +#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER) #ifdef CONFIG_X86_64 #define REX_PREFIX "0x48, " diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 9c3733c5f8f795..225b0988043a0a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@ #define SETUP_E820_EXT 1 #define SETUP_DTB 2 #define SETUP_PCI 3 +#define SETUP_EFI 4 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF @@ -23,6 +24,7 @@ #define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) #define XLF_EFI_HANDOVER_32 (1<<2) #define XLF_EFI_HANDOVER_64 (1<<3) +#define XLF_EFI_KEXEC (1<<4) #ifndef __ASSEMBLY__ diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 37813b5ddc3747..59cea185ad1dd1 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -184,6 +184,7 @@ #define MSR_AMD64_PATCH_LOADER 0xc0010020 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD64_OSVW_STATUS 0xc0010141 +#define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 diff --git a/arch/x86/include/uapi/asm/stat.h b/arch/x86/include/uapi/asm/stat.h index 7b3ddc348585f0..bc03eb5d6360f8 100644 --- a/arch/x86/include/uapi/asm/stat.h +++ b/arch/x86/include/uapi/asm/stat.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_STAT_H #define _ASM_X86_STAT_H +#include + #define STAT_HAVE_NSEC 1 #ifdef __i386__ @@ -78,26 +80,26 @@ struct stat64 { #else /* __i386__ */ struct stat { - unsigned long st_dev; - unsigned long st_ino; - unsigned 
long st_nlink; - - unsigned int st_mode; - unsigned int st_uid; - unsigned int st_gid; - unsigned int __pad0; - unsigned long st_rdev; - long st_size; - long st_blksize; - long st_blocks; /* Number 512-byte blocks allocated. */ - - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; - long __unused[3]; + __kernel_ulong_t st_dev; + __kernel_ulong_t st_ino; + __kernel_ulong_t st_nlink; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad0; + __kernel_ulong_t st_rdev; + __kernel_long_t st_size; + __kernel_long_t st_blksize; + __kernel_long_t st_blocks; /* Number 512-byte blocks allocated. */ + + __kernel_ulong_t st_atime; + __kernel_ulong_t st_atime_nsec; + __kernel_ulong_t st_mtime; + __kernel_ulong_t st_mtime_nsec; + __kernel_ulong_t st_ctime; + __kernel_ulong_t st_ctime_nsec; + __kernel_long_t __unused[3]; }; /* We don't need to memset the whole thing just to initialize the padding */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9b0a34e2cd793d..cb648c84b32799 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -29,10 +29,11 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-y += syscall_$(BITS).o obj-$(CONFIG_X86_64) += vsyscall_64.o obj-$(CONFIG_X86_64) += vsyscall_emu_64.o +obj-$(CONFIG_SYSFS) += ksysfs.o obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o topology.o kdebugfs.o obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o -obj-y += tsc.o io_delay.o rtc.o +obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o @@ -91,15 +92,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o -obj-$(CONFIG_MICROCODE_EARLY) += microcode_core_early.o -obj-$(CONFIG_MICROCODE_INTEL_EARLY) += microcode_intel_early.o -obj-$(CONFIG_MICROCODE_INTEL_LIB) += microcode_intel_lib.o -microcode-y := microcode_core.o -microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o -microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o -obj-$(CONFIG_MICROCODE_AMD_EARLY) += microcode_amd_early.o -obj-$(CONFIG_MICROCODE) += microcode.o - obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o @@ -111,6 +103,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_TRACING) += tracepoint.o +obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o ### # 64 bit specific files diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index d2b7f27781bce0..e69182fd01cfe4 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); -/* - * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, - * which can obviate IPI to trigger checking of need_resched. - * We execute MONITOR against need_resched and enter optimized wait state - * through MWAIT. Whenever someone changes need_resched, we would be woken - * up from MWAIT (without an IPI). - * - * New with Core Duo processors, MWAIT can take some hints based on CPU - * capability. 
- */ -void mwait_idle_with_hints(unsigned long ax, unsigned long cx) -{ - if (!need_resched()) { - if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) - clflush((void *)¤t_thread_info()->flags); - - __monitor((void *)¤t_thread_info()->flags, 0, 0); - smp_mb(); - if (!need_resched()) - __mwait(ax, cx); - } -} - void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index d278736bf774f7..7f26c9a70a9e0a 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -74,6 +74,13 @@ unsigned int max_physical_apicid; */ physid_mask_t phys_cpu_present_map; +/* + * Processor to be disabled specified by kernel parameter + * disable_cpu_apicid=, mostly used for the kdump 2nd kernel to + * avoid undefined behaviour caused by sending INIT from AP to BSP. + */ +static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID; + /* * Map cpu index to physical APIC ID */ @@ -1968,7 +1975,7 @@ __visible void smp_trace_spurious_interrupt(struct pt_regs *regs) */ static inline void __smp_error_interrupt(struct pt_regs *regs) { - u32 v0, v1; + u32 v; u32 i = 0; static const char * const error_interrupt_reason[] = { "Send CS error", /* APIC Error Bit 0 */ @@ -1982,21 +1989,20 @@ static inline void __smp_error_interrupt(struct pt_regs *regs) }; /* First tickle the hardware, only then report what went on. -- REW */ - v0 = apic_read(APIC_ESR); apic_write(APIC_ESR, 0); - v1 = apic_read(APIC_ESR); + v = apic_read(APIC_ESR); ack_APIC_irq(); atomic_inc(&irq_err_count); - apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", - smp_processor_id(), v0 , v1); + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", + smp_processor_id(), v); - v1 = v1 & 0xff; - while (v1) { - if (v1 & 0x1) + v &= 0xff; + while (v) { + if (v & 0x1) apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); i++; - v1 >>= 1; + v >>= 1; } apic_printk(APIC_DEBUG, KERN_CONT "\n"); @@ -2114,6 +2120,39 @@ int generic_processor_info(int apicid, int version) bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); + /* + * boot_cpu_physical_apicid is designed to have the apicid + * returned by read_apic_id(), i.e, the apicid of the + * currently booting-up processor. However, on some platforms, + * it is temporarily modified by the apicid reported as BSP + * through MP table. Concretely: + * + * - arch/x86/kernel/mpparse.c: MP_processor_info() + * - arch/x86/mm/amdtopology.c: amd_numa_init() + * - arch/x86/platform/visws/visws_quirks.c: MP_processor_info() + * + * This function is executed with the modified + * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel + * parameter doesn't work to disable APs on kdump 2nd kernel. + * + * Since fixing handling of boot_cpu_physical_apicid requires + * another discussion and tests on each platform, we leave it + * for now and here we use read_apic_id() directly in this + * function, generic_processor_info(). + */ + if (disabled_cpu_apicid != BAD_APICID && + disabled_cpu_apicid != read_apic_id() && + disabled_cpu_apicid == apicid) { + int thiscpu = num_processors + disabled_cpus; + + pr_warning("APIC: Disabling requested cpu." 
+ " Processor %d/0x%x ignored.\n", + thiscpu, apicid); + + disabled_cpus++; + return -ENODEV; + } + /* * If boot cpu has not been detected yet, then only allow upto * nr_cpu_ids - 1 processors and keep one slot free for boot cpu @@ -2592,3 +2631,12 @@ static int __init lapic_insert_resource(void) * that is using request_resource */ late_initcall(lapic_insert_resource); + +static int __init apic_set_disabled_cpu_apicid(char *arg) +{ + if (!arg || !get_option(&arg, &disabled_cpu_apicid)) + return -EINVAL; + + return 0; +} +early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid); diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 00c77cf78e9e18..5d5b9eb2b7a417 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index e145f28b40993d..191ce75c0e54b8 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e63a5bd2a78f55..a43f068ebec1c0 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1142,9 +1142,10 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) if (test_bit(vector, used_vectors)) goto next; - for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) - if (per_cpu(vector_irq, new_cpu)[vector] != -1) + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { + if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED) goto next; + } /* Found one! */ current_vector = vector; current_offset = offset; @@ -1183,7 +1184,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) vector = cfg->vector; for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; cfg->vector = 0; cpumask_clear(cfg->domain); @@ -1191,11 +1192,10 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { - for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; - vector++) { + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) continue; - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; break; } } @@ -1228,12 +1228,12 @@ void __setup_vector_irq(int cpu) /* Mark the free vectors */ for (vector = 0; vector < NR_VECTORS; ++vector) { irq = per_cpu(vector_irq, cpu)[vector]; - if (irq < 0) + if (irq <= VECTOR_UNDEFINED) continue; cfg = irq_cfg(irq); if (!cpumask_test_cpu(cpu, cfg->domain)) - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; } raw_spin_unlock(&vector_lock); } @@ -2202,13 +2202,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) me = smp_processor_id(); for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { - unsigned int irq; + int irq; unsigned int irr; struct irq_desc *desc; struct irq_cfg *cfg; irq = __this_cpu_read(vector_irq[vector]); - if (irq == -1) + if (irq <= VECTOR_UNDEFINED) continue; desc = irq_to_desc(irq); diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 7434d8556d0916..62071569bd50d8 100644 --- 
a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -1,6 +1,5 @@ #include #include -#include #include #include diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 77c95c0e1bf77d..00146f9b025431 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -29,7 +29,6 @@ #define pr_fmt(fmt) "summit: %s: " fmt, __func__ #include -#include #include #include diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 140e29db478d78..cac85ee6913f70 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -3,7 +3,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 562a76d433c8e2..de231e328cae31 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -3,7 +3,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 47b56a7e99cbcf..7fd54f09b011e2 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -36,12 +36,13 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o endif obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o endif obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ +obj-$(CONFIG_MICROCODE) += microcode/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bca023bdd6b2a8..d3153e281d7291 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1,5 +1,4 @@ #include -#include #include #include #include @@ -487,7 +486,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) - sched_clock_stable = 1; + set_sched_clock_stable(); } #ifdef CONFIG_X86_64 @@ -508,6 +507,16 @@ static void early_init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EXTD_APICID); } #endif + + /* F16h erratum 793, CVE-2013-6885 */ + if (c->x86 == 0x16 && c->x86_model <= 0xf) { + u64 val; + + rdmsrl(MSR_AMD64_LS_CFG, val); + if (!(val & BIT(15))) + wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15)); + } + } static const int amd_erratum_383[]; @@ -790,14 +799,10 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) } /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ - if (!((eax >> 16) & mask)) { - u32 a, b, c, d; - - cpuid(0x80000005, &a, &b, &c, &d); - tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff; - } else { + if (!((eax >> 16) & mask)) + tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff; + else tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; - } /* a 4M entry uses two 2M entries */ tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 8d5652dc99dd53..8779edab684efd 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -1,6 +1,5 @@ #include #include -#include #include #include diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 6abc172b825811..24b6fd10625a5a 100644 --- a/arch/x86/kernel/cpu/common.c +++ 
b/arch/x86/kernel/cpu/common.c @@ -472,6 +472,7 @@ u16 __read_mostly tlb_lli_4m[NR_INFO]; u16 __read_mostly tlb_lld_4k[NR_INFO]; u16 __read_mostly tlb_lld_2m[NR_INFO]; u16 __read_mostly tlb_lld_4m[NR_INFO]; +u16 __read_mostly tlb_lld_1g[NR_INFO]; /* * tlb_flushall_shift shows the balance point in replacing cr3 write @@ -486,13 +487,13 @@ void cpu_detect_tlb(struct cpuinfo_x86 *c) if (this_cpu->c_detect_tlb) this_cpu->c_detect_tlb(c); - printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \ - "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \ + printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" + "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n" "tlb_flushall_shift: %d\n", tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], - tlb_flushall_shift); + tlb_lld_1g[ENTRIES], tlb_flushall_shift); } void detect_ht(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index d0969c75ab54bb..aaf152e7963738 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -1,4 +1,3 @@ -#include #include #include #include diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc1ec0dff939ec..3db61c644e440e 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -1,4 +1,3 @@ -#include #include #include @@ -93,7 +92,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) - sched_clock_stable = 1; + set_sched_clock_stable(); } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ @@ -387,7 +386,8 @@ static void init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_PEBS); } - if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) + if (c->x86 == 6 && cpu_has_clflush && + (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); #ifdef CONFIG_X86_64 @@ -505,6 +505,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) #define TLB_DATA0_2M_4M 0x23 #define STLB_4K 0x41 +#define STLB_4K_2M 0x42 static const struct _tlb_table intel_tlb_table[] = { { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, @@ -525,13 +526,20 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, + { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, + { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, + { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set ssociative" }, + { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set ssociative" }, { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xc0, TLB_DATA_4K_4M, 8, " 
TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, + { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, + { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" }, { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, { 0x00, 0, 0 } }; @@ -557,6 +565,20 @@ static void intel_tlb_lookup(const unsigned char desc) if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; break; + case STLB_4K_2M: + if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) + tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; + break; case TLB_INST_ALL: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; @@ -602,6 +624,10 @@ static void intel_tlb_lookup(const unsigned char desc) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; + case TLB_DATA_1G: + if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; + break; } } diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c index de8b60a53f695f..a1aef9533154b8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-apei.c +++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c @@ -33,22 +33,28 @@ #include #include #include +#include #include #include "mce-internal.h" -void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err) +void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) { struct mce m; - /* Only corrected MC is reported */ - if (!corrected || !(mem_err->validation_bits & CPER_MEM_VALID_PA)) + if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) return; mce_setup(&m); m.bank = 1; - /* Fake a memory read corrected error with unknown channel */ + /* Fake a memory read error with unknown channel */ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f; + + if (severity >= GHES_SEV_RECOVERABLE) + m.status |= MCI_STATUS_UC; + if (severity >= GHES_SEV_PANIC) + m.status |= MCI_STATUS_PCC; + m.addr = mem_err->physical_addr; mce_log(&m); mce_notify_irq(); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index b3218cdee95f43..4d5419b249da53 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1638,15 +1638,15 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) static void mce_start_timer(unsigned int cpu, struct timer_list *t) { - unsigned long iv = mce_adjust_timer(check_interval * HZ); - - __this_cpu_write(mce_next_interval, iv); + unsigned long iv = check_interval * HZ; if (mca_cfg.ignore_ce || !iv) return; + per_cpu(mce_next_interval, cpu) = iv; + t->expires = round_jiffies(jiffies + iv); - add_timer_on(t, smp_processor_id()); + add_timer_on(t, cpu); } static void __mcheck_cpu_init_timer(void) @@ -2272,8 +2272,10 @@ static int mce_device_create(unsigned int cpu) dev->release = &mce_device_release; err = 
device_register(dev); - if (err) + if (err) { + put_device(dev); return err; + } for (i = 0; mce_device_attrs[i]; i++) { err = device_create_file(dev, mce_device_attrs[i]); diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 4cfe0458ca665a..fb6156fee6f79d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -6,7 +6,6 @@ */ #include -#include #include #include #include diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 1c044b1ccc593c..a3042989398c1c 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index e9a701aecaa14a..7dc5564d0cdf57 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile new file mode 100644 index 00000000000000..285c85427c323c --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/Makefile @@ -0,0 +1,7 @@ +microcode-y := core.o +obj-$(CONFIG_MICROCODE) += microcode.o +microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o +microcode-$(CONFIG_MICROCODE_AMD) += amd.o +obj-$(CONFIG_MICROCODE_EARLY) += core_early.o +obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o +obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/cpu/microcode/amd.c similarity index 96% rename from arch/x86/kernel/microcode_amd.c rename to arch/x86/kernel/cpu/microcode/amd.c index c3d4cc972eca6b..8fffd845e22b9e 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -182,10 +182,10 @@ int __apply_microcode_amd(struct microcode_amd *mc_amd) { u32 rev, dummy; - wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); + native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); /* verify patch application was successful */ - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); if (rev != mc_amd->hdr.patch_id) return -1; @@ -332,6 +332,9 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) patch->patch_id = mc_hdr->patch_id; patch->equiv_cpu = proc_id; + pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n", + __func__, patch->patch_id, proc_id); + /* ... and add to cache. 
*/ update_cache(patch); @@ -390,9 +393,9 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { struct ucode_patch *p = find_patch(smp_processor_id()); if (p) { - memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); - memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), - MPB_MAX_SIZE)); + memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); + memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), + PATCH_MAX_SIZE)); } } #endif @@ -430,7 +433,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, if (c->x86 >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); - if (request_firmware(&fw, (const char *)fw_name, device)) { + if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); goto out; } diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c similarity index 55% rename from arch/x86/kernel/microcode_amd_early.c rename to arch/x86/kernel/cpu/microcode/amd_early.c index 6073104ccaa36b..8384c0fa206f17 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c @@ -2,6 +2,7 @@ * Copyright (C) 2013 Advanced Micro Devices, Inc. * * Author: Jacob Shin + * Fixes: Borislav Petkov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -15,10 +16,18 @@ #include #include -static bool ucode_loaded; +/* + * This points to the current valid container of microcode patches which we will + * save from the initrd before jettisoning its contents. + */ +static u8 *container; +static size_t container_size; + static u32 ucode_new_rev; -static unsigned long ucode_offset; -static size_t ucode_size; +u8 amd_ucode_patch[PATCH_MAX_SIZE]; +static u16 this_equiv_id; + +struct cpio_data ucode_cpio; /* * Microcode patch container file is prepended to the initrd in cpio format. @@ -32,9 +41,6 @@ static struct cpio_data __init find_ucode_in_initrd(void) char *path; void *start; size_t size; - unsigned long *uoffset; - size_t *usize; - struct cpio_data cd; #ifdef CONFIG_X86_32 struct boot_params *p; @@ -47,30 +53,50 @@ static struct cpio_data __init find_ucode_in_initrd(void) path = (char *)__pa_nodebug(ucode_path); start = (void *)p->hdr.ramdisk_image; size = p->hdr.ramdisk_size; - uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); - usize = (size_t *)__pa_nodebug(&ucode_size); #else path = ucode_path; start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET); size = boot_params.hdr.ramdisk_size; - uoffset = &ucode_offset; - usize = &ucode_size; #endif - cd = find_cpio_data(path, start, size, &offset); - if (!cd.data) - return cd; + return find_cpio_data(path, start, size, &offset); +} - if (*(u32 *)cd.data != UCODE_MAGIC) { - cd.data = NULL; - cd.size = 0; - return cd; - } +static size_t compute_container_size(u8 *data, u32 total_size) +{ + size_t size = 0; + u32 *header = (u32 *)data; - *uoffset = (u8 *)cd.data - (u8 *)start; - *usize = cd.size; + if (header[0] != UCODE_MAGIC || + header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ + header[2] == 0) /* size */ + return size; - return cd; + size = header[2] + CONTAINER_HDR_SZ; + total_size -= size; + data += size; + + while (total_size) { + u16 patch_size; + + header = (u32 *)data; + + if (header[0] != UCODE_UCODE_TYPE) + break; + + /* + * Sanity-check patch size. 
+ */ + patch_size = header[1]; + if (patch_size > PATCH_MAX_SIZE) + break; + + size += patch_size + SECTION_HDR_SIZE; + data += patch_size + SECTION_HDR_SIZE; + total_size -= patch_size + SECTION_HDR_SIZE; + } + + return size; } /* @@ -85,23 +111,22 @@ static struct cpio_data __init find_ucode_in_initrd(void) static void apply_ucode_in_initrd(void *ucode, size_t size) { struct equiv_cpu_entry *eq; + size_t *cont_sz; u32 *header; - u8 *data; + u8 *data, **cont; u16 eq_id = 0; int offset, left; - u32 rev, eax; + u32 rev, eax, ebx, ecx, edx; u32 *new_rev; - unsigned long *uoffset; - size_t *usize; #ifdef CONFIG_X86_32 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); - uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); - usize = (size_t *)__pa_nodebug(&ucode_size); + cont_sz = (size_t *)__pa_nodebug(&container_size); + cont = (u8 **)__pa_nodebug(&container); #else new_rev = &ucode_new_rev; - uoffset = &ucode_offset; - usize = &ucode_size; + cont_sz = &container_size; + cont = &container; #endif data = ucode; @@ -109,23 +134,37 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) header = (u32 *)data; /* find equiv cpu table */ - - if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ + if (header[0] != UCODE_MAGIC || + header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ header[2] == 0) /* size */ return; - eax = cpuid_eax(0x00000001); + eax = 0x00000001; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); while (left > 0) { eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ); + *cont = data; + + /* Advance past the container header */ offset = header[2] + CONTAINER_HDR_SZ; data += offset; left -= offset; eq_id = find_equiv_id(eq, eax); - if (eq_id) + if (eq_id) { + this_equiv_id = eq_id; + *cont_sz = compute_container_size(*cont, left + offset); + + /* + * truncate how much we need to iterate over in the + * ucode update loop below + */ + left = *cont_sz - offset; break; + } /* * support multiple container files appended together. 
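As an illustrative aside (an editor's sketch, not part of the patch): the container format walked by compute_container_size() and apply_ucode_in_initrd() above can be summarized as below; the UCODE_*, CONTAINER_HDR_SZ, SECTION_HDR_SIZE and PATCH_MAX_SIZE constants are assumed to come from the microcode_amd header this file already uses.

/*
 * Sketch only -- a condensed restatement of the walk done above.
 *
 * Container layout:
 *   u32 magic              == UCODE_MAGIC
 *   u32 type               == UCODE_EQUIV_CPU_TABLE_TYPE
 *   u32 equiv_table_size                                (header[2])
 *   struct equiv_cpu_entry table[]    (equiv_table_size bytes)
 *   then repeated patch sections:
 *     u32 type             == UCODE_UCODE_TYPE
 *     u32 patch_size                                    (header[1])
 *     u8  patch[patch_size]            (one struct microcode_amd)
 * Several such containers may be appended back to back.
 */
static size_t example_container_walk(u8 *data, u32 total)
{
	u32 *hdr = (u32 *)data;
	size_t size;

	if (hdr[0] != UCODE_MAGIC || hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE)
		return 0;

	/* container header plus the equivalence table */
	size = hdr[2] + CONTAINER_HDR_SZ;

	/* then walk the patch sections until the next container (or the end) */
	while (size < total) {
		hdr = (u32 *)(data + size);
		if (hdr[0] != UCODE_UCODE_TYPE || hdr[1] > PATCH_MAX_SIZE)
			break;
		size += hdr[1] + SECTION_HDR_SIZE;
	}

	return size;
}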
if this @@ -145,19 +184,18 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) /* mark where the next microcode container file starts */ offset = data - (u8 *)ucode; - *uoffset += offset; - *usize -= offset; ucode = data; } if (!eq_id) { - *usize = 0; + *cont = NULL; + *cont_sz = 0; return; } /* find ucode and update if needed */ - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); + native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); while (left > 0) { struct microcode_amd *mc; @@ -168,73 +206,83 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) break; mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE); - if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) - if (__apply_microcode_amd(mc) == 0) { + + if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) { + + if (!__apply_microcode_amd(mc)) { rev = mc->hdr.patch_id; *new_rev = rev; + + /* save ucode patch */ + memcpy(amd_ucode_patch, mc, + min_t(u32, header[1], PATCH_MAX_SIZE)); } + } offset = header[1] + SECTION_HDR_SIZE; data += offset; left -= offset; } - - /* mark where this microcode container file ends */ - offset = *usize - (data - (u8 *)ucode); - *usize -= offset; - - if (!(*new_rev)) - *usize = 0; } void __init load_ucode_amd_bsp(void) { - struct cpio_data cd = find_ucode_in_initrd(); - if (!cd.data) + struct cpio_data cp; + void **data; + size_t *size; + +#ifdef CONFIG_X86_32 + data = (void **)__pa_nodebug(&ucode_cpio.data); + size = (size_t *)__pa_nodebug(&ucode_cpio.size); +#else + data = &ucode_cpio.data; + size = &ucode_cpio.size; +#endif + + cp = find_ucode_in_initrd(); + if (!cp.data) return; - apply_ucode_in_initrd(cd.data, cd.size); + *data = cp.data; + *size = cp.size; + + apply_ucode_in_initrd(cp.data, cp.size); } #ifdef CONFIG_X86_32 -u8 amd_bsp_mpb[MPB_MAX_SIZE]; - /* * On 32-bit, since AP's early load occurs before paging is turned on, we * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During - * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which - * is used upon resume from suspend. + * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch, + * which is used upon resume from suspend. */ void load_ucode_amd_ap(void) { struct microcode_amd *mc; - unsigned long *initrd; - unsigned long *uoffset; size_t *usize; - void *ucode; + void **ucode; - mc = (struct microcode_amd *)__pa(amd_bsp_mpb); + mc = (struct microcode_amd *)__pa(amd_ucode_patch); if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { __apply_microcode_amd(mc); return; } - initrd = (unsigned long *)__pa(&initrd_start); - uoffset = (unsigned long *)__pa(&ucode_offset); - usize = (size_t *)__pa(&ucode_size); + ucode = (void *)__pa_nodebug(&container); + usize = (size_t *)__pa_nodebug(&container_size); - if (!*usize || !*initrd) + if (!*ucode || !*usize) return; - ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset); - apply_ucode_in_initrd(ucode, *usize); + apply_ucode_in_initrd(*ucode, *usize); } static void __init collect_cpu_sig_on_bsp(void *arg) { unsigned int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + uci->cpu_sig.sig = cpuid_eax(0x00000001); } #else @@ -242,36 +290,54 @@ void load_ucode_amd_ap(void) { unsigned int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + struct equiv_cpu_entry *eq; + struct microcode_amd *mc; u32 rev, eax; + u16 eq_id; + + /* Exit if called on the BSP. 
*/ + if (!cpu) + return; + + if (!container) + return; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); - eax = cpuid_eax(0x00000001); uci->cpu_sig.rev = rev; uci->cpu_sig.sig = eax; - if (cpu && !ucode_loaded) { - void *ucode; + eax = cpuid_eax(0x00000001); + eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ); - if (!ucode_size || !initrd_start) - return; + eq_id = find_equiv_id(eq, eax); + if (!eq_id) + return; + + if (eq_id == this_equiv_id) { + mc = (struct microcode_amd *)amd_ucode_patch; - ucode = (void *)(initrd_start + ucode_offset); - eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); - if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) + if (mc && rev < mc->hdr.patch_id) { + if (!__apply_microcode_amd(mc)) + ucode_new_rev = mc->hdr.patch_id; + } + + } else { + if (!ucode_cpio.data) return; - ucode_loaded = true; + /* + * AP has a different equivalence ID than BSP, looks like + * mixed-steppings silicon so go through the ucode blob anew. + */ + apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size); } - - apply_microcode_amd(cpu); } #endif int __init save_microcode_in_initrd_amd(void) { enum ucode_state ret; - void *ucode; u32 eax; #ifdef CONFIG_X86_32 @@ -280,22 +346,35 @@ int __init save_microcode_in_initrd_amd(void) if (!uci->cpu_sig.sig) smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); + + /* + * Take into account the fact that the ramdisk might get relocated + * and therefore we need to recompute the container's position in + * virtual memory space. + */ + container = (u8 *)(__va((u32)relocated_ramdisk) + + ((u32)container - boot_params.hdr.ramdisk_image)); #endif if (ucode_new_rev) pr_info("microcode: updated early to new patch_level=0x%08x\n", ucode_new_rev); - if (ucode_loaded || !ucode_size || !initrd_start) - return 0; + if (!container) + return -EINVAL; - ucode = (void *)(initrd_start + ucode_offset); eax = cpuid_eax(0x00000001); eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); - ret = load_microcode_amd(eax, ucode, ucode_size); + ret = load_microcode_amd(eax, container, container_size); if (ret != UCODE_OK) return -EINVAL; - ucode_loaded = true; + /* + * This will be freed any msec now, stash patches for the current + * family and switch to patch cache for cpu hotplug, etc later. 
+ */ + container = NULL; + container_size = 0; + return 0; } diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/cpu/microcode/core.c similarity index 100% rename from arch/x86/kernel/microcode_core.c rename to arch/x86/kernel/cpu/microcode/core.c diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c similarity index 100% rename from arch/x86/kernel/microcode_core_early.c rename to arch/x86/kernel/cpu/microcode/core_early.c diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/cpu/microcode/intel.c similarity index 99% rename from arch/x86/kernel/microcode_intel.c rename to arch/x86/kernel/cpu/microcode/intel.c index 5fb2cebf556b1e..a276fa75d9b5d9 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -278,7 +278,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, sprintf(name, "intel-ucode/%02x-%02x-%02x", c->x86, c->x86_model, c->x86_mask); - if (request_firmware(&firmware, name, device)) { + if (request_firmware_direct(&firmware, name, device)) { pr_debug("data file %s load failed\n", name); return UCODE_NFOUND; } diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c similarity index 98% rename from arch/x86/kernel/microcode_intel_early.c rename to arch/x86/kernel/cpu/microcode/intel_early.c index 1575deb2e636af..18f739129e7208 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -365,16 +365,6 @@ get_matching_model_microcode(int cpu, unsigned long start, return state; } -#define native_rdmsr(msr, val1, val2) \ -do { \ - u64 __val = native_read_msr((msr)); \ - (void)((val1) = (u32)__val); \ - (void)((val2) = (u32)(__val >> 32)); \ -} while (0) - -#define native_wrmsr(msr, low, high) \ - native_write_msr(msr, low, high); - static int collect_cpu_info_early(struct ucode_cpu_info *uci) { unsigned int val[2]; diff --git a/arch/x86/kernel/microcode_intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c similarity index 100% rename from arch/x86/kernel/microcode_intel_lib.c rename to arch/x86/kernel/cpu/microcode/intel_lib.c diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 8e132931614d61..b88645191fe559 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1883,21 +1883,27 @@ static struct pmu pmu = { void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) { + struct cyc2ns_data *data; + userpg->cap_user_time = 0; userpg->cap_user_time_zero = 0; userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; userpg->pmc_width = x86_pmu.cntval_bits; - if (!sched_clock_stable) + if (!sched_clock_stable()) return; + data = cyc2ns_read_begin(); + userpg->cap_user_time = 1; - userpg->time_mult = this_cpu_read(cyc2ns); - userpg->time_shift = CYC2NS_SCALE_FACTOR; - userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; + userpg->time_mult = data->cyc2ns_mul; + userpg->time_shift = data->cyc2ns_shift; + userpg->time_offset = data->cyc2ns_offset - now; userpg->cap_user_time_zero = 1; - userpg->time_zero = this_cpu_read(cyc2ns_offset); + userpg->time_zero = data->cyc2ns_offset; + + cyc2ns_read_end(data); } /* diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index e09f0bfb7b8f23..4b8e4d3cd6ea62 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -10,6 +10,7 @@ #include #include #include +#include 
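As an illustrative aside (a sketch, not part of the patch): with arch_perf_update_userpage() above now publishing the cyc2ns mult/shift/offset, a user-space reader of the perf mmap page converts a TSC value to nanoseconds roughly as below, following the recipe documented for cap_user_time in the perf_event uapi header; the split into quotient and remainder keeps the 64-bit multiply from overflowing.

#include <linux/perf_event.h>	/* struct perf_event_mmap_page (uapi) */

/* user-space side; pc points at the mmap'ed first page of the event */
static inline __u64 user_cyc_to_ns(const struct perf_event_mmap_page *pc,
				   __u64 cyc)
{
	__u64 quot = cyc >> pc->time_shift;
	__u64 rem  = cyc & (((__u64)1 << pc->time_shift) - 1);

	return pc->time_offset + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}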
#include @@ -816,6 +817,18 @@ static int force_ibs_eilvt_setup(void) return ret; } +static void ibs_eilvt_setup(void) +{ + /* + * Force LVT offset assignment for family 10h: The offsets are + * not assigned by the BIOS for this family, so the OS is + * responsible for doing it. If the OS assignment fails, fall + * back to BIOS settings and try to setup this. + */ + if (boot_cpu_data.x86 == 0x10) + force_ibs_eilvt_setup(); +} + static inline int get_ibs_lvt_offset(void) { u64 val; @@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy) setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); } +#ifdef CONFIG_PM + +static int perf_ibs_suspend(void) +{ + clear_APIC_ibs(NULL); + return 0; +} + +static void perf_ibs_resume(void) +{ + ibs_eilvt_setup(); + setup_APIC_ibs(NULL); +} + +static struct syscore_ops perf_ibs_syscore_ops = { + .resume = perf_ibs_resume, + .suspend = perf_ibs_suspend, +}; + +static void perf_ibs_pm_init(void) +{ + register_syscore_ops(&perf_ibs_syscore_ops); +} + +#else + +static inline void perf_ibs_pm_init(void) { } + +#endif + static int perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -877,18 +920,12 @@ static __init int amd_ibs_init(void) if (!caps) return -ENODEV; /* ibs not supported by the cpu */ - /* - * Force LVT offset assignment for family 10h: The offsets are - * not assigned by the BIOS for this family, so the OS is - * responsible for doing it. If the OS assignment fails, fall - * back to BIOS settings and try to setup this. - */ - if (boot_cpu_data.x86 == 0x10) - force_ibs_eilvt_setup(); + ibs_eilvt_setup(); if (!ibs_eilvt_valid()) goto out; + perf_ibs_pm_init(); get_online_cpus(); ibs_caps = caps; /* make ibs_caps visible to other cpus: */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c new file mode 100644 index 00000000000000..5ad35ad94d0f81 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -0,0 +1,679 @@ +/* + * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters + * Copyright (C) 2013 Google, Inc., Stephane Eranian + * + * Intel RAPL interface is specified in the IA-32 Manual Vol3b + * section 14.7.1 (September 2013) + * + * RAPL provides more controls than just reporting energy consumption; + * however, here we only expose the 3 energy consumption free running + * counters (pp0, pkg, dram). + * + * Each of those counters increments in a power unit defined by the + * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules + * but it can vary. + * + * Counter to rapl events mappings: + * + * pp0 counter: consumption of all physical cores (power plane 0) + * event: rapl_energy_cores + * perf code: 0x1 + * + * pkg counter: consumption of the whole processor package + * event: rapl_energy_pkg + * perf code: 0x2 + * + * dram counter: consumption of the dram domain (servers only) + * event: rapl_energy_dram + * perf code: 0x3 + * + * gpu counter: consumption of the builtin-gpu domain (client only) + * event: rapl_energy_gpu + * perf code: 0x4 + * + * We manage those counters as free running (read-only). They may be + * used simultaneously by other tools, such as turbostat. + * + * The events only support system-wide mode counting. There is no + * sampling support because it does not make sense and is not + * supported by the RAPL hardware. + * + * Because we want to avoid floating-point operations in the kernel, + * the events are all reported in fixed point arithmetic (32.32). 
+ * Tools must adjust the counts to convert them to Watts using + * the duration of the measurement. Tools may use a function such as + * ldexp(raw_count, -32); + */ +#include +#include +#include +#include +#include "perf_event.h" + +/* + * RAPL energy status counters + */ +#define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */ +#define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */ +#define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */ +#define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */ +#define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */ +#define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */ +#define RAPL_IDX_PP1_NRG_STAT 3 /* DRAM */ +#define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */ + +/* Clients have PP0, PKG */ +#define RAPL_IDX_CLN (1<config + * any other bit is reserved + */ +#define RAPL_EVENT_MASK 0xFFULL + +#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __rapl_##_var##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct kobj_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __rapl_##_var##_show, NULL) + +#define RAPL_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \ + .config = _config, \ +} + +#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ + +struct rapl_pmu { + spinlock_t lock; + int hw_unit; /* 1/2^hw_unit Joule */ + int n_active; /* number of active events */ + struct list_head active_list; + struct pmu *pmu; /* pointer to rapl_pmu_class */ + ktime_t timer_interval; /* in ktime_t unit */ + struct hrtimer hrtimer; +}; + +static struct pmu rapl_pmu_class; +static cpumask_t rapl_cpu_mask; +static int rapl_cntr_mask; + +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu); +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free); + +static inline u64 rapl_read_counter(struct perf_event *event) +{ + u64 raw; + rdmsrl(event->hw.event_base, raw); + return raw; +} + +static inline u64 rapl_scale(u64 v) +{ + /* + * scale delta to smallest unit (1/2^32) + * users must then scale back: count * 1/(1e9*2^32) to get Joules + * or use ldexp(count, -32). + * Watts = Joules/Time delta + */ + return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); +} + +static u64 rapl_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev_raw_count, new_raw_count; + s64 delta, sdelta; + int shift = RAPL_CNTR_WIDTH; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + rdmsrl(event->hw.event_base, new_raw_count); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) { + cpu_relax(); + goto again; + } + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + * + * Careful, not all hw sign-extends above the physical width + * of the count. 
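As an illustrative aside (a sketch, not part of the patch): the 32.32 fixed-point counts described above are typically post-processed in user space along these lines, where raw is the value read from the perf counter and seconds is the length of the measurement window (compile with -lm).

#include <math.h>
#include <stdio.h>

static void report_energy(unsigned long long raw, double seconds)
{
	double joules = ldexp((double)raw, -32);	/* raw * 2^-32 */
	double watts  = joules / seconds;

	printf("%.6f J over %.3f s => %.3f W\n", joules, seconds, watts);
}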
+ */ + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + sdelta = rapl_scale(delta); + + local64_add(sdelta, &event->count); + + return new_raw_count; +} + +static void rapl_start_hrtimer(struct rapl_pmu *pmu) +{ + __hrtimer_start_range_ns(&pmu->hrtimer, + pmu->timer_interval, 0, + HRTIMER_MODE_REL_PINNED, 0); +} + +static void rapl_stop_hrtimer(struct rapl_pmu *pmu) +{ + hrtimer_cancel(&pmu->hrtimer); +} + +static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct perf_event *event; + unsigned long flags; + + if (!pmu->n_active) + return HRTIMER_NORESTART; + + spin_lock_irqsave(&pmu->lock, flags); + + list_for_each_entry(event, &pmu->active_list, active_entry) { + rapl_event_update(event); + } + + spin_unlock_irqrestore(&pmu->lock, flags); + + hrtimer_forward_now(hrtimer, pmu->timer_interval); + + return HRTIMER_RESTART; +} + +static void rapl_hrtimer_init(struct rapl_pmu *pmu) +{ + struct hrtimer *hr = &pmu->hrtimer; + + hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hr->function = rapl_hrtimer_handle; +} + +static void __rapl_pmu_event_start(struct rapl_pmu *pmu, + struct perf_event *event) +{ + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + + list_add_tail(&event->active_entry, &pmu->active_list); + + local64_set(&event->hw.prev_count, rapl_read_counter(event)); + + pmu->n_active++; + if (pmu->n_active == 1) + rapl_start_hrtimer(pmu); +} + +static void rapl_pmu_event_start(struct perf_event *event, int mode) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + __rapl_pmu_event_start(pmu, event); + spin_unlock_irqrestore(&pmu->lock, flags); +} + +static void rapl_pmu_event_stop(struct perf_event *event, int mode) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + + /* mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { + WARN_ON_ONCE(pmu->n_active <= 0); + pmu->n_active--; + if (pmu->n_active == 0) + rapl_stop_hrtimer(pmu); + + list_del(&event->active_entry); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + /* check if update of sw counter is necessary */ + if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + rapl_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } + + spin_unlock_irqrestore(&pmu->lock, flags); +} + +static int rapl_pmu_event_add(struct perf_event *event, int mode) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) + __rapl_pmu_event_start(pmu, event); + + spin_unlock_irqrestore(&pmu->lock, flags); + + return 0; +} + +static void rapl_pmu_event_del(struct perf_event *event, int flags) +{ + rapl_pmu_event_stop(event, PERF_EF_UPDATE); +} + +static int rapl_pmu_event_init(struct perf_event *event) +{ + u64 cfg = event->attr.config & RAPL_EVENT_MASK; + int bit, msr, ret = 0; + + /* only look at RAPL events */ + if (event->attr.type != rapl_pmu_class.type) + return -ENOENT; + + /* check only supported bits are set */ + if (event->attr.config & ~RAPL_EVENT_MASK) + 
return -EINVAL; + + /* + * check event is known (determines counter) + */ + switch (cfg) { + case INTEL_RAPL_PP0: + bit = RAPL_IDX_PP0_NRG_STAT; + msr = MSR_PP0_ENERGY_STATUS; + break; + case INTEL_RAPL_PKG: + bit = RAPL_IDX_PKG_NRG_STAT; + msr = MSR_PKG_ENERGY_STATUS; + break; + case INTEL_RAPL_RAM: + bit = RAPL_IDX_RAM_NRG_STAT; + msr = MSR_DRAM_ENERGY_STATUS; + break; + case INTEL_RAPL_PP1: + bit = RAPL_IDX_PP1_NRG_STAT; + msr = MSR_PP1_ENERGY_STATUS; + break; + default: + return -EINVAL; + } + /* check event supported */ + if (!(rapl_cntr_mask & (1 << bit))) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.sample_period) /* no sampling */ + return -EINVAL; + + /* must be done before validate_group */ + event->hw.event_base = msr; + event->hw.config = cfg; + event->hw.idx = bit; + + return ret; +} + +static void rapl_pmu_event_read(struct perf_event *event) +{ + rapl_event_update(event); +} + +static ssize_t rapl_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask); + + buf[n++] = '\n'; + buf[n] = '\0'; + return n; +} + +static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); + +static struct attribute *rapl_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_attr_group = { + .attrs = rapl_pmu_attrs, +}; + +EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); +EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); +EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); +EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); + +EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); +EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); +EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); +EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); + +/* + * we compute in 0.23 nJ increments regardless of MSR + */ +EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); + +static struct attribute *rapl_events_srv_attr[] = { + EVENT_PTR(rapl_cores), + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_ram), + + EVENT_PTR(rapl_cores_unit), + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_ram_unit), + + EVENT_PTR(rapl_cores_scale), + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_ram_scale), + NULL, +}; + +static struct attribute *rapl_events_cln_attr[] = { + EVENT_PTR(rapl_cores), + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_gpu), + + EVENT_PTR(rapl_cores_unit), + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_gpu_unit), + + EVENT_PTR(rapl_cores_scale), + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_gpu_scale), + NULL, +}; + +static struct attribute_group rapl_pmu_events_group = { + .name = "events", + .attrs = NULL, /* patched at runtime */ +}; + +DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); +static struct attribute *rapl_formats_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_format_group = { + .name = "format", + .attrs = rapl_formats_attr, +}; + +const struct attribute_group *rapl_attr_groups[] = { + &rapl_pmu_attr_group, + 
&rapl_pmu_format_group, + &rapl_pmu_events_group, + NULL, +}; + +static struct pmu rapl_pmu_class = { + .attr_groups = rapl_attr_groups, + .task_ctx_nr = perf_invalid_context, /* system-wide only */ + .event_init = rapl_pmu_event_init, + .add = rapl_pmu_event_add, /* must have */ + .del = rapl_pmu_event_del, /* must have */ + .start = rapl_pmu_event_start, + .stop = rapl_pmu_event_stop, + .read = rapl_pmu_event_read, +}; + +static void rapl_cpu_exit(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int i, phys_id = topology_physical_package_id(cpu); + int target = -1; + + /* find a new cpu on same package */ + for_each_online_cpu(i) { + if (i == cpu) + continue; + if (phys_id == topology_physical_package_id(i)) { + target = i; + break; + } + } + /* + * clear cpu from cpumask + * if was set in cpumask and still some cpu on package, + * then move to new cpu + */ + if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0) + cpumask_set_cpu(target, &rapl_cpu_mask); + + WARN_ON(cpumask_empty(&rapl_cpu_mask)); + /* + * migrate events and context to new cpu + */ + if (target >= 0) + perf_pmu_migrate_context(pmu->pmu, cpu, target); + + /* cancel overflow polling timer for CPU */ + rapl_stop_hrtimer(pmu); +} + +static void rapl_cpu_init(int cpu) +{ + int i, phys_id = topology_physical_package_id(cpu); + + /* check if phys_is is already covered */ + for_each_cpu(i, &rapl_cpu_mask) { + if (phys_id == topology_physical_package_id(i)) + return; + } + /* was not found, so add it */ + cpumask_set_cpu(cpu, &rapl_cpu_mask); +} + +static int rapl_cpu_prepare(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int phys_id = topology_physical_package_id(cpu); + u64 ms; + + if (pmu) + return 0; + + if (phys_id < 0) + return -1; + + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); + if (!pmu) + return -1; + + spin_lock_init(&pmu->lock); + + INIT_LIST_HEAD(&pmu->active_list); + + /* + * grab power unit as: 1/2^unit Joules + * + * we cache in local PMU instance + */ + rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit); + pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL; + pmu->pmu = &rapl_pmu_class; + + /* + * use reference of 200W for scaling the timeout + * to avoid missing counter overflows. 
+ * 200W = 200 Joules/sec + * divide interval by 2 to avoid lockstep (2 * 100) + * if hw unit is 32, then we use 2 ms 1/200/2 + */ + if (pmu->hw_unit < 32) + ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1)); + else + ms = 2; + + pmu->timer_interval = ms_to_ktime(ms); + + rapl_hrtimer_init(pmu); + + /* set RAPL pmu for this cpu for now */ + per_cpu(rapl_pmu, cpu) = pmu; + per_cpu(rapl_pmu_to_free, cpu) = NULL; + + return 0; +} + +static void rapl_cpu_kfree(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); + + kfree(pmu); + + per_cpu(rapl_pmu_to_free, cpu) = NULL; +} + +static int rapl_cpu_dying(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + + if (!pmu) + return 0; + + per_cpu(rapl_pmu, cpu) = NULL; + + per_cpu(rapl_pmu_to_free, cpu) = pmu; + + return 0; +} + +static int rapl_cpu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + rapl_cpu_prepare(cpu); + break; + case CPU_STARTING: + rapl_cpu_init(cpu); + break; + case CPU_UP_CANCELED: + case CPU_DYING: + rapl_cpu_dying(cpu); + break; + case CPU_ONLINE: + case CPU_DEAD: + rapl_cpu_kfree(cpu); + break; + case CPU_DOWN_PREPARE: + rapl_cpu_exit(cpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static const struct x86_cpu_id rapl_cpu_match[] = { + [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 }, + [1] = {}, +}; + +static int __init rapl_pmu_init(void) +{ + struct rapl_pmu *pmu; + int cpu, ret; + + /* + * check for Intel processor family 6 + */ + if (!x86_match_cpu(rapl_cpu_match)) + return 0; + + /* check supported CPU */ + switch (boot_cpu_data.x86_model) { + case 42: /* Sandy Bridge */ + case 58: /* Ivy Bridge */ + case 60: /* Haswell */ + case 69: /* Haswell-Celeron */ + rapl_cntr_mask = RAPL_IDX_CLN; + rapl_pmu_events_group.attrs = rapl_events_cln_attr; + break; + case 45: /* Sandy Bridge-EP */ + case 62: /* IvyTown */ + rapl_cntr_mask = RAPL_IDX_SRV; + rapl_pmu_events_group.attrs = rapl_events_srv_attr; + break; + + default: + /* unsupported */ + return 0; + } + get_online_cpus(); + + for_each_online_cpu(cpu) { + rapl_cpu_prepare(cpu); + rapl_cpu_init(cpu); + } + + perf_cpu_notifier(rapl_cpu_notifier); + + ret = perf_pmu_register(&rapl_pmu_class, "power", -1); + if (WARN_ON(ret)) { + pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret); + put_online_cpus(); + return -1; + } + + pmu = __get_cpu_var(rapl_pmu); + + pr_info("RAPL PMU detected, hw unit 2^-%d Joules," + " API unit is 2^-32 Joules," + " %d fixed counters" + " %llu ms ovfl timer\n", + pmu->hw_unit, + hweight32(rapl_cntr_mask), + ktime_to_ms(pmu->timer_interval)); + + put_online_cpus(); + + return 0; +} +device_initcall(rapl_pmu_init); diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index 88db010845cb26..384df5105fbc98 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c @@ -31,20 +31,6 @@ static int __init x86_rdrand_setup(char *s) } __setup("nordrand", x86_rdrand_setup); -/* We can't use arch_get_random_long() here since alternatives haven't run */ -static inline int rdrand_long(unsigned long *v) -{ - int ok; - asm volatile("1: " RDRAND_LONG "\n\t" - "jc 2f\n\t" - "decl %0\n\t" - "jnz 1b\n\t" - "2:" - : "=r" (ok), "=a" (*v) - : "0" (RDRAND_RETRY_LOOPS)); - return ok; -} - /* * Force a reseed cycle; we are architecturally guaranteed a reseed * after no more than 512 128-bit chunks of random data. 
This also diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index aa0430d69b900b..3fa0e5ad86b445 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -1,6 +1,5 @@ #include #include -#include #include #include #include "cpu.h" diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index 75c5ad5d35cc1a..ef9c2a0078bd66 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c @@ -1,5 +1,4 @@ #include -#include #include #include "cpu.h" diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 18677a90d6a35e..a57902efe2d597 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -7,7 +7,6 @@ * */ -#include #include #include #include diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index 5d3fe8d36e4ac7..f6dfd9334b67e5 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -1,6 +1,5 @@ #include #include -#include #include #include diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 51e2988c5728d5..a2a4f469788965 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller) pushl $0 /* Pass NULL as regs pointer */ movl 4*4(%esp), %eax movl 0x4(%ebp), %edx - leal function_trace_op, %ecx + movl function_trace_op, %ecx subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call @@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller) movl 12*4(%esp), %eax /* Load ip (1st parameter) */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ - leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e21b0785a85b2f..1e96c3628bf24f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -88,7 +88,7 @@ END(function_hook) MCOUNT_SAVE_FRAME \skip /* Load the ftrace_ops into the 3rd parameter */ - leaq function_trace_op, %rdx + movq function_trace_op(%rip), %rdx /* Load ip into the first parameter */ movq RIP(%rsp), %rdi diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index f66ff162dce864..a67b47c31314ba 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c new file mode 100644 index 00000000000000..c3aae66728438f --- /dev/null +++ b/arch/x86/kernel/iosf_mbi.c @@ -0,0 +1,226 @@ +/* + * IOSF-SB MailBox Interface Driver + * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * + * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a + * mailbox interface (MBI) to communicate with multiple devices. This + * driver implements access to this interface for those platforms that can + * enumerate the device using PCI. 
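As an illustrative aside (a sketch, not part of the patch): the MCR word built by iosf_mbi_form_mcr() below packs its arguments as shown; the symbolic MBI_* and unit/opcode constants are expected to come from the asm/iosf_mbi.h header introduced by the same series, so the numeric values in the example wrapper are placeholders only.

/*
 * MCR word layout, as assembled by iosf_mbi_form_mcr():
 *
 *   31        24 23        16 15         8 7          0
 *  +------------+------------+------------+------------+
 *  |   opcode   |    port    | offset low | MBI_ENABLE |
 *  +------------+------------+------------+------------+
 *                             (offset & MBI_MASK_LO)
 *
 * Offset bits above the low byte go into the separate MCRX register
 * (offset & MBI_MASK_HI), written before the MCR itself.
 */
static int example_sideband_read(u32 offset, u32 *val)
{
	/* 0x04 / 0x10 are placeholder unit and read-opcode values */
	return iosf_mbi_read(0x04, 0x10, offset, val);
}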
+ */ + +#include +#include +#include +#include + +#include + +static DEFINE_SPINLOCK(iosf_mbi_lock); + +static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset) +{ + return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE; +} + +static struct pci_dev *mbi_pdev; /* one mbi device */ + +static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr) +{ + int result; + + if (!mbi_pdev) + return -ENODEV; + + if (mcrx) { + result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, + mcrx); + if (result < 0) + goto fail_read; + } + + result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); + if (result < 0) + goto fail_read; + + result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); + if (result < 0) + goto fail_read; + + return 0; + +fail_read: + dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); + return result; +} + +static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) +{ + int result; + + if (!mbi_pdev) + return -ENODEV; + + result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); + if (result < 0) + goto fail_write; + + if (mcrx) { + result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, + mcrx); + if (result < 0) + goto fail_write; + } + + result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); + if (result < 0) + goto fail_write; + + return 0; + +fail_write: + dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); + return result; +} + +int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) +{ + u32 mcr, mcrx; + unsigned long flags; + int ret; + + /*Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr); + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_read); + +int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr) +{ + u32 mcr, mcrx; + unsigned long flags; + int ret; + + /*Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr); + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_write); + +int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask) +{ + u32 mcr, mcrx; + u32 value; + unsigned long flags; + int ret; + + /*Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + + /* Read current mdr value */ + ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value); + if (ret < 0) { + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + return ret; + } + + /* Apply mask */ + value &= ~mask; + mdr &= mask; + value |= mdr; + + /* Write back */ + ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value); + + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_modify); + +static int iosf_mbi_probe(struct pci_dev *pdev, + const struct pci_device_id *unused) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "error: could not enable 
device\n"); + return ret; + } + + mbi_pdev = pci_dev_get(pdev); + return 0; +} + +static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids); + +static struct pci_driver iosf_mbi_pci_driver = { + .name = "iosf_mbi_pci", + .probe = iosf_mbi_probe, + .id_table = iosf_mbi_pci_ids, +}; + +static int __init iosf_mbi_init(void) +{ + return pci_register_driver(&iosf_mbi_pci_driver); +} + +static void __exit iosf_mbi_exit(void) +{ + pci_unregister_driver(&iosf_mbi_pci_driver); + if (mbi_pdev) { + pci_dev_put(mbi_pdev); + mbi_pdev = NULL; + } +} + +module_init(iosf_mbi_init); +module_exit(iosf_mbi_exit); + +MODULE_AUTHOR("David E. Box "); +MODULE_DESCRIPTION("IOSF Mailbox Interface accessor"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 22d0687e7fda15..dbb60878b744d9 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -193,9 +193,13 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) if (!handle_irq(irq, regs)) { ack_APIC_irq(); - if (printk_ratelimit()) - pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n", - __func__, smp_processor_id(), vector, irq); + if (irq != VECTOR_RETRIGGERED) { + pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n", + __func__, smp_processor_id(), + vector, irq); + } else { + __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); + } } irq_exit(); @@ -262,6 +266,76 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs) EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); #ifdef CONFIG_HOTPLUG_CPU +/* + * This cpu is going to be removed and its vectors migrated to the remaining + * online cpus. Check to see if there are enough vectors in the remaining cpus. + * This function is protected by stop_machine(). + */ +int check_irq_vectors_for_cpu_disable(void) +{ + int irq, cpu; + unsigned int this_cpu, vector, this_count, count; + struct irq_desc *desc; + struct irq_data *data; + struct cpumask affinity_new, online_new; + + this_cpu = smp_processor_id(); + cpumask_copy(&online_new, cpu_online_mask); + cpu_clear(this_cpu, online_new); + + this_count = 0; + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + irq = __this_cpu_read(vector_irq[vector]); + if (irq >= 0) { + desc = irq_to_desc(irq); + data = irq_desc_get_irq_data(desc); + cpumask_copy(&affinity_new, data->affinity); + cpu_clear(this_cpu, affinity_new); + + /* Do not count inactive or per-cpu irqs. */ + if (!irq_has_action(irq) || irqd_is_per_cpu(data)) + continue; + + /* + * A single irq may be mapped to multiple + * cpu's vector_irq[] (for example IOAPIC cluster + * mode). In this case we have two + * possibilities: + * + * 1) the resulting affinity mask is empty; that is + * this the down'd cpu is the last cpu in the irq's + * affinity mask, or + * + * 2) the resulting affinity mask is no longer + * a subset of the online cpus but the affinity + * mask is not zero; that is the down'd cpu is the + * last online cpu in a user set affinity mask. 
+ */ + if (cpumask_empty(&affinity_new) || + !cpumask_subset(&affinity_new, &online_new)) + this_count++; + } + } + + count = 0; + for_each_online_cpu(cpu) { + if (cpu == this_cpu) + continue; + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; + vector++) { + if (per_cpu(vector_irq, cpu)[vector] < 0) + count++; + } + } + + if (count < this_count) { + pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n", + this_cpu, this_count, count); + return -ERANGE; + } + return 0; +} + /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ void fixup_irqs(void) { @@ -344,7 +418,7 @@ void fixup_irqs(void) for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { unsigned int irr; - if (__this_cpu_read(vector_irq[vector]) < 0) + if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED) continue; irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); @@ -355,11 +429,14 @@ void fixup_irqs(void) data = irq_desc_get_irq_data(desc); chip = irq_data_get_irq_chip(data); raw_spin_lock(&desc->lock); - if (chip->irq_retrigger) + if (chip->irq_retrigger) { chip->irq_retrigger(data); + __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); + } raw_spin_unlock(&desc->lock); } - __this_cpu_write(vector_irq[vector], -1); + if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED) + __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); } } #endif diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index a2a1fbc594ff90..7f50156542fbde 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -52,7 +52,7 @@ static struct irqaction irq2 = { }; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { - [0 ... NR_VECTORS - 1] = -1, + [0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED, }; int vector_used_by_percpu_irq(unsigned int vector) @@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector) int cpu; for_each_online_cpu(cpu) { - if (per_cpu(vector_irq, cpu)[vector] != -1) + if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED) return 1; } diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 836f8322960e4b..7ec1d5f8d28339 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c new file mode 100644 index 00000000000000..c2bedaea11f7b9 --- /dev/null +++ b/arch/x86/kernel/ksysfs.c @@ -0,0 +1,340 @@ +/* + * Architecture specific sysfs attributes in /sys/kernel + * + * Copyright (C) 2007, Intel Corp. + * Huang Ying + * Copyright (C) 2013, 2013 Red Hat, Inc. 
+ * Dave Young + * + * This file is released under the GPLv2 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static ssize_t version_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "0x%04x\n", boot_params.hdr.version); +} + +static struct kobj_attribute boot_params_version_attr = __ATTR_RO(version); + +static ssize_t boot_params_data_read(struct file *fp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + memcpy(buf, (void *)&boot_params + off, count); + return count; +} + +static struct bin_attribute boot_params_data_attr = { + .attr = { + .name = "data", + .mode = S_IRUGO, + }, + .read = boot_params_data_read, + .size = sizeof(boot_params), +}; + +static struct attribute *boot_params_version_attrs[] = { + &boot_params_version_attr.attr, + NULL, +}; + +static struct bin_attribute *boot_params_data_attrs[] = { + &boot_params_data_attr, + NULL, +}; + +static struct attribute_group boot_params_attr_group = { + .attrs = boot_params_version_attrs, + .bin_attrs = boot_params_data_attrs, +}; + +static int kobj_to_setup_data_nr(struct kobject *kobj, int *nr) +{ + const char *name; + + name = kobject_name(kobj); + return kstrtoint(name, 10, nr); +} + +static int get_setup_data_paddr(int nr, u64 *paddr) +{ + int i = 0; + struct setup_data *data; + u64 pa_data = boot_params.hdr.setup_data; + + while (pa_data) { + if (nr == i) { + *paddr = pa_data; + return 0; + } + data = ioremap_cache(pa_data, sizeof(*data)); + if (!data) + return -ENOMEM; + + pa_data = data->next; + iounmap(data); + i++; + } + return -EINVAL; +} + +static int __init get_setup_data_size(int nr, size_t *size) +{ + int i = 0; + struct setup_data *data; + u64 pa_data = boot_params.hdr.setup_data; + + while (pa_data) { + data = ioremap_cache(pa_data, sizeof(*data)); + if (!data) + return -ENOMEM; + if (nr == i) { + *size = data->len; + iounmap(data); + return 0; + } + + pa_data = data->next; + iounmap(data); + i++; + } + return -EINVAL; +} + +static ssize_t type_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int nr, ret; + u64 paddr; + struct setup_data *data; + + ret = kobj_to_setup_data_nr(kobj, &nr); + if (ret) + return ret; + + ret = get_setup_data_paddr(nr, &paddr); + if (ret) + return ret; + data = ioremap_cache(paddr, sizeof(*data)); + if (!data) + return -ENOMEM; + + ret = sprintf(buf, "0x%x\n", data->type); + iounmap(data); + return ret; +} + +static ssize_t setup_data_data_read(struct file *fp, + struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, + loff_t off, size_t count) +{ + int nr, ret = 0; + u64 paddr; + struct setup_data *data; + void *p; + + ret = kobj_to_setup_data_nr(kobj, &nr); + if (ret) + return ret; + + ret = get_setup_data_paddr(nr, &paddr); + if (ret) + return ret; + data = ioremap_cache(paddr, sizeof(*data)); + if (!data) + return -ENOMEM; + + if (off > data->len) { + ret = -EINVAL; + goto out; + } + + if (count > data->len - off) + count = data->len - off; + + if (!count) + goto out; + + ret = count; + p = ioremap_cache(paddr + sizeof(*data), data->len); + if (!p) { + ret = -ENOMEM; + goto out; + } + memcpy(buf, p + off, count); + iounmap(p); +out: + iounmap(data); + return ret; +} + +static struct kobj_attribute type_attr = __ATTR_RO(type); + +static struct bin_attribute data_attr = { + .attr = { + .name = "data", + .mode = S_IRUGO, + }, + .read = setup_data_data_read, +}; + +static struct attribute 
*setup_data_type_attrs[] = { + &type_attr.attr, + NULL, +}; + +static struct bin_attribute *setup_data_data_attrs[] = { + &data_attr, + NULL, +}; + +static struct attribute_group setup_data_attr_group = { + .attrs = setup_data_type_attrs, + .bin_attrs = setup_data_data_attrs, +}; + +static int __init create_setup_data_node(struct kobject *parent, + struct kobject **kobjp, int nr) +{ + int ret = 0; + size_t size; + struct kobject *kobj; + char name[16]; /* should be enough for setup_data nodes numbers */ + snprintf(name, 16, "%d", nr); + + kobj = kobject_create_and_add(name, parent); + if (!kobj) + return -ENOMEM; + + ret = get_setup_data_size(nr, &size); + if (ret) + goto out_kobj; + + data_attr.size = size; + ret = sysfs_create_group(kobj, &setup_data_attr_group); + if (ret) + goto out_kobj; + *kobjp = kobj; + + return 0; +out_kobj: + kobject_put(kobj); + return ret; +} + +static void __init cleanup_setup_data_node(struct kobject *kobj) +{ + sysfs_remove_group(kobj, &setup_data_attr_group); + kobject_put(kobj); +} + +static int __init get_setup_data_total_num(u64 pa_data, int *nr) +{ + int ret = 0; + struct setup_data *data; + + *nr = 0; + while (pa_data) { + *nr += 1; + data = ioremap_cache(pa_data, sizeof(*data)); + if (!data) { + ret = -ENOMEM; + goto out; + } + pa_data = data->next; + iounmap(data); + } + +out: + return ret; +} + +static int __init create_setup_data_nodes(struct kobject *parent) +{ + struct kobject *setup_data_kobj, **kobjp; + u64 pa_data; + int i, j, nr, ret = 0; + + pa_data = boot_params.hdr.setup_data; + if (!pa_data) + return 0; + + setup_data_kobj = kobject_create_and_add("setup_data", parent); + if (!setup_data_kobj) { + ret = -ENOMEM; + goto out; + } + + ret = get_setup_data_total_num(pa_data, &nr); + if (ret) + goto out_setup_data_kobj; + + kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL); + if (!kobjp) { + ret = -ENOMEM; + goto out_setup_data_kobj; + } + + for (i = 0; i < nr; i++) { + ret = create_setup_data_node(setup_data_kobj, kobjp + i, i); + if (ret) + goto out_clean_nodes; + } + + kfree(kobjp); + return 0; + +out_clean_nodes: + for (j = i - 1; j > 0; j--) + cleanup_setup_data_node(*(kobjp + j)); + kfree(kobjp); +out_setup_data_kobj: + kobject_put(setup_data_kobj); +out: + return ret; +} + +static int __init boot_params_ksysfs_init(void) +{ + int ret; + struct kobject *boot_params_kobj; + + boot_params_kobj = kobject_create_and_add("boot_params", + kernel_kobj); + if (!boot_params_kobj) { + ret = -ENOMEM; + goto out; + } + + ret = sysfs_create_group(boot_params_kobj, &boot_params_attr_group); + if (ret) + goto out_boot_params_kobj; + + ret = create_setup_data_nodes(boot_params_kobj); + if (ret) + goto out_create_group; + + return 0; +out_create_group: + sysfs_remove_group(boot_params_kobj, &boot_params_attr_group); +out_boot_params_kobj: + kobject_put(boot_params_kobj); +out: + return ret; +} + +arch_initcall(boot_params_ksysfs_init); diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 5b19e4d78b0085..1667b1de8d5d62 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 871be4a84c7d75..da15918d1c81ba 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c 
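As an illustrative aside (a sketch, not part of the patch): the ksysfs interface added above ends up under /sys/kernel/boot_params, with one numbered setup_data directory per entry; a user-space reader might poke at it like this (the version value in the comment is only indicative).

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f;

	f = fopen("/sys/kernel/boot_params/version", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("boot protocol version: %s", buf);	/* e.g. "0x020c" */
	if (f)
		fclose(f);

	/* each setup_data entry is exported as setup_data/<n>/{type,data} */
	f = fopen("/sys/kernel/boot_params/setup_data/0/type", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("setup_data[0] type: %s", buf);
	if (f)
		fclose(f);

	return 0;
}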
index 6f1236c29c4b57..0de43e98ce0860 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index cb233bc9dee356..06853e6703541f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -295,6 +295,8 @@ static void __init reserve_brk(void) _brk_start = 0; } +u64 relocated_ramdisk; + #ifdef CONFIG_BLK_DEV_INITRD static u64 __init get_ramdisk_image(void) @@ -321,25 +323,24 @@ static void __init relocate_initrd(void) u64 ramdisk_image = get_ramdisk_image(); u64 ramdisk_size = get_ramdisk_size(); u64 area_size = PAGE_ALIGN(ramdisk_size); - u64 ramdisk_here; unsigned long slop, clen, mapaddr; char *p, *q; /* We need to move the initrd down into directly mapped mem */ - ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), - area_size, PAGE_SIZE); + relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), + area_size, PAGE_SIZE); - if (!ramdisk_here) + if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", - ramdisk_size); + ramdisk_size); /* Note: this includes all the mem currently occupied by the initrd, we rely on that fact to keep the data intact. */ - memblock_reserve(ramdisk_here, area_size); - initrd_start = ramdisk_here + PAGE_OFFSET; + memblock_reserve(relocated_ramdisk, area_size); + initrd_start = relocated_ramdisk + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", - ramdisk_here, ramdisk_here + ramdisk_size - 1); + relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); q = (char *)initrd_start; @@ -363,7 +364,7 @@ static void __init relocate_initrd(void) printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" " [mem %#010llx-%#010llx]\n", ramdisk_image, ramdisk_image + ramdisk_size - 1, - ramdisk_here, ramdisk_here + ramdisk_size - 1); + relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); } static void __init early_reserve_initrd(void) @@ -447,6 +448,9 @@ static void __init parse_setup_data(void) case SETUP_DTB: add_dtb(pa_data); break; + case SETUP_EFI: + parse_efi_setup(pa_data, data_len); + break; default: break; } @@ -823,6 +827,20 @@ static void __init trim_low_memory_range(void) memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE)); } +/* + * Dump out kernel offset information on panic. + */ +static int +dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) +{ + pr_emerg("Kernel Offset: 0x%lx from 0x%lx " + "(relocation range: 0x%lx-0x%lx)\n", + (unsigned long)&_text - __START_KERNEL, __START_KERNEL, + __START_KERNEL_map, MODULES_VADDR-1); + + return 0; +} + /* * Determine if we were loaded by an EFI loader. 
If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures @@ -924,8 +942,6 @@ void __init setup_arch(char **cmdline_p) iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; setup_memory_map(); parse_setup_data(); - /* update the e820_saved too */ - e820_reserve_setup_data(); copy_edd(); @@ -987,6 +1003,8 @@ void __init setup_arch(char **cmdline_p) early_dump_pci_devices(); #endif + /* update the e820_saved too */ + e820_reserve_setup_data(); finish_e820_parsing(); if (efi_enabled(EFI_BOOT)) @@ -1248,3 +1266,15 @@ void __init i386_reserve_resources(void) } #endif /* CONFIG_X86_32 */ + +static struct notifier_block kernel_offset_notifier = { + .notifier_call = dump_kernel_offset +}; + +static int __init register_kernel_offset_dumper(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &kernel_offset_notifier); + return 0; +} +__initcall(register_kernel_offset_dumper); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 85dc05a3aa02b3..a32da804252e37 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1312,6 +1312,12 @@ void cpu_disable_common(void) int native_cpu_disable(void) { + int ret; + + ret = check_irq_vectors_for_cpu_disable(); + if (ret) + return ret; + clear_local_APIC(); cpu_disable_common(); @@ -1417,7 +1423,9 @@ static inline void mwait_play_dead(void) * The WBINVD is insufficient due to the spurious-wakeup * case where we return around the loop. */ + mb(); clflush(mwait_ptr); + mb(); __monitor(mwait_ptr, 0, 0); mb(); __mwait(eax, 0); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b857ed890b4c62..57409f6b8c623e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -211,21 +211,17 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ exception_exit(prev_state); \ } -DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, - regs->ip) -DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) -DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) -DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, - regs->ip) -DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", - coprocessor_segment_overrun) -DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) -DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) +DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip ) +DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow ) +DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds ) +DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip ) +DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun ) +DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS ) +DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present ) #ifdef CONFIG_X86_32 -DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) +DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment ) #endif -DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, - BUS_ADRALN, 0) +DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 ) #ifdef CONFIG_X86_64 /* Runs on IST stack */ diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 930e5d48f560d0..a3acbac2ee7254 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -37,13 +38,244 @@ static 
int __read_mostly tsc_unstable; erroneous rdtsc usage on !cpu_has_tsc processors */ static int __read_mostly tsc_disabled = -1; +static struct static_key __use_tsc = STATIC_KEY_INIT; + int tsc_clocksource_reliable; + +/* + * Use a ring-buffer like data structure, where a writer advances the head by + * writing a new data entry and a reader advances the tail when it observes a + * new entry. + * + * Writers are made to wait on readers until there's space to write a new + * entry. + * + * This means that we can always use an {offset, mul} pair to compute a ns + * value that is 'roughly' in the right direction, even if we're writing a new + * {offset, mul} pair during the clock read. + * + * The down-side is that we can no longer guarantee strict monotonicity anymore + * (assuming the TSC was that to begin with), because while we compute the + * intersection point of the two clock slopes and make sure the time is + * continuous at the point of switching; we can no longer guarantee a reader is + * strictly before or after the switch point. + * + * It does mean a reader no longer needs to disable IRQs in order to avoid + * CPU-Freq updates messing with his times, and similarly an NMI reader will + * no longer run the risk of hitting half-written state. + */ + +struct cyc2ns { + struct cyc2ns_data data[2]; /* 0 + 2*24 = 48 */ + struct cyc2ns_data *head; /* 48 + 8 = 56 */ + struct cyc2ns_data *tail; /* 56 + 8 = 64 */ +}; /* exactly fits one cacheline */ + +static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns); + +struct cyc2ns_data *cyc2ns_read_begin(void) +{ + struct cyc2ns_data *head; + + preempt_disable(); + + head = this_cpu_read(cyc2ns.head); + /* + * Ensure we observe the entry when we observe the pointer to it. + * matches the wmb from cyc2ns_write_end(). + */ + smp_read_barrier_depends(); + head->__count++; + barrier(); + + return head; +} + +void cyc2ns_read_end(struct cyc2ns_data *head) +{ + barrier(); + /* + * If we're the outer most nested read; update the tail pointer + * when we're done. This notifies possible pending writers + * that we've observed the head pointer and that the other + * entry is now free. + */ + if (!--head->__count) { + /* + * x86-TSO does not reorder writes with older reads; + * therefore once this write becomes visible to another + * cpu, we must be finished reading the cyc2ns_data. + * + * matches with cyc2ns_write_begin(). + */ + this_cpu_write(cyc2ns.tail, head); + } + preempt_enable(); +} + +/* + * Begin writing a new @data entry for @cpu. + * + * Assumes some sort of write side lock; currently 'provided' by the assumption + * that cpufreq will call its notifiers sequentially. + */ +static struct cyc2ns_data *cyc2ns_write_begin(int cpu) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + struct cyc2ns_data *data = c2n->data; + + if (data == c2n->head) + data++; + + /* XXX send an IPI to @cpu in order to guarantee a read? */ + + /* + * When we observe the tail write from cyc2ns_read_end(), + * the cpu must be done with that entry and its safe + * to start writing to it. + */ + while (c2n->tail == data) + cpu_relax(); + + return data; +} + +static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + + /* + * Ensure the @data writes are visible before we publish the + * entry. Matches the data-depencency in cyc2ns_read_begin(). 
+ */ + smp_wmb(); + + ACCESS_ONCE(c2n->head) = data; +} + +/* + * Accelerators for sched_clock() + * convert from cycles(64bits) => nanoseconds (64bits) + * basic equation: + * ns = cycles / (freq / ns_per_sec) + * ns = cycles * (ns_per_sec / freq) + * ns = cycles * (10^9 / (cpu_khz * 10^3)) + * ns = cycles * (10^6 / cpu_khz) + * + * Then we use scaling math (suggested by george@mvista.com) to get: + * ns = cycles * (10^6 * SC / cpu_khz) / SC + * ns = cycles * cyc2ns_scale / SC + * + * And since SC is a constant power of two, we can convert the div + * into a shift. + * + * We can use khz divisor instead of mhz to keep a better precision, since + * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. + * (mathieu.desnoyers@polymtl.ca) + * + * -johnstul@us.ibm.com "math is hard, lets go shopping!" + */ + +#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ + +static void cyc2ns_data_init(struct cyc2ns_data *data) +{ + data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR; + data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; + data->cyc2ns_offset = 0; + data->__count = 0; +} + +static void cyc2ns_init(int cpu) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + + cyc2ns_data_init(&c2n->data[0]); + cyc2ns_data_init(&c2n->data[1]); + + c2n->head = c2n->data; + c2n->tail = c2n->data; +} + +static inline unsigned long long cycles_2_ns(unsigned long long cyc) +{ + struct cyc2ns_data *data, *tail; + unsigned long long ns; + + /* + * See cyc2ns_read_*() for details; replicated in order to avoid + * an extra few instructions that came with the abstraction. + * Notable, it allows us to only do the __count and tail update + * dance when its actually needed. + */ + + preempt_disable(); + data = this_cpu_read(cyc2ns.head); + tail = this_cpu_read(cyc2ns.tail); + + if (likely(data == tail)) { + ns = data->cyc2ns_offset; + ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + } else { + data->__count++; + + barrier(); + + ns = data->cyc2ns_offset; + ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + + barrier(); + + if (!--data->__count) + this_cpu_write(cyc2ns.tail, data); + } + preempt_enable(); + + return ns; +} + +/* XXX surely we already have this someplace in the kernel?! */ +#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d)) + +static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) +{ + unsigned long long tsc_now, ns_now; + struct cyc2ns_data *data; + unsigned long flags; + + local_irq_save(flags); + sched_clock_idle_sleep_event(); + + if (!cpu_khz) + goto done; + + data = cyc2ns_write_begin(cpu); + + rdtscll(tsc_now); + ns_now = cycles_2_ns(tsc_now); + + /* + * Compute a new multiplier as per the above comment and ensure our + * time function is continuous; see the comment near struct + * cyc2ns_data. + */ + data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz); + data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; + data->cyc2ns_offset = ns_now - + mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + + cyc2ns_write_end(cpu, data); + +done: + sched_clock_idle_wakeup_event(0); + local_irq_restore(flags); +} /* * Scheduler clock - returns current time in nanosec units. */ u64 native_sched_clock(void) { - u64 this_offset; + u64 tsc_now; /* * Fall back to jiffies if there's no TSC available: @@ -53,16 +285,16 @@ u64 native_sched_clock(void) * very important for it to be as fast as the platform * can achieve it. 
) */ - if (unlikely(tsc_disabled)) { + if (!static_key_false(&__use_tsc)) { /* No locking but a rare wrong value is not a big deal: */ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); } /* read the Time Stamp Counter: */ - rdtscll(this_offset); + rdtscll(tsc_now); /* return the value in ns */ - return __cycles_2_ns(this_offset); + return cycles_2_ns(tsc_now); } /* We need to define a real function for sched_clock, to override the @@ -419,6 +651,16 @@ unsigned long native_calibrate_tsc(void) unsigned long flags, latch, ms, fast_calibrate; int hpet = is_hpet_enabled(), i, loopmin; + /* Calibrate TSC using MSR for Intel Atom SoCs */ + local_irq_save(flags); + i = try_msr_calibrate_tsc(&fast_calibrate); + local_irq_restore(flags); + if (i >= 0) { + if (i == 0) + pr_warn("Fast TSC calibration using MSR failed\n"); + return fast_calibrate; + } + local_irq_save(flags); fast_calibrate = quick_pit_calibrate(); local_irq_restore(flags); @@ -589,61 +831,11 @@ int recalibrate_cpu_khz(void) EXPORT_SYMBOL(recalibrate_cpu_khz); -/* Accelerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) - * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC - * - * And since SC is a constant power of two, we can convert the div - * into a shift. - * - * We can use khz divisor instead of mhz to keep a better precision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" - */ - -DEFINE_PER_CPU(unsigned long, cyc2ns); -DEFINE_PER_CPU(unsigned long long, cyc2ns_offset); - -static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) -{ - unsigned long long tsc_now, ns_now, *offset; - unsigned long flags, *scale; - - local_irq_save(flags); - sched_clock_idle_sleep_event(); - - scale = &per_cpu(cyc2ns, cpu); - offset = &per_cpu(cyc2ns_offset, cpu); - - rdtscll(tsc_now); - ns_now = __cycles_2_ns(tsc_now); - - if (cpu_khz) { - *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) + - cpu_khz / 2) / cpu_khz; - *offset = ns_now - mult_frac(tsc_now, *scale, - (1UL << CYC2NS_SCALE_FACTOR)); - } - - sched_clock_idle_wakeup_event(0); - local_irq_restore(flags); -} - static unsigned long long cyc2ns_suspend; void tsc_save_sched_clock_state(void) { - if (!sched_clock_stable) + if (!sched_clock_stable()) return; cyc2ns_suspend = sched_clock(); @@ -663,16 +855,26 @@ void tsc_restore_sched_clock_state(void) unsigned long flags; int cpu; - if (!sched_clock_stable) + if (!sched_clock_stable()) return; local_irq_save(flags); - __this_cpu_write(cyc2ns_offset, 0); + /* + * We're comming out of suspend, there's no concurrency yet; don't + * bother being nice about the RCU stuff, just write to both + * data fields. 
+ */ + + this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0); + this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0); + offset = cyc2ns_suspend - sched_clock(); - for_each_possible_cpu(cpu) - per_cpu(cyc2ns_offset, cpu) = offset; + for_each_possible_cpu(cpu) { + per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; + per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; + } local_irq_restore(flags); } @@ -795,7 +997,7 @@ void mark_tsc_unstable(char *reason) { if (!tsc_unstable) { tsc_unstable = 1; - sched_clock_stable = 0; + clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ @@ -995,14 +1197,18 @@ void __init tsc_init(void) * speed as the bootup CPU. (cpufreq notifiers will fix this * up if their speed diverges) */ - for_each_possible_cpu(cpu) + for_each_possible_cpu(cpu) { + cyc2ns_init(cpu); set_cyc2ns_scale(cpu_khz, cpu); + } if (tsc_disabled > 0) return; /* now allow native_sched_clock() to use rdtsc */ + tsc_disabled = 0; + static_key_slow_inc(&__use_tsc); if (!no_sched_irq_time) enable_sched_clock_irqtime(); diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c new file mode 100644 index 00000000000000..8b5434f4389fc4 --- /dev/null +++ b/arch/x86/kernel/tsc_msr.c @@ -0,0 +1,127 @@ +/* + * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms. + * + * TSC in Intel Atom SoC runs at a constant rate which can be figured + * by this formula: + * * + * See Intel 64 and IA-32 System Programming Guid section 16.12 and 30.11.5 + * for details. + * Especially some Intel Atom SoCs don't have PIT(i8254) or HPET, so MSR + * based calibration is the only option. + * + * + * Copyright (C) 2013 Intel Corporation + * Author: Bin Gao + * + * This file is released under the GPLv2. + */ + +#include +#include +#include +#include +#include + +/* CPU reference clock frequency: in KHz */ +#define FREQ_83 83200 +#define FREQ_100 99840 +#define FREQ_133 133200 +#define FREQ_166 166400 + +#define MAX_NUM_FREQS 8 + +/* + * According to Intel 64 and IA-32 System Programming Guide, + * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be + * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40]. + * Unfortunately some Intel Atom SoCs aren't quite compliant to this, + * so we need manually differentiate SoC families. This is what the + * field msr_plat does. + */ +struct freq_desc { + u8 x86_family; /* CPU family */ + u8 x86_model; /* model */ + u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */ + u32 freqs[MAX_NUM_FREQS]; +}; + +static struct freq_desc freq_desc_tables[] = { + /* PNW */ + { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } }, + /* CLV+ */ + { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } }, + /* TNG */ + { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } }, + /* VLV2 */ + { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, + /* ANN */ + { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } }, +}; + +static int match_cpu(u8 family, u8 model) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) { + if ((family == freq_desc_tables[i].x86_family) && + (model == freq_desc_tables[i].x86_model)) + return i; + } + + return -1; +} + +/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */ +#define id_to_freq(cpu_index, freq_id) \ + (freq_desc_tables[cpu_index].freqs[freq_id]) + +/* + * Do MSR calibration only for known/supported CPUs. 
+ * Return values: + * -1: CPU is unknown/unsupported for MSR based calibration + * 0: CPU is known/supported, but calibration failed + * 1: CPU is known/supported, and calibration succeeded + */ +int try_msr_calibrate_tsc(unsigned long *fast_calibrate) +{ + int cpu_index; + u32 lo, hi, ratio, freq_id, freq; + + cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model); + if (cpu_index < 0) + return -1; + + *fast_calibrate = 0; + + if (freq_desc_tables[cpu_index].msr_plat) { + rdmsr(MSR_PLATFORM_INFO, lo, hi); + ratio = (lo >> 8) & 0x1f; + } else { + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + ratio = (hi >> 8) & 0x1f; + } + pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio); + + if (!ratio) + return 0; + + /* Get FSB FREQ ID */ + rdmsr(MSR_FSB_FREQ, lo, hi); + freq_id = lo & 0x7; + freq = id_to_freq(cpu_index, freq_id); + pr_info("Resolved frequency ID: %u, frequency: %u KHz\n", + freq_id, freq); + if (!freq) + return 0; + + /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ + *fast_calibrate = freq * ratio; + pr_info("TSC runs at %lu KHz\n", *fast_calibrate); + +#ifdef CONFIG_X86_LOCAL_APIC + lapic_timer_frequency = (freq * 1000) / HZ; + pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency); +#endif + + return 1; +} diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index adfdf56a3714ec..26488487bc61e8 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -16,7 +16,6 @@ */ #include #include -#include #include #include #include diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 422fd82234700f..a4b451c6addfb7 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void) if (cpu_has_xsaveopt && eagerfpu != DISABLE) eagerfpu = ENABLE; + if (pcntxt_mask & XSTATE_EAGER) { + if (eagerfpu == DISABLE) { + pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n", + pcntxt_mask & XSTATE_EAGER); + pcntxt_mask &= ~XSTATE_EAGER; + } else { + eagerfpu = ENABLE; + } + } + pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", pcntxt_mask, xstate_size); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index dec48bfaddb8ff..775702f649ca68 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1350,8 +1350,12 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) return; } + if (!kvm_vcpu_is_bsp(apic->vcpu)) + value &= ~MSR_IA32_APICBASE_BSP; + vcpu->arch.apic_base = value; + /* update jump label if enable bit changes */ - if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { + if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { if (value & MSR_IA32_APICBASE_ENABLE) static_key_slow_dec_deferred(&apic_hw_disabled); else @@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) recalculate_apic_map(vcpu->kvm); } - if (!kvm_vcpu_is_bsp(apic->vcpu)) - value &= ~MSR_IA32_APICBASE_BSP; - - vcpu->arch.apic_base = value; if ((old_value ^ value) & X2APIC_ENABLE) { if (value & X2APIC_ENABLE) { u32 id = kvm_apic_id(apic); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b2fe1c252f35f9..da7837e1349da8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); - if (nested_cpu_has_ept(vmcs12)) - nested_ept_uninit_mmu_context(vcpu); + nested_ept_uninit_mmu_context(vcpu); 
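Stepping back from the KVM hunks to the cyc2ns rework in the tsc.c changes above, purely as an illustration and not part of the patch: the multiplier/shift pair that set_cyc2ns_scale() stores is a fixed-point encoding of 10^6 / cpu_khz, exactly as the scaling-math comment describes. A stand-alone sketch with a made-up 2.5 GHz clock shows the arithmetic; only the constants are taken from the patch.

    /*
     * Stand-alone model of the cycles -> nanoseconds conversion used above.
     * CYC2NS_SCALE_FACTOR and the rounding mirror the patch; the 2.5 GHz
     * cpu_khz value and the cycle count are made-up examples.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define CYC2NS_SCALE_FACTOR     10              /* 2^10, as in the patch */
    #define NSEC_PER_MSEC           1000000ULL

    int main(void)
    {
            uint64_t cpu_khz = 2500000;             /* example: 2.5 GHz TSC */
            uint64_t cyc     = 2500000000ULL;       /* one second of cycles */

            /* DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz) */
            uint32_t mul = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
                            cpu_khz / 2) / cpu_khz;

            /* stands in for mul_u64_u32_shr(); a 64-bit multiply is enough here */
            uint64_t ns = (cyc * mul) >> CYC2NS_SCALE_FACTOR;

            printf("mul=%u  %llu cycles -> %llu ns (~1 s)\n",
                   (unsigned)mul, (unsigned long long)cyc,
                   (unsigned long long)ns);
            return 0;
    }

The result is within the roughly 0.1% rounding error of a 2^10 scale; the per-CPU offset written in set_cyc2ns_scale() is what keeps the clock continuous when the multiplier changes.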
kvm_set_cr3(vcpu, vmcs12->host_cr3); kvm_mmu_reset_context(vcpu); diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index a30ca15be21c8a..dee945d555941a 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -186,7 +186,7 @@ ENTRY(copy_user_generic_unrolled) 30: shll $6,%ecx addl %ecx,%edx jmp 60f -40: lea (%rdx,%rcx,8),%rdx +40: leal (%rdx,%rcx,8),%edx jmp 60f 50: movl %ecx,%edx 60: jmp copy_user_handle_tail /* ecx is zerorest also */ @@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled) ENTRY(copy_user_generic_string) CFI_STARTPROC ASM_STAC - andl %edx,%edx - jz 4f cmpl $8,%edx jb 2f /* less than 8 bytes, go to byte copy loop */ ALIGN_DESTINATION @@ -249,12 +247,12 @@ ENTRY(copy_user_generic_string) 2: movl %edx,%ecx 3: rep movsb -4: xorl %eax,%eax + xorl %eax,%eax ASM_CLAC ret .section .fixup,"ax" -11: lea (%rdx,%rcx,8),%rcx +11: leal (%rdx,%rcx,8),%ecx 12: movl %ecx,%edx /* ecx is zerorest also */ jmp copy_user_handle_tail .previous @@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string) ENTRY(copy_user_enhanced_fast_string) CFI_STARTPROC ASM_STAC - andl %edx,%edx - jz 2f movl %edx,%ecx 1: rep movsb -2: xorl %eax,%eax + xorl %eax,%eax ASM_CLAC ret diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 7c3bee636e2fc3..39d6a3db0b964a 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 533a85e3a07e7d..1a2be7c6895d81 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -346,8 +346,8 @@ AVXcode: 1 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) 18: Grp16 (1A) 19: -1a: -1b: +1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv +1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv 1c: 1d: 1e: diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9ff85bb8dd6981..9d591c89580310 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) { + /* + * Any interrupt that takes a fault gets the fixup. This makes + * the below recursive fault logic only apply to a faults from + * task context. + */ + if (in_interrupt()) + return; + + /* + * Per the above we're !in_interrupt(), aka. task context. + * + * In this case we need to make sure we're not recursively + * faulting through the emulate_vsyscall() logic. + */ if (current_thread_info()->sig_on_uaccess_error && signal) { tsk->thread.trap_nr = X86_TRAP_PF; tsk->thread.error_code = error_code | PF_USER; @@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* XXX: hwpoison faults will set the wrong code. */ force_sig_info_fault(signal, si_code, address, tsk, 0); } + + /* + * Barring that, we can do the fixup and be happy. 
+ */ return; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 9d980d88b7477a..8c9f647ff9e111 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -87,9 +87,7 @@ int pmd_huge_support(void) } #endif -/* x86_64 also uses this file */ - -#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#ifdef CONFIG_HUGETLB_PAGE static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) @@ -99,7 +97,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, info.flags = 0; info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; + info.low_limit = current->mm->mmap_legacy_base; info.high_limit = TASK_SIZE; info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; @@ -172,8 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return hugetlb_get_unmapped_area_topdown(file, addr, len, pgoff, flags); } - -#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ +#endif /* CONFIG_HUGETLB_PAGE */ #ifdef CONFIG_X86_64 static __init int setup_hugepagesz(char *opt) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 4287f1ffba7ef2..5bdc5430597cfe 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -806,6 +806,9 @@ void __init mem_init(void) BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); #undef high_memory #undef __FIXADDR_TOP +#ifdef CONFIG_RANDOMIZE_BASE + BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE); +#endif #ifdef CONFIG_HIGHMEM BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index e5d5e2ce9f7734..637ab34ed63284 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 24aec58d6afdf4..c85da7bb6b603c 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -211,9 +211,13 @@ static void __init setup_node_data(int nid, u64 start, u64 end) */ nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); if (!nd_pa) { - pr_err("Cannot find %zu bytes in node %d\n", - nd_size, nid); - return; + nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES, + MEMBLOCK_ALLOC_ACCESSIBLE); + if (!nd_pa) { + pr_err("Cannot find %zu bytes in node %d\n", + nd_size, nid); + return; + } } nd = __va(nd_pa); diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index d0b1773d9d2efb..461bc828902400 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index bb32480c2d713d..b3b19f46c0164c 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -30,6 +30,7 @@ */ struct cpa_data { unsigned long *vaddr; + pgd_t *pgd; pgprot_t mask_set; pgprot_t mask_clr; int numpages; @@ -322,17 +323,9 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, return prot; } -/* - * Lookup the page table entry for a virtual address. Return a pointer - * to the entry and the level of the mapping. - * - * Note: We return pud and pmd either when the entry is marked large - * or when the present bit is not set. Otherwise we would return a - * pointer to a nonexisting mapping. 
- */ -pte_t *lookup_address(unsigned long address, unsigned int *level) +static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address, + unsigned int *level) { - pgd_t *pgd = pgd_offset_k(address); pud_t *pud; pmd_t *pmd; @@ -361,8 +354,31 @@ pte_t *lookup_address(unsigned long address, unsigned int *level) return pte_offset_kernel(pmd, address); } + +/* + * Lookup the page table entry for a virtual address. Return a pointer + * to the entry and the level of the mapping. + * + * Note: We return pud and pmd either when the entry is marked large + * or when the present bit is not set. Otherwise we would return a + * pointer to a nonexisting mapping. + */ +pte_t *lookup_address(unsigned long address, unsigned int *level) +{ + return __lookup_address_in_pgd(pgd_offset_k(address), address, level); +} EXPORT_SYMBOL_GPL(lookup_address); +static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, + unsigned int *level) +{ + if (cpa->pgd) + return __lookup_address_in_pgd(cpa->pgd + pgd_index(address), + address, level); + + return lookup_address(address, level); +} + /* * This is necessary because __pa() does not work on some * kinds of memory, like vmalloc() or the alloc_remap() @@ -437,7 +453,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * Check for races, another CPU might have split this page * up already: */ - tmp = lookup_address(address, &level); + tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) goto out_unlock; @@ -543,7 +559,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, } static int -__split_large_page(pte_t *kpte, unsigned long address, struct page *base) +__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, + struct page *base) { pte_t *pbase = (pte_t *)page_address(base); unsigned long pfn, pfninc = 1; @@ -556,7 +573,7 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base) * Check for races, another CPU might have split this page * up for us already: */ - tmp = lookup_address(address, &level); + tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) { spin_unlock(&pgd_lock); return 1; @@ -632,7 +649,8 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base) return 0; } -static int split_large_page(pte_t *kpte, unsigned long address) +static int split_large_page(struct cpa_data *cpa, pte_t *kpte, + unsigned long address) { struct page *base; @@ -644,15 +662,390 @@ static int split_large_page(pte_t *kpte, unsigned long address) if (!base) return -ENOMEM; - if (__split_large_page(kpte, address, base)) + if (__split_large_page(cpa, kpte, address, base)) __free_page(base); return 0; } +static bool try_to_free_pte_page(pte_t *pte) +{ + int i; + + for (i = 0; i < PTRS_PER_PTE; i++) + if (!pte_none(pte[i])) + return false; + + free_page((unsigned long)pte); + return true; +} + +static bool try_to_free_pmd_page(pmd_t *pmd) +{ + int i; + + for (i = 0; i < PTRS_PER_PMD; i++) + if (!pmd_none(pmd[i])) + return false; + + free_page((unsigned long)pmd); + return true; +} + +static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) +{ + pte_t *pte = pte_offset_kernel(pmd, start); + + while (start < end) { + set_pte(pte, __pte(0)); + + start += PAGE_SIZE; + pte++; + } + + if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { + pmd_clear(pmd); + return true; + } + return false; +} + +static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, + unsigned long start, unsigned long end) +{ + if (unmap_pte_range(pmd, start, 
end)) + if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + pud_clear(pud); +} + +static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) +{ + pmd_t *pmd = pmd_offset(pud, start); + + /* + * Not on a 2MB page boundary? + */ + if (start & (PMD_SIZE - 1)) { + unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; + unsigned long pre_end = min_t(unsigned long, end, next_page); + + __unmap_pmd_range(pud, pmd, start, pre_end); + + start = pre_end; + pmd++; + } + + /* + * Try to unmap in 2M chunks. + */ + while (end - start >= PMD_SIZE) { + if (pmd_large(*pmd)) + pmd_clear(pmd); + else + __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); + + start += PMD_SIZE; + pmd++; + } + + /* + * 4K leftovers? + */ + if (start < end) + return __unmap_pmd_range(pud, pmd, start, end); + + /* + * Try again to free the PMD page if haven't succeeded above. + */ + if (!pud_none(*pud)) + if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + pud_clear(pud); +} + +static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) +{ + pud_t *pud = pud_offset(pgd, start); + + /* + * Not on a GB page boundary? + */ + if (start & (PUD_SIZE - 1)) { + unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; + unsigned long pre_end = min_t(unsigned long, end, next_page); + + unmap_pmd_range(pud, start, pre_end); + + start = pre_end; + pud++; + } + + /* + * Try to unmap in 1G chunks? + */ + while (end - start >= PUD_SIZE) { + + if (pud_large(*pud)) + pud_clear(pud); + else + unmap_pmd_range(pud, start, start + PUD_SIZE); + + start += PUD_SIZE; + pud++; + } + + /* + * 2M leftovers? + */ + if (start < end) + unmap_pmd_range(pud, start, end); + + /* + * No need to try to free the PUD page because we'll free it in + * populate_pgd's error path + */ +} + +static int alloc_pte_page(pmd_t *pmd) +{ + pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pte) + return -1; + + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); + return 0; +} + +static int alloc_pmd_page(pud_t *pud) +{ + pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pmd) + return -1; + + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); + return 0; +} + +static void populate_pte(struct cpa_data *cpa, + unsigned long start, unsigned long end, + unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, start); + + while (num_pages-- && start < end) { + + /* deal with the NX bit */ + if (!(pgprot_val(pgprot) & _PAGE_NX)) + cpa->pfn &= ~_PAGE_NX; + + set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot)); + + start += PAGE_SIZE; + cpa->pfn += PAGE_SIZE; + pte++; + } +} + +static int populate_pmd(struct cpa_data *cpa, + unsigned long start, unsigned long end, + unsigned num_pages, pud_t *pud, pgprot_t pgprot) +{ + unsigned int cur_pages = 0; + pmd_t *pmd; + + /* + * Not on a 2M boundary? + */ + if (start & (PMD_SIZE - 1)) { + unsigned long pre_end = start + (num_pages << PAGE_SHIFT); + unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; + + pre_end = min_t(unsigned long, pre_end, next_page); + cur_pages = (pre_end - start) >> PAGE_SHIFT; + cur_pages = min_t(unsigned int, num_pages, cur_pages); + + /* + * Need a PTE page? + */ + pmd = pmd_offset(pud, start); + if (pmd_none(*pmd)) + if (alloc_pte_page(pmd)) + return -1; + + populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); + + start = pre_end; + } + + /* + * We mapped them all? 
+ */ + if (num_pages == cur_pages) + return cur_pages; + + while (end - start >= PMD_SIZE) { + + /* + * We cannot use a 1G page so allocate a PMD page if needed. + */ + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + pmd = pmd_offset(pud, start); + + set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + + start += PMD_SIZE; + cpa->pfn += PMD_SIZE; + cur_pages += PMD_SIZE >> PAGE_SHIFT; + } + + /* + * Map trailing 4K pages. + */ + if (start < end) { + pmd = pmd_offset(pud, start); + if (pmd_none(*pmd)) + if (alloc_pte_page(pmd)) + return -1; + + populate_pte(cpa, start, end, num_pages - cur_pages, + pmd, pgprot); + } + return num_pages; +} + +static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, + pgprot_t pgprot) +{ + pud_t *pud; + unsigned long end; + int cur_pages = 0; + + end = start + (cpa->numpages << PAGE_SHIFT); + + /* + * Not on a Gb page boundary? => map everything up to it with + * smaller pages. + */ + if (start & (PUD_SIZE - 1)) { + unsigned long pre_end; + unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; + + pre_end = min_t(unsigned long, end, next_page); + cur_pages = (pre_end - start) >> PAGE_SHIFT; + cur_pages = min_t(int, (int)cpa->numpages, cur_pages); + + pud = pud_offset(pgd, start); + + /* + * Need a PMD page? + */ + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + cur_pages = populate_pmd(cpa, start, pre_end, cur_pages, + pud, pgprot); + if (cur_pages < 0) + return cur_pages; + + start = pre_end; + } + + /* We mapped them all? */ + if (cpa->numpages == cur_pages) + return cur_pages; + + pud = pud_offset(pgd, start); + + /* + * Map everything starting from the Gb boundary, possibly with 1G pages + */ + while (end - start >= PUD_SIZE) { + set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + + start += PUD_SIZE; + cpa->pfn += PUD_SIZE; + cur_pages += PUD_SIZE >> PAGE_SHIFT; + pud++; + } + + /* Map trailing leftover */ + if (start < end) { + int tmp; + + pud = pud_offset(pgd, start); + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, + pud, pgprot); + if (tmp < 0) + return cur_pages; + + cur_pages += tmp; + } + return cur_pages; +} + +/* + * Restrictions for kernel page table do not necessarily apply when mapping in + * an alternate PGD. + */ +static int populate_pgd(struct cpa_data *cpa, unsigned long addr) +{ + pgprot_t pgprot = __pgprot(_KERNPG_TABLE); + bool allocd_pgd = false; + pgd_t *pgd_entry; + pud_t *pud = NULL; /* shut up gcc */ + int ret; + + pgd_entry = cpa->pgd + pgd_index(addr); + + /* + * Allocate a PUD page and hand it down for mapping. + */ + if (pgd_none(*pgd_entry)) { + pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pud) + return -1; + + set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE)); + allocd_pgd = true; + } + + pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr); + pgprot_val(pgprot) |= pgprot_val(cpa->mask_set); + + ret = populate_pud(cpa, addr, pgd_entry, pgprot); + if (ret < 0) { + unmap_pud_range(pgd_entry, addr, + addr + (cpa->numpages << PAGE_SHIFT)); + + if (allocd_pgd) { + /* + * If I allocated this PUD page, I can just as well + * free it in this error path. 
+ */ + pgd_clear(pgd_entry); + free_page((unsigned long)pud); + } + return ret; + } + cpa->numpages = ret; + return 0; +} + static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, int primary) { + if (cpa->pgd) + return populate_pgd(cpa, vaddr); + /* * Ignore all non primary paths. */ @@ -697,7 +1090,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) else address = *cpa->vaddr; repeat: - kpte = lookup_address(address, &level); + kpte = _lookup_address_cpa(cpa, address, &level); if (!kpte) return __cpa_process_fault(cpa, address, primary); @@ -761,7 +1154,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) /* * We have to split the large page: */ - err = split_large_page(kpte, address); + err = split_large_page(cpa, kpte, address); if (!err) { /* * Do a global flush tlb after splitting the large page @@ -910,6 +1303,8 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, int ret, cache, checkalias; unsigned long baddr = 0; + memset(&cpa, 0, sizeof(cpa)); + /* * Check, if we are requested to change a not supported * feature: @@ -1356,6 +1751,7 @@ static int __set_pages_p(struct page *page, int numpages) { unsigned long tempaddr = (unsigned long) page_address(page); struct cpa_data cpa = { .vaddr = &tempaddr, + .pgd = NULL, .numpages = numpages, .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), .mask_clr = __pgprot(0), @@ -1374,6 +1770,7 @@ static int __set_pages_np(struct page *page, int numpages) { unsigned long tempaddr = (unsigned long) page_address(page); struct cpa_data cpa = { .vaddr = &tempaddr, + .pgd = NULL, .numpages = numpages, .mask_set = __pgprot(0), .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), @@ -1434,6 +1831,36 @@ bool kernel_page_present(struct page *page) #endif /* CONFIG_DEBUG_PAGEALLOC */ +int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, + unsigned numpages, unsigned long page_flags) +{ + int retval = -EINVAL; + + struct cpa_data cpa = { + .vaddr = &address, + .pfn = pfn, + .pgd = pgd, + .numpages = numpages, + .mask_set = __pgprot(0), + .mask_clr = __pgprot(0), + .flags = 0, + }; + + if (!(__supported_pte_mask & _PAGE_NX)) + goto out; + + if (!(page_flags & _PAGE_NX)) + cpa.mask_clr = __pgprot(_PAGE_NX); + + cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); + + retval = __change_page_attr_set_clr(&cpa, 0); + __flush_tlb_all(); + +out: + return retval; +} + /* * The testcases use internal knowledge of the implementation that shouldn't * be exposed to the rest of the kernel. Include these directly here. 
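Before moving on: the new kernel_map_pages_in_pgd() just above is the hook used later in this series to populate an alternate top-level page table for EFI. A hedged sketch of a caller follows; all addresses, sizes and flags are placeholders, and the prototype is simply the one defined above.

    /*
     * Hypothetical caller of kernel_map_pages_in_pgd() as defined above.
     * Note that, despite the parameter name, the second argument is used as
     * a physical address by populate_pte()/populate_pmd() in this version
     * (cpa->pfn is advanced by PAGE_SIZE/PMD_SIZE), which is also how the
     * EFI mapping code in this series passes md->phys_addr.
     */
    #include <linux/init.h>
    #include <linux/types.h>
    #include <asm/pgtable_types.h>

    int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                unsigned numpages, unsigned long page_flags);

    static int __init example_map_into_alt_pgd(pgd_t *alt_pgd)
    {
            u64 phys            = 0x80000000ULL;            /* placeholder PA  */
            unsigned long va    = 0xffffffef00000000UL;     /* placeholder VA  */
            unsigned numpages   = 16;                       /* 64K of 4K pages */
            unsigned long flags = _PAGE_RW;  /* writable; OR in _PAGE_NX to
                                                keep the range non-executable */

            /* builds any missing PUD/PMD/PTE levels under alt_pgd */
            return kernel_map_pages_in_pgd(alt_pgd, phys, va, numpages, flags);
    }

The EFI side of this series wraps exactly this call in __map_region(), passing md->phys_addr and either 0 or _PAGE_PCD as the flags.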
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 26328e800869fc..4ed75dd81d052f 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp) EMIT2(0x89, 0xd0); /* mov %edx,%eax */ break; case BPF_S_ALU_MOD_K: /* A %= K; */ + if (K == 1) { + CLEAR_A(); + break; + } EMIT2(0x31, 0xd2); /* xor %edx,%edx */ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ EMIT2(0xf7, 0xf1); /* div %ecx */ EMIT2(0x89, 0xd0); /* mov %edx,%eax */ break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ - EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ - EMIT(K, 4); - EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; + EMIT2(0x31, 0xd2); /* xor %edx,%edx */ + EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ + EMIT2(0xf7, 0xf1); /* div %ecx */ break; case BPF_S_ALU_AND_X: seen |= SEEN_XREG; diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index b046e070e08868..bca9e85daaa55d 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 51384ca727ad1a..84b9d672843db2 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -31,6 +31,7 @@ #include #include #include +#include #define PCIE_CAP_OFFSET 0x100 @@ -219,7 +220,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) irq_attr.ioapic = mp_find_ioapic(dev->irq); irq_attr.ioapic_pin = dev->irq; irq_attr.trigger = 1; /* level */ - irq_attr.polarity = 1; /* active low */ + if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) + irq_attr.polarity = 0; /* active high */ + else + irq_attr.polarity = 1; /* active low */ io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr); return 0; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index cceb813044efc5..d62ec87a2b26d5 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -12,6 +12,8 @@ * Bibo Mao * Chandramouli Narayanan * Huang Ying + * Copyright (C) 2013 SuSE Labs + * Borislav Petkov - runtime services VA mapping * * Copied from efi_32.c to eliminate the duplicated code between EFI * 32/64 support code. --ying 2007-10-26 @@ -51,7 +53,7 @@ #include #include -#define EFI_DEBUG 1 +#define EFI_DEBUG #define EFI_MIN_RESERVE 5120 @@ -74,6 +76,8 @@ static __initdata efi_config_table_type_t arch_tables[] = { {NULL_GUID, NULL, NULL}, }; +u64 efi_setup; /* efi setup_data physical address */ + /* * Returns 1 if 'facility' is enabled, 0 otherwise. 
*/ @@ -110,7 +114,6 @@ static int __init setup_storage_paranoia(char *arg) } early_param("efi_no_storage_paranoia", setup_storage_paranoia); - static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { unsigned long flags; @@ -398,9 +401,9 @@ int __init efi_memblock_x86_reserve_range(void) return 0; } -#if EFI_DEBUG static void __init print_efi_memmap(void) { +#ifdef EFI_DEBUG efi_memory_desc_t *md; void *p; int i; @@ -415,8 +418,8 @@ static void __init print_efi_memmap(void) md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), (md->num_pages >> (20 - EFI_PAGE_SHIFT))); } -} #endif /* EFI_DEBUG */ +} void __init efi_reserve_boot_services(void) { @@ -436,7 +439,7 @@ void __init efi_reserve_boot_services(void) * - Not within any part of the kernel * - Not the bios reserved area */ - if ((start+size >= __pa_symbol(_text) + if ((start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) || !e820_all_mapped(start, start+size, E820_RAM) || memblock_is_region_reserved(start, size)) { @@ -489,18 +492,27 @@ static int __init efi_systab_init(void *phys) { if (efi_enabled(EFI_64BIT)) { efi_system_table_64_t *systab64; + struct efi_setup_data *data = NULL; u64 tmp = 0; + if (efi_setup) { + data = early_memremap(efi_setup, sizeof(*data)); + if (!data) + return -ENOMEM; + } systab64 = early_ioremap((unsigned long)phys, sizeof(*systab64)); if (systab64 == NULL) { pr_err("Couldn't map the system table!\n"); + if (data) + early_iounmap(data, sizeof(*data)); return -ENOMEM; } efi_systab.hdr = systab64->hdr; - efi_systab.fw_vendor = systab64->fw_vendor; - tmp |= systab64->fw_vendor; + efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor : + systab64->fw_vendor; + tmp |= data ? data->fw_vendor : systab64->fw_vendor; efi_systab.fw_revision = systab64->fw_revision; efi_systab.con_in_handle = systab64->con_in_handle; tmp |= systab64->con_in_handle; @@ -514,15 +526,20 @@ static int __init efi_systab_init(void *phys) tmp |= systab64->stderr_handle; efi_systab.stderr = systab64->stderr; tmp |= systab64->stderr; - efi_systab.runtime = (void *)(unsigned long)systab64->runtime; - tmp |= systab64->runtime; + efi_systab.runtime = data ? + (void *)(unsigned long)data->runtime : + (void *)(unsigned long)systab64->runtime; + tmp |= data ? data->runtime : systab64->runtime; efi_systab.boottime = (void *)(unsigned long)systab64->boottime; tmp |= systab64->boottime; efi_systab.nr_tables = systab64->nr_tables; - efi_systab.tables = systab64->tables; - tmp |= systab64->tables; + efi_systab.tables = data ? (unsigned long)data->tables : + systab64->tables; + tmp |= data ? data->tables : systab64->tables; early_iounmap(systab64, sizeof(*systab64)); + if (data) + early_iounmap(data, sizeof(*data)); #ifdef CONFIG_X86_32 if (tmp >> 32) { pr_err("EFI data located above 4GB, disabling EFI.\n"); @@ -626,6 +643,62 @@ static int __init efi_memmap_init(void) return 0; } +/* + * A number of config table entries get remapped to virtual addresses + * after entering EFI virtual mode. However, the kexec kernel requires + * their physical addresses therefore we pass them via setup_data and + * correct those entries to their respective physical addresses here. + * + * Currently only handles smbios which is necessary for some firmware + * implementation. 
+ */ +static int __init efi_reuse_config(u64 tables, int nr_tables) +{ + int i, sz, ret = 0; + void *p, *tablep; + struct efi_setup_data *data; + + if (!efi_setup) + return 0; + + if (!efi_enabled(EFI_64BIT)) + return 0; + + data = early_memremap(efi_setup, sizeof(*data)); + if (!data) { + ret = -ENOMEM; + goto out; + } + + if (!data->smbios) + goto out_memremap; + + sz = sizeof(efi_config_table_64_t); + + p = tablep = early_memremap(tables, nr_tables * sz); + if (!p) { + pr_err("Could not map Configuration table!\n"); + ret = -ENOMEM; + goto out_memremap; + } + + for (i = 0; i < efi.systab->nr_tables; i++) { + efi_guid_t guid; + + guid = ((efi_config_table_64_t *)p)->guid; + + if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID)) + ((efi_config_table_64_t *)p)->table = data->smbios; + p += sz; + } + early_iounmap(tablep, nr_tables * sz); + +out_memremap: + early_iounmap(data, sizeof(*data)); +out: + return ret; +} + void __init efi_init(void) { efi_char16_t *c16; @@ -651,6 +724,10 @@ void __init efi_init(void) set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); + efi.config_table = (unsigned long)efi.systab->tables; + efi.fw_vendor = (unsigned long)efi.systab->fw_vendor; + efi.runtime = (unsigned long)efi.systab->runtime; + /* * Show what we know for posterity */ @@ -667,6 +744,9 @@ void __init efi_init(void) efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); + if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables)) + return; + if (efi_config_init(arch_tables)) return; @@ -684,15 +764,12 @@ void __init efi_init(void) return; set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); } - if (efi_memmap_init()) return; set_bit(EFI_MEMMAP, &x86_efi_facility); -#if EFI_DEBUG print_efi_memmap(); -#endif } void __init efi_late_init(void) @@ -741,36 +818,38 @@ void efi_memory_uc(u64 addr, unsigned long size) set_memory_uc(addr, npages); } -/* - * This function will switch the EFI runtime services to virtual mode. - * Essentially, look through the EFI memmap and map every region that - * has the runtime attribute bit set in its memory descriptor and update - * that memory descriptor with the virtual address obtained from ioremap(). - * This enables the runtime services to be called without having to - * thunk back into physical mode for every invocation. 
- */ -void __init efi_enter_virtual_mode(void) +void __init old_map_region(efi_memory_desc_t *md) { - efi_memory_desc_t *md, *prev_md = NULL; - efi_status_t status; + u64 start_pfn, end_pfn, end; unsigned long size; - u64 end, systab, start_pfn, end_pfn; - void *p, *va, *new_memmap = NULL; - int count = 0; + void *va; - efi.systab = NULL; + start_pfn = PFN_DOWN(md->phys_addr); + size = md->num_pages << PAGE_SHIFT; + end = md->phys_addr + size; + end_pfn = PFN_UP(end); - /* - * We don't do virtual mode, since we don't do runtime services, on - * non-native EFI - */ + if (pfn_range_is_mapped(start_pfn, end_pfn)) { + va = __va(md->phys_addr); - if (!efi_is_native()) { - efi_unmap_memmap(); - return; - } + if (!(md->attribute & EFI_MEMORY_WB)) + efi_memory_uc((u64)(unsigned long)va, size); + } else + va = efi_ioremap(md->phys_addr, size, + md->type, md->attribute); + + md->virt_addr = (u64) (unsigned long) va; + if (!va) + pr_err("ioremap of 0x%llX failed!\n", + (unsigned long long)md->phys_addr); +} + +/* Merge contiguous regions of the same type and attribute */ +static void __init efi_merge_regions(void) +{ + void *p; + efi_memory_desc_t *md, *prev_md = NULL; - /* Merge contiguous regions of the same type and attribute */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { u64 prev_size; md = p; @@ -796,6 +875,77 @@ void __init efi_enter_virtual_mode(void) } prev_md = md; } +} + +static void __init get_systab_virt_addr(efi_memory_desc_t *md) +{ + unsigned long size; + u64 end, systab; + + size = md->num_pages << EFI_PAGE_SHIFT; + end = md->phys_addr + size; + systab = (u64)(unsigned long)efi_phys.systab; + if (md->phys_addr <= systab && systab < end) { + systab += md->virt_addr - md->phys_addr; + efi.systab = (efi_system_table_t *)(unsigned long)systab; + } +} + +static int __init save_runtime_map(void) +{ + efi_memory_desc_t *md; + void *tmp, *p, *q = NULL; + int count = 0; + + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + md = p; + + if (!(md->attribute & EFI_MEMORY_RUNTIME) || + (md->type == EFI_BOOT_SERVICES_CODE) || + (md->type == EFI_BOOT_SERVICES_DATA)) + continue; + tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL); + if (!tmp) + goto out; + q = tmp; + + memcpy(q + count * memmap.desc_size, md, memmap.desc_size); + count++; + } + + efi_runtime_map_setup(q, count, memmap.desc_size); + + return 0; +out: + kfree(q); + return -ENOMEM; +} + +/* + * Map efi regions which were passed via setup_data. The virt_addr is a fixed + * addr which was used in first kernel of a kexec boot. + */ +static void __init efi_map_regions_fixed(void) +{ + void *p; + efi_memory_desc_t *md; + + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + md = p; + efi_map_region_fixed(md); /* FIXME: add error handling */ + get_systab_virt_addr(md); + } + +} + +/* + * Map efi memory ranges for runtime serivce and update new_memmap with virtual + * addresses. 
+ */ +static void * __init efi_map_regions(int *count) +{ + efi_memory_desc_t *md; + void *p, *tmp, *new_memmap = NULL; for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; @@ -807,53 +957,95 @@ void __init efi_enter_virtual_mode(void) continue; } - size = md->num_pages << EFI_PAGE_SHIFT; - end = md->phys_addr + size; + efi_map_region(md); + get_systab_virt_addr(md); - start_pfn = PFN_DOWN(md->phys_addr); - end_pfn = PFN_UP(end); - if (pfn_range_is_mapped(start_pfn, end_pfn)) { - va = __va(md->phys_addr); + tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size, + GFP_KERNEL); + if (!tmp) + goto out; + new_memmap = tmp; + memcpy(new_memmap + (*count * memmap.desc_size), md, + memmap.desc_size); + (*count)++; + } - if (!(md->attribute & EFI_MEMORY_WB)) - efi_memory_uc((u64)(unsigned long)va, size); - } else - va = efi_ioremap(md->phys_addr, size, - md->type, md->attribute); + return new_memmap; +out: + kfree(new_memmap); + return NULL; +} + +/* + * This function will switch the EFI runtime services to virtual mode. + * Essentially, we look through the EFI memmap and map every region that + * has the runtime attribute bit set in its memory descriptor into the + * ->trampoline_pgd page table using a top-down VA allocation scheme. + * + * The old method which used to update that memory descriptor with the + * virtual address obtained from ioremap() is still supported when the + * kernel is booted with efi=old_map on its command line. Same old + * method enabled the runtime services to be called without having to + * thunk back into physical mode for every invocation. + * + * The new method does a pagetable switch in a preemption-safe manner + * so that we're in a different address space when calling a runtime + * function. For function arguments passing we do copy the PGDs of the + * kernel page table into ->trampoline_pgd prior to each call. + * + * Specially for kexec boot, efi runtime maps in previous kernel should + * be passed in via setup_data. In that case runtime ranges will be mapped + * to the same virtual addresses as the first kernel. 
+ */ +void __init efi_enter_virtual_mode(void) +{ + efi_status_t status; + void *new_memmap = NULL; + int err, count = 0; - md->virt_addr = (u64) (unsigned long) va; + efi.systab = NULL; - if (!va) { - pr_err("ioremap of 0x%llX failed!\n", - (unsigned long long)md->phys_addr); - continue; - } + /* + * We don't do virtual mode, since we don't do runtime services, on + * non-native EFI + */ + if (!efi_is_native()) { + efi_unmap_memmap(); + return; + } - systab = (u64) (unsigned long) efi_phys.systab; - if (md->phys_addr <= systab && systab < end) { - systab += md->virt_addr - md->phys_addr; - efi.systab = (efi_system_table_t *) (unsigned long) systab; + if (efi_setup) { + efi_map_regions_fixed(); + } else { + efi_merge_regions(); + new_memmap = efi_map_regions(&count); + if (!new_memmap) { + pr_err("Error reallocating memory, EFI runtime non-functional!\n"); + return; } - new_memmap = krealloc(new_memmap, - (count + 1) * memmap.desc_size, - GFP_KERNEL); - memcpy(new_memmap + (count * memmap.desc_size), md, - memmap.desc_size); - count++; } + err = save_runtime_map(); + if (err) + pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n"); + BUG_ON(!efi.systab); - status = phys_efi_set_virtual_address_map( - memmap.desc_size * count, - memmap.desc_size, - memmap.desc_version, - (efi_memory_desc_t *)__pa(new_memmap)); + efi_setup_page_tables(); + efi_sync_low_kernel_mappings(); - if (status != EFI_SUCCESS) { - pr_alert("Unable to switch EFI into virtual mode " - "(status=%lx)!\n", status); - panic("EFI call to SetVirtualAddressMap() failed!"); + if (!efi_setup) { + status = phys_efi_set_virtual_address_map( + memmap.desc_size * count, + memmap.desc_size, + memmap.desc_version, + (efi_memory_desc_t *)__pa(new_memmap)); + + if (status != EFI_SUCCESS) { + pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n", + status); + panic("EFI call to SetVirtualAddressMap() failed!"); + } } /* @@ -876,7 +1068,8 @@ void __init efi_enter_virtual_mode(void) efi.query_variable_info = virt_efi_query_variable_info; efi.update_capsule = virt_efi_update_capsule; efi.query_capsule_caps = virt_efi_query_capsule_caps; - if (__supported_pte_mask & _PAGE_NX) + + if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) runtime_code_page_mkexec(); kfree(new_memmap); @@ -1006,3 +1199,15 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) return EFI_SUCCESS; } EXPORT_SYMBOL_GPL(efi_query_variable_store); + +static int __init parse_efi_cmdline(char *str) +{ + if (*str == '=') + str++; + + if (!strncmp(str, "old_map", 7)) + set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); + + return 0; +} +early_param("efi", parse_efi_cmdline); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 40e446941dd7ec..249b183cf41799 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -37,9 +37,19 @@ * claim EFI runtime service handler exclusively and to duplicate a memory in * low memory space say 0 - 3G. 
*/ - static unsigned long efi_rt_eflags; +void efi_sync_low_kernel_mappings(void) {} +void efi_setup_page_tables(void) {} + +void __init efi_map_region(efi_memory_desc_t *md) +{ + old_map_region(md); +} + +void __init efi_map_region_fixed(efi_memory_desc_t *md) {} +void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} + void efi_call_phys_prelog(void) { struct desc_ptr gdt_descr; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 39a0e7f1f0a3ec..6284f158a47d85 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -38,10 +38,28 @@ #include #include #include +#include static pgd_t *save_pgd __initdata; static unsigned long efi_flags __initdata; +/* + * We allocate runtime services regions bottom-up, starting from -4G, i.e. + * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G. + */ +static u64 efi_va = -4 * (1UL << 30); +#define EFI_VA_END (-68 * (1UL << 30)) + +/* + * Scratch space used for switching the pagetable in the EFI stub + */ +struct efi_scratch { + u64 r15; + u64 prev_cr3; + pgd_t *efi_pgt; + bool use_pgd; +}; + static void __init early_code_mapping_set_exec(int executable) { efi_memory_desc_t *md; @@ -65,6 +83,9 @@ void __init efi_call_phys_prelog(void) int pgd; int n_pgds; + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + early_code_mapping_set_exec(1); local_irq_save(efi_flags); @@ -86,6 +107,10 @@ void __init efi_call_phys_epilog(void) */ int pgd; int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); + + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + for (pgd = 0; pgd < n_pgds; pgd++) set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); kfree(save_pgd); @@ -94,6 +119,96 @@ void __init efi_call_phys_epilog(void) early_code_mapping_set_exec(0); } +/* + * Add low kernel mappings for passing arguments to EFI functions. + */ +void efi_sync_low_kernel_mappings(void) +{ + unsigned num_pgds; + pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + + if (efi_enabled(EFI_OLD_MEMMAP)) + return; + + num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); + + memcpy(pgd + pgd_index(PAGE_OFFSET), + init_mm.pgd + pgd_index(PAGE_OFFSET), + sizeof(pgd_t) * num_pgds); +} + +void efi_setup_page_tables(void) +{ + efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd; + + if (!efi_enabled(EFI_OLD_MEMMAP)) + efi_scratch.use_pgd = true; +} + +static void __init __map_region(efi_memory_desc_t *md, u64 va) +{ + pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + unsigned long pf = 0; + + if (!(md->attribute & EFI_MEMORY_WB)) + pf |= _PAGE_PCD; + + if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) + pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n", + md->phys_addr, va); +} + +void __init efi_map_region(efi_memory_desc_t *md) +{ + unsigned long size = md->num_pages << PAGE_SHIFT; + u64 pa = md->phys_addr; + + if (efi_enabled(EFI_OLD_MEMMAP)) + return old_map_region(md); + + /* + * Make sure the 1:1 mappings are present as a catch-all for b0rked + * firmware which doesn't update all internal pointers after switching + * to virtual mode and would otherwise crap on us. + */ + __map_region(md, md->phys_addr); + + efi_va -= size; + + /* Is PA 2M-aligned? 
*/ + if (!(pa & (PMD_SIZE - 1))) { + efi_va &= PMD_MASK; + } else { + u64 pa_offset = pa & (PMD_SIZE - 1); + u64 prev_va = efi_va; + + /* get us the same offset within this 2M page */ + efi_va = (efi_va & PMD_MASK) + pa_offset; + + if (efi_va > prev_va) + efi_va -= PMD_SIZE; + } + + if (efi_va < EFI_VA_END) { + pr_warn(FW_WARN "VA address range overflow!\n"); + return; + } + + /* Do the VA map */ + __map_region(md, efi_va); + md->virt_addr = efi_va; +} + +/* + * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges. + * md->virt_addr is the original virtual address which had been mapped in kexec + * 1st kernel. + */ +void __init efi_map_region_fixed(efi_memory_desc_t *md) +{ + __map_region(md, md->virt_addr); +} + void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, u32 type, u64 attribute) { @@ -113,3 +228,8 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, return (void __iomem *)__va(phys_addr); } + +void __init parse_efi_setup(u64 phys_addr, u32 data_len) +{ + efi_setup = phys_addr + sizeof(struct setup_data); +} diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 4c07ccab8146f0..88073b1402988b 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -34,10 +34,47 @@ mov %rsi, %cr0; \ mov (%rsp), %rsp + /* stolen from gcc */ + .macro FLUSH_TLB_ALL + movq %r15, efi_scratch(%rip) + movq %r14, efi_scratch+8(%rip) + movq %cr4, %r15 + movq %r15, %r14 + andb $0x7f, %r14b + movq %r14, %cr4 + movq %r15, %cr4 + movq efi_scratch+8(%rip), %r14 + movq efi_scratch(%rip), %r15 + .endm + + .macro SWITCH_PGT + cmpb $0, efi_scratch+24(%rip) + je 1f + movq %r15, efi_scratch(%rip) # r15 + # save previous CR3 + movq %cr3, %r15 + movq %r15, efi_scratch+8(%rip) # prev_cr3 + movq efi_scratch+16(%rip), %r15 # EFI pgt + movq %r15, %cr3 + 1: + .endm + + .macro RESTORE_PGT + cmpb $0, efi_scratch+24(%rip) + je 2f + movq efi_scratch+8(%rip), %r15 + movq %r15, %cr3 + movq efi_scratch(%rip), %r15 + FLUSH_TLB_ALL + 2: + .endm + ENTRY(efi_call0) SAVE_XMM subq $32, %rsp + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -47,7 +84,9 @@ ENTRY(efi_call1) SAVE_XMM subq $32, %rsp mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -57,7 +96,9 @@ ENTRY(efi_call2) SAVE_XMM subq $32, %rsp mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -68,7 +109,9 @@ ENTRY(efi_call3) subq $32, %rsp mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -80,7 +123,9 @@ ENTRY(efi_call4) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -93,7 +138,9 @@ ENTRY(efi_call5) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $48, %rsp RESTORE_XMM ret @@ -109,8 +156,15 @@ ENTRY(efi_call6) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $48, %rsp RESTORE_XMM ret ENDPROC(efi_call6) + + .data +ENTRY(efi_scratch) + .fill 3,8,0 + .byte 0 diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile index 01cc29ea5ff713..0a8ee703b9fae9 100644 --- a/arch/x86/platform/intel-mid/Makefile +++ b/arch/x86/platform/intel-mid/Makefile @@ -1,6 +1,6 @@ -obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o -obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o +obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o 
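The SWITCH_PGT/RESTORE_PGT macros above address efi_scratch by raw byte offsets (0, +8, +16 and +24). As a minimal sketch, assuming the struct efi_scratch layout shown in efi_64.c, the correspondence could be documented with compile-time checks like these (illustrative only, not part of the patch):

#include <linux/bug.h>
#include <linux/stddef.h>

/* Hypothetical helper: records the offsets the assembly stub assumes. */
static inline void efi_scratch_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct efi_scratch, r15)      != 0);	/* efi_scratch(%rip)    */
	BUILD_BUG_ON(offsetof(struct efi_scratch, prev_cr3) != 8);	/* efi_scratch+8(%rip)  */
	BUILD_BUG_ON(offsetof(struct efi_scratch, efi_pgt)  != 16);	/* efi_scratch+16(%rip) */
	BUILD_BUG_ON(offsetof(struct efi_scratch, use_pgd)  != 24);	/* efi_scratch+24(%rip) */
}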
obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o + # SFI specific code ifdef CONFIG_X86_INTEL_MID obj-$(CONFIG_SFI) += sfi.o device_libs/ diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c index 0d942c1d26d578..69a783689d21b7 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c @@ -22,7 +22,9 @@ static void __init *emc1403_platform_data(void *info) int intr = get_gpio_by_name("thermal_int"); int intr2nd = get_gpio_by_name("thermal_alert"); - if (intr == -1 || intr2nd == -1) + if (intr < 0) + return NULL; + if (intr2nd < 0) return NULL; i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c index a013a4834bbe7d..dccae6b0413f86 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c @@ -66,7 +66,7 @@ static int __init pb_keys_init(void) gb[i].gpio = get_gpio_by_name(gb[i].desc); pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, gb[i].gpio); - if (gb[i].gpio == -1) + if (gb[i].gpio < 0) continue; if (i != good) diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c index 15278c11f7147b..54226de7541a7f 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c @@ -21,7 +21,9 @@ static void __init *lis331dl_platform_data(void *info) int intr = get_gpio_by_name("accel_int"); int intr2nd = get_gpio_by_name("accel_2"); - if (intr == -1 || intr2nd == -1) + if (intr < 0) + return NULL; + if (intr2nd < 0) return NULL; i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c index 94ade10024ae01..2c8acbc1e9ad90 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c @@ -48,7 +48,7 @@ static void __init *max7315_platform_data(void *info) gpio_base = get_gpio_by_name(base_pin_name); intr = get_gpio_by_name(intr_pin_name); - if (gpio_base == -1) + if (gpio_base < 0) return NULL; max7315->gpio_base = gpio_base; if (intr != -1) { diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c index dd28d63c84fb1b..cfe9a47a1e8778 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c @@ -19,7 +19,7 @@ static void *mpu3050_platform_data(void *info) struct i2c_board_info *i2c_info = info; int intr = get_gpio_by_name("mpu3050_int"); - if (intr == -1) + if (intr < 0) return NULL; i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c index d87182a0926315..65c2a9a19db4d0 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c @@ -26,7 +26,7 @@ static void __init *pmic_gpio_platform_data(void *info) static struct intel_pmic_gpio_platform_data pmic_gpio_pdata; int gpio_base = get_gpio_by_name("pmic_gpio_base"); - 
if (gpio_base == -1) + if (gpio_base < 0) gpio_base = 64; pmic_gpio_pdata.gpio_base = gpio_base; pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c index 22881c9a6737b1..33be0b3be6e120 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c @@ -34,10 +34,10 @@ static void *tca6416_platform_data(void *info) gpio_base = get_gpio_by_name(base_pin_name); intr = get_gpio_by_name(intr_pin_name); - if (gpio_base == -1) + if (gpio_base < 0) return NULL; tca6416.gpio_base = gpio_base; - if (intr != -1) { + if (intr >= 0) { i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; } else { diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c index 4f702f554f6e01..e0bd082a80e00f 100644 --- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c +++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index f90e290f689f38..1bbedc4b0f88d4 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -35,6 +35,8 @@ #include #include +#include "intel_mid_weak_decls.h" + /* * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock, * cmdline option x86_intel_mid_timer can be used to override the configuration @@ -58,12 +60,16 @@ enum intel_mid_timer_options intel_mid_timer_options; +/* intel_mid_ops to store sub arch ops */ +struct intel_mid_ops *intel_mid_ops; +/* getter function for sub arch ops*/ +static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT; enum intel_mid_cpu_type __intel_mid_cpu_chip; EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip); static void intel_mid_power_off(void) { -} +}; static void intel_mid_reboot(void) { @@ -72,32 +78,6 @@ static void intel_mid_reboot(void) static unsigned long __init intel_mid_calibrate_tsc(void) { - unsigned long fast_calibrate; - u32 lo, hi, ratio, fsb; - - rdmsr(MSR_IA32_PERF_STATUS, lo, hi); - pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi); - ratio = (hi >> 8) & 0x1f; - pr_debug("ratio is %d\n", ratio); - if (!ratio) { - pr_err("read a zero ratio, should be incorrect!\n"); - pr_err("force tsc ratio to 16 ...\n"); - ratio = 16; - } - rdmsr(MSR_FSB_FREQ, lo, hi); - if ((lo & 0x7) == 0x7) - fsb = PENWELL_FSB_FREQ_83SKU; - else - fsb = PENWELL_FSB_FREQ_100SKU; - fast_calibrate = ratio * fsb; - pr_debug("read penwell tsc %lu khz\n", fast_calibrate); - lapic_timer_frequency = fsb * 1000 / HZ; - /* mark tsc clocksource as reliable */ - set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); - - if (fast_calibrate) - return fast_calibrate; - return 0; } @@ -125,13 +105,37 @@ static void __init intel_mid_time_init(void) static void intel_mid_arch_setup(void) { - if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) - __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; - else { + if (boot_cpu_data.x86 != 6) { pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n", boot_cpu_data.x86, boot_cpu_data.x86_model); __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; + goto out; } + + switch (boot_cpu_data.x86_model) { + case 0x35: + __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW; + break; + case 0x3C: 
+ case 0x4A: + __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER; + break; + case 0x27: + default: + __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; + break; + } + + if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops)) + intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); + else { + intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); + pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); + } + +out: + if (intel_mid_ops->arch_setup) + intel_mid_ops->arch_setup(); } /* MID systems don't have i8042 controller */ diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h new file mode 100644 index 00000000000000..a537ffc16299bf --- /dev/null +++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h @@ -0,0 +1,19 @@ +/* + * intel_mid_weak_decls.h: Weak declarations of intel-mid.c + * + * (C) Copyright 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + + +/* __attribute__((weak)) makes these declarations overridable */ +/* For every CPU addition a new get__ops interface needs + * to be added. + */ +extern void * __cpuinit get_penwell_ops(void) __attribute__((weak)); +extern void * __cpuinit get_cloverview_ops(void) __attribute__((weak)); +extern void * __init get_tangier_ops(void) __attribute__((weak)); diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c new file mode 100644 index 00000000000000..4f7884eebc149a --- /dev/null +++ b/arch/x86/platform/intel-mid/mfld.c @@ -0,0 +1,75 @@ +/* + * mfld.c: Intel Medfield platform setup code + * + * (C) Copyright 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License.
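intel_mid_arch_setup() above indexes get_intel_mid_ops[] by __intel_mid_cpu_chip to pick the per-SoC ops getter; the table itself comes from INTEL_MID_OPS_INIT in asm/intel-mid.h. A rough sketch of the shape such a table takes, using the getters declared in intel_mid_weak_decls.h (illustrative only; the real initializer may differ):

/* Hypothetical illustration, not the actual INTEL_MID_OPS_INIT definition. */
static void *(*get_intel_mid_ops_sketch[])(void) = {
	[INTEL_MID_CPU_CHIP_PENWELL]	= get_penwell_ops,
	[INTEL_MID_CPU_CHIP_CLOVERVIEW]	= get_cloverview_ops,
	[INTEL_MID_CPU_CHIP_TANGIER]	= get_tangier_ops,
};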
+ */ + +#include + +#include +#include +#include + +#include "intel_mid_weak_decls.h" + +static void penwell_arch_setup(void); +/* penwell arch ops */ +static struct intel_mid_ops penwell_ops = { + .arch_setup = penwell_arch_setup, +}; + +static void mfld_power_off(void) +{ +} + +static unsigned long __init mfld_calibrate_tsc(void) +{ + unsigned long fast_calibrate; + u32 lo, hi, ratio, fsb; + + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi); + ratio = (hi >> 8) & 0x1f; + pr_debug("ratio is %d\n", ratio); + if (!ratio) { + pr_err("read a zero ratio, should be incorrect!\n"); + pr_err("force tsc ratio to 16 ...\n"); + ratio = 16; + } + rdmsr(MSR_FSB_FREQ, lo, hi); + if ((lo & 0x7) == 0x7) + fsb = FSB_FREQ_83SKU; + else + fsb = FSB_FREQ_100SKU; + fast_calibrate = ratio * fsb; + pr_debug("read penwell tsc %lu khz\n", fast_calibrate); + lapic_timer_frequency = fsb * 1000 / HZ; + /* mark tsc clocksource as reliable */ + set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); + + if (fast_calibrate) + return fast_calibrate; + + return 0; +} + +static void __init penwell_arch_setup() +{ + x86_platform.calibrate_tsc = mfld_calibrate_tsc; + pm_power_off = mfld_power_off; +} + +void * __cpuinit get_penwell_ops() +{ + return &penwell_ops; +} + +void * __cpuinit get_cloverview_ops() +{ + return &penwell_ops; +} diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c new file mode 100644 index 00000000000000..09d10159e7b7f9 --- /dev/null +++ b/arch/x86/platform/intel-mid/mrfl.c @@ -0,0 +1,103 @@ +/* + * mrfl.c: Intel Merrifield platform specific setup code + * + * (C) Copyright 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include + +#include +#include + +#include "intel_mid_weak_decls.h" + +static unsigned long __init tangier_calibrate_tsc(void) +{ + unsigned long fast_calibrate; + u32 lo, hi, ratio, fsb, bus_freq; + + /* *********************** */ + /* Compute TSC:Ratio * FSB */ + /* *********************** */ + + /* Compute Ratio */ + rdmsr(MSR_PLATFORM_INFO, lo, hi); + pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo); + + ratio = (lo >> 8) & 0xFF; + pr_debug("ratio is %d\n", ratio); + if (!ratio) { + pr_err("Read a zero ratio, force tsc ratio to 4 ...\n"); + ratio = 4; + } + + /* Compute FSB */ + rdmsr(MSR_FSB_FREQ, lo, hi); + pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n", + hi, lo); + + bus_freq = lo & 0x7; + pr_debug("bus_freq = 0x%x\n", bus_freq); + + if (bus_freq == 0) + fsb = FSB_FREQ_100SKU; + else if (bus_freq == 1) + fsb = FSB_FREQ_100SKU; + else if (bus_freq == 2) + fsb = FSB_FREQ_133SKU; + else if (bus_freq == 3) + fsb = FSB_FREQ_167SKU; + else if (bus_freq == 4) + fsb = FSB_FREQ_83SKU; + else if (bus_freq == 5) + fsb = FSB_FREQ_400SKU; + else if (bus_freq == 6) + fsb = FSB_FREQ_267SKU; + else if (bus_freq == 7) + fsb = FSB_FREQ_333SKU; + else { + BUG(); + pr_err("Invalid bus_freq! 
Setting to minimal value!\n"); + fsb = FSB_FREQ_100SKU; + } + + /* TSC = FSB Freq * Resolved HFM Ratio */ + fast_calibrate = ratio * fsb; + pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate); + + /* ************************************ */ + /* Calculate Local APIC Timer Frequency */ + /* ************************************ */ + lapic_timer_frequency = (fsb * 1000) / HZ; + + pr_debug("Setting lapic_timer_frequency = %d\n", + lapic_timer_frequency); + + /* mark tsc clocksource as reliable */ + set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); + + if (fast_calibrate) + return fast_calibrate; + + return 0; +} + +static void __init tangier_arch_setup(void) +{ + x86_platform.calibrate_tsc = tangier_calibrate_tsc; +} + +/* tangier arch ops */ +static struct intel_mid_ops tangier_ops = { + .arch_setup = tangier_arch_setup, +}; + +void * __cpuinit get_tangier_ops() +{ + return &tangier_ops; +} diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c index c84c1ca396bfa8..994c40bd7cb7c8 100644 --- a/arch/x86/platform/intel-mid/sfi.c +++ b/arch/x86/platform/intel-mid/sfi.c @@ -224,7 +224,7 @@ int get_gpio_by_name(const char *name) if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN)) return pentry->pin_no; } - return -1; + return -EINVAL; } void __init intel_scu_device_register(struct platform_device *pdev) @@ -250,7 +250,7 @@ static void __init intel_scu_spi_device_register(struct spi_board_info *sdev) sdev->modalias); return; } - memcpy(new_dev, sdev, sizeof(*sdev)); + *new_dev = *sdev; spi_devs[spi_next_dev++] = new_dev; } @@ -271,7 +271,7 @@ static void __init intel_scu_i2c_device_register(int bus, idev->type); return; } - memcpy(new_dev, idev, sizeof(*idev)); + *new_dev = *idev; i2c_bus[i2c_next_dev] = bus; i2c_devs[i2c_next_dev++] = new_dev; @@ -337,6 +337,8 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry, pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n", pentry->name, pentry->irq); pdata = intel_mid_sfi_get_pdata(dev, pentry); + if (IS_ERR(pdata)) + return; pdev = platform_device_alloc(pentry->name, 0); if (pdev == NULL) { @@ -370,6 +372,8 @@ static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry, spi_info.chip_select); pdata = intel_mid_sfi_get_pdata(dev, &spi_info); + if (IS_ERR(pdata)) + return; spi_info.platform_data = pdata; if (dev->delay) @@ -395,6 +399,8 @@ static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry, i2c_info.addr); pdata = intel_mid_sfi_get_pdata(dev, &i2c_info); i2c_info.platform_data = pdata; + if (IS_ERR(pdata)) + return; if (dev->delay) intel_scu_i2c_device_register(pentry->host_num, &i2c_info); @@ -443,13 +449,35 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) * so we have to enable them one by one here */ ioapic = mp_find_ioapic(irq); - irq_attr.ioapic = ioapic; - irq_attr.ioapic_pin = irq; - irq_attr.trigger = 1; - irq_attr.polarity = 1; - io_apic_set_pci_routing(NULL, irq, &irq_attr); - } else + if (ioapic >= 0) { + irq_attr.ioapic = ioapic; + irq_attr.ioapic_pin = irq; + irq_attr.trigger = 1; + if (intel_mid_identify_cpu() == + INTEL_MID_CPU_CHIP_TANGIER) { + if (!strncmp(pentry->name, + "r69001-ts-i2c", 13)) + /* active low */ + irq_attr.polarity = 1; + else if (!strncmp(pentry->name, + "synaptics_3202", 14)) + /* active low */ + irq_attr.polarity = 1; + else if (irq == 41) + /* fast_int_1 */ + irq_attr.polarity = 1; + else + /* active high */ + irq_attr.polarity = 0; + } else { + /* PNW and CLV go with active low */ + 
irq_attr.polarity = 1; + } + io_apic_set_pci_routing(NULL, irq, &irq_attr); + } + } else { irq = 0; /* No irq */ + } dev = get_device_id(pentry->type, pentry->name); diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c index e6cb80f620afff..4d171e8640efe7 100644 --- a/arch/x86/platform/iris/iris.c +++ b/arch/x86/platform/iris/iris.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index efe4d7220397ea..dfe605ac1bcd52 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -433,15 +433,49 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) return; } -static inline unsigned long cycles_2_us(unsigned long long cyc) +/* + * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative + * number, not an absolute. It converts a duration in cycles to a duration in + * ns. + */ +static inline unsigned long long cycles_2_ns(unsigned long long cyc) { + struct cyc2ns_data *data = cyc2ns_read_begin(); unsigned long long ns; - unsigned long us; - int cpu = smp_processor_id(); - ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR; - us = ns / 1000; - return us; + ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift); + + cyc2ns_read_end(data); + return ns; +} + +/* + * The reverse of the above; converts a duration in ns to a duration in cycles. + */ +static inline unsigned long long ns_2_cycles(unsigned long long ns) +{ + struct cyc2ns_data *data = cyc2ns_read_begin(); + unsigned long long cyc; + + cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul; + + cyc2ns_read_end(data); + return cyc; +} + +static inline unsigned long cycles_2_us(unsigned long long cyc) +{ + return cycles_2_ns(cyc) / NSEC_PER_USEC; +} + +static inline cycles_t sec_2_cycles(unsigned long sec) +{ + return ns_2_cycles(sec * NSEC_PER_SEC); +} + +static inline unsigned long long usec_2_cycles(unsigned long usec) +{ + return ns_2_cycles(usec * NSEC_PER_USEC); } /* @@ -668,16 +702,6 @@ static int wait_completion(struct bau_desc *bau_desc, bcp, try); } -static inline cycles_t sec_2_cycles(unsigned long sec) -{ - unsigned long ns; - cycles_t cyc; - - ns = sec * 1000000000; - cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); - return cyc; -} - /* * Our retries are blocked by all destination sw ack resources being * in use, and a timeout is pending. 
In that case hardware immediately @@ -1327,16 +1351,6 @@ static void ptc_seq_stop(struct seq_file *file, void *data) { } -static inline unsigned long long usec_2_cycles(unsigned long microsec) -{ - unsigned long ns; - unsigned long long cyc; - - ns = microsec * 1000; - cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); - return cyc; -} - /* * Display the statistics thru /proc/sgi_uv/ptc_statistics * 'data' points to the cpu number diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index a44f457e70a19f..bad628a620c4ef 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -29,12 +29,10 @@ void __init reserve_real_mode(void) void __init setup_real_mode(void) { u16 real_mode_seg; - u32 *rel; + const u32 *rel; u32 count; - u32 *ptr; - u16 *seg; - int i; unsigned char *base; + unsigned long phys_base; struct trampoline_header *trampoline_header; size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); #ifdef CONFIG_X86_64 @@ -46,23 +44,23 @@ void __init setup_real_mode(void) memcpy(base, real_mode_blob, size); - real_mode_seg = __pa(base) >> 4; + phys_base = __pa(base); + real_mode_seg = phys_base >> 4; + rel = (u32 *) real_mode_relocs; /* 16-bit segment relocations. */ - count = rel[0]; - rel = &rel[1]; - for (i = 0; i < count; i++) { - seg = (u16 *) (base + rel[i]); + count = *rel++; + while (count--) { + u16 *seg = (u16 *) (base + *rel++); *seg = real_mode_seg; } /* 32-bit linear relocations. */ - count = rel[i]; - rel = &rel[i + 1]; - for (i = 0; i < count; i++) { - ptr = (u32 *) (base + rel[i]); - *ptr += __pa(base); + count = *rel++; + while (count--) { + u32 *ptr = (u32 *) (base + *rel++); + *ptr += phys_base; } /* Must be perfomed *after* relocation. */ diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S index f932ea61d1c844..d66c607bdc5809 100644 --- a/arch/x86/realmode/rm/reboot.S +++ b/arch/x86/realmode/rm/reboot.S @@ -1,5 +1,4 @@ #include -#include #include #include #include diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S index c1b2791183e7a6..48ddd76bc4c3d2 100644 --- a/arch/x86/realmode/rm/trampoline_32.S +++ b/arch/x86/realmode/rm/trampoline_32.S @@ -20,7 +20,6 @@ */ #include -#include #include #include #include "realmode.h" diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index bb360dc39d215e..dac7b20d2f9de4 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -25,7 +25,6 @@ */ #include -#include #include #include #include diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index aabfb8380a1c6c..96bc506ac6de7d 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -357,3 +357,5 @@ 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 349 i386 kcmp sys_kcmp 350 i386 finit_module sys_finit_module +351 i386 sched_setattr sys_sched_setattr +352 i386 sched_getattr sys_sched_getattr diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 38ae65dfd14ffe..a12bddc7ccea11 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -320,6 +320,8 @@ 311 64 process_vm_writev sys_process_vm_writev 312 common kcmp sys_kcmp 313 common finit_module sys_finit_module +314 common sched_setattr sys_sched_setattr +315 common sched_getattr sys_sched_getattr # # x32-specific system call numbers start at 512 to avoid cache impact diff --git 
a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index f7bab68a4b83e4..11f9285a2ff667 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -722,15 +722,25 @@ static void percpu_init(void) /* * Check to see if a symbol lies in the .data..percpu section. - * For some as yet not understood reason the "__init_begin" - * symbol which immediately preceeds the .data..percpu section - * also shows up as it it were part of it so we do an explict - * check for that symbol name and ignore it. + * + * The linker incorrectly associates some symbols with the + * .data..percpu section so we also need to check the symbol + * name to make sure that we classify the symbol correctly. + * + * The GNU linker incorrectly associates: + * __init_begin + * __per_cpu_load + * + * The "gold" linker incorrectly associates: + * init_per_cpu__irq_stack_union + * init_per_cpu__gdt_page */ static int is_percpu_sym(ElfW(Sym) *sym, const char *symname) { return (sym->st_shndx == per_cpu_shndx) && - strcmp(symname, "__init_begin"); + strcmp(symname, "__init_begin") && + strcmp(symname, "__per_cpu_load") && + strncmp(symname, "init_per_cpu_", 13); } diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 2ada505067ccec..eb5d7a56f8d4b5 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->wall_time_sec; ns = gtod->wall_time_snsec; @@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->monotonic_time_sec; ns = gtod->monotonic_time_snsec; @@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); ts->tv_sec = gtod->wall_time_coarse.tv_sec; ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); @@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S index 01f5e3b4613cb8..1e13eb8c9656f7 100644 --- a/arch/x86/vdso/vdso.S +++ b/arch/x86/vdso/vdso.S @@ -1,6 +1,5 @@ #include #include -#include __PAGE_ALIGNED_DATA diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S index d6b9a7f42a8a43..295f1c7543d8bd 100644 --- a/arch/x86/vdso/vdsox32.S +++ b/arch/x86/vdso/vdsox32.S @@ -1,6 +1,5 @@ #include #include -#include __PAGE_ALIGNED_DATA diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index ef021677d53653..e1ee6b51dfc586 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h @@ -9,21 +9,14 @@ #ifndef _XTENSA_SYSTEM_H #define _XTENSA_SYSTEM_H -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) #define rmb() barrier()
#define wmb() mb() #ifdef CONFIG_SMP #error smp_* not defined -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() #endif -#define set_mb(var, value) do { var = value; mb(); } while (0) +#include #endif /* _XTENSA_SYSTEM_H */ diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index ba6cf8e9aa0a59..b91ce75bd35db9 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = { void blk_mq_unregister_disk(struct gendisk *disk) { struct request_queue *q = disk->queue; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int i, j; + + queue_for_each_hw_ctx(q, hctx, i) { + hctx_for_each_ctx(hctx, ctx, j) { + kobject_del(&ctx->kobj); + kobject_put(&ctx->kobj); + } + kobject_del(&hctx->kobj); + kobject_put(&hctx->kobj); + } kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); kobject_del(&q->mq_kobj); + kobject_put(&q->mq_kobj); kobject_put(&disk_to_dev(disk)->kobj); } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5d9248526d780b..4770de5707b9b9 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig" config ACPI_EXTLOG tristate "Extended Error Log support" depends on X86_MCE && X86_LOCAL_APIC - select EFI select UEFI_CPER default n help diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 8711e3797165fa..3c2e4aa529c479 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev) goto end; result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac); + ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); if (result) { power_supply_unregister(&ac->charger); goto end; @@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev) return -EINVAL; acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler); + ACPI_ALL_NOTIFY, acpi_ac_notify_handler); ac = platform_get_drvdata(pdev); if (ac->charger.dev) diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index a6869e110ce565..5d33c54154050b 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -43,6 +44,8 @@ struct extlog_l1_head { u8 rev1[12]; }; +static int old_edac_report_status; + static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295"; /* L1 table related physical address */ @@ -150,7 +153,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu); - return NOTIFY_DONE; + return NOTIFY_STOP; } static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret) @@ -231,8 +234,12 @@ static int __init extlog_init(void) u64 cap; int rc; - rc = -ENODEV; + if (get_edac_report_status() == EDAC_REPORTING_FORCE) { + pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n"); + return -EPERM; + } + rc = -ENODEV; rdmsrl(MSR_IA32_MCG_CAP, cap); if (!(cap & MCG_ELOG_P)) return rc; @@ -287,6 +294,12 @@ static int __init extlog_init(void) if (elog_buf == NULL) goto err_release_elog; + /* + * eMCA event report method has higher priority than EDAC method, + * unless EDAC event report method is mandatory. 
+ */ + old_edac_report_status = get_edac_report_status(); + set_edac_report_status(EDAC_REPORTING_DISABLED); mce_register_decode_chain(&extlog_mce_dec); /* enable OS to be involved to take over management from BIOS */ ((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN; @@ -308,6 +321,7 @@ static int __init extlog_init(void) static void __exit extlog_exit(void) { + set_edac_report_status(old_edac_report_status); mce_unregister_decode_chain(&extlog_mce_dec); ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; if (extlog_l1_addr) diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index fc6008fbce35bd..509452a62f964d 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -193,10 +193,7 @@ static int power_saving_thread(void *data) CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); stop_critical_timings(); - __monitor((void *)&current_thread_info()->flags, 0, 0); - smp_mb(); - if (!need_resched()) - __mwait(power_saving_mwait_eax, 1); + mwait_idle_with_hints(power_saving_mwait_eax, 1); start_critical_timings(); if (lapic_marked_unstable) diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 786294bb682c10..3650b21832279a 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -2,7 +2,6 @@ config ACPI_APEI bool "ACPI Platform Error Interface (APEI)" select MISC_FILESYSTEMS select PSTORE - select EFI select UEFI_CPER depends on X86 help diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 6d2c49b86b7fa8..e55584a072c632 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -41,6 +41,7 @@ #include #include #include +#include #include "apei-internal.h" @@ -567,8 +568,7 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr, bit_offset = reg->bit_offset; access_size_code = reg->access_width; space_id = reg->space_id; - /* Handle possible alignment issues */ - memcpy(paddr, &reg->address, sizeof(*paddr)); + *paddr = get_unaligned(&reg->address); if (!*paddr) { pr_warning(FW_BUG APEI_PFX "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n", diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index fb57d03e698bd3..7dcc8a824aae30 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "apei-internal.h" @@ -216,7 +217,7 @@ static void check_vendor_extension(u64 paddr, static void *einj_get_parameter_address(void) { int i; - u64 paddrv4 = 0, paddrv5 = 0; + u64 pa_v4 = 0, pa_v5 = 0; struct acpi_whea_header *entry; entry = EINJ_TAB_ENTRY(einj_tab); @@ -225,30 +226,28 @@ static void *einj_get_parameter_address(void) entry->instruction == ACPI_EINJ_WRITE_REGISTER && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - memcpy(&paddrv4, &entry->register_region.address, - sizeof(paddrv4)); + pa_v4 = get_unaligned(&entry->register_region.address); if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS && entry->instruction == ACPI_EINJ_WRITE_REGISTER && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - memcpy(&paddrv5, &entry->register_region.address, - sizeof(paddrv5)); + pa_v5 = get_unaligned(&entry->register_region.address); entry++; } - if (paddrv5) { + if (pa_v5) { struct set_error_type_with_address *v5param; - v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param)); + v5param = acpi_os_map_memory(pa_v5, sizeof(*v5param)); if (v5param) { acpi5 = 1; - check_vendor_extension(paddrv5, v5param); + check_vendor_extension(pa_v5, v5param);
return v5param; } } - if (param_extension && paddrv4) { + if (param_extension && pa_v4) { struct einj_parameter *v4param; - v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param)); + v4param = acpi_os_map_memory(pa_v4, sizeof(*v4param)); if (!v4param) return NULL; if (v4param->reserved1 || v4param->reserved2) { @@ -416,7 +415,8 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, return rc; } -static int __einj_error_inject(u32 type, u64 param1, u64 param2) +static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, + u64 param3, u64 param4) { struct apei_exec_context ctx; u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; @@ -446,6 +446,12 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) break; } v5param->flags = vendor_flags; + } else if (flags) { + v5param->flags = flags; + v5param->memory_address = param1; + v5param->memory_address_range = param2; + v5param->apicid = param3; + v5param->pcie_sbdf = param4; } else { switch (type) { case ACPI_EINJ_PROCESSOR_CORRECTABLE: @@ -514,11 +520,17 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) } /* Inject the specified hardware error */ -static int einj_error_inject(u32 type, u64 param1, u64 param2) +static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, + u64 param3, u64 param4) { int rc; unsigned long pfn; + /* If user manually set "flags", make sure it is legal */ + if (flags && (flags & + ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF))) + return -EINVAL; + /* * We need extra sanity checks for memory errors. * Other types leap directly to injection. @@ -532,7 +544,7 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2) if (type & ACPI5_VENDOR_BIT) { if (vendor_flags != SETWA_FLAGS_MEM) goto inject; - } else if (!(type & MEM_ERROR_MASK)) + } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM)) goto inject; /* @@ -546,15 +558,18 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2) inject: mutex_lock(&einj_mutex); - rc = __einj_error_inject(type, param1, param2); + rc = __einj_error_inject(type, flags, param1, param2, param3, param4); mutex_unlock(&einj_mutex); return rc; } static u32 error_type; +static u32 error_flags; static u64 error_param1; static u64 error_param2; +static u64 error_param3; +static u64 error_param4; static struct dentry *einj_debug_dir; static int available_error_type_show(struct seq_file *m, void *v) @@ -648,7 +663,8 @@ static int error_inject_set(void *data, u64 val) if (!error_type) return -EINVAL; - return einj_error_inject(error_type, error_param1, error_param2); + return einj_error_inject(error_type, error_flags, error_param1, error_param2, + error_param3, error_param4); } DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, @@ -729,6 +745,10 @@ static int __init einj_init(void) rc = -ENOMEM; einj_param = einj_get_parameter_address(); if ((param_extension || acpi5) && einj_param) { + fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_flags); + if (!fentry) + goto err_unmap; fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, einj_debug_dir, &error_param1); if (!fentry) @@ -737,6 +757,14 @@ static int __init einj_init(void) einj_debug_dir, &error_param2); if (!fentry) goto err_unmap; + fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param3); + if (!fentry) + goto err_unmap; + fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param4); + if (!fentry) + goto err_unmap; fentry = 
debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, einj_debug_dir, ¬rigger); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index cb1d557fc22c05..ed65e9c4b5b041 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -611,7 +611,7 @@ static void __erst_record_id_cache_compact(void) if (entries[i] == APEI_ERST_INVALID_RECORD_ID) continue; if (wpos != i) - memcpy(&entries[wpos], &entries[i], sizeof(entries[i])); + entries[wpos] = entries[i]; wpos++; } erst_record_id_cache.len = wpos; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index a30bc313787be6..46766ef7ef5de9 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -413,27 +413,31 @@ static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev) { #ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE unsigned long pfn; + int flags = -1; int sec_sev = ghes_severity(gdata->error_severity); struct cper_sec_mem_err *mem_err; mem_err = (struct cper_sec_mem_err *)(gdata + 1); - if (sec_sev == GHES_SEV_CORRECTED && - (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) && - (mem_err->validation_bits & CPER_MEM_VALID_PA)) { - pfn = mem_err->physical_addr >> PAGE_SHIFT; - if (pfn_valid(pfn)) - memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE); - else if (printk_ratelimit()) - pr_warn(FW_WARN GHES_PFX - "Invalid address in generic error data: %#llx\n", - mem_err->physical_addr); - } - if (sev == GHES_SEV_RECOVERABLE && - sec_sev == GHES_SEV_RECOVERABLE && - mem_err->validation_bits & CPER_MEM_VALID_PA) { - pfn = mem_err->physical_addr >> PAGE_SHIFT; - memory_failure_queue(pfn, 0, 0); + if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) + return; + + pfn = mem_err->physical_addr >> PAGE_SHIFT; + if (!pfn_valid(pfn)) { + pr_warn_ratelimited(FW_WARN GHES_PFX + "Invalid address in generic error data: %#llx\n", + mem_err->physical_addr); + return; } + + /* iff following two events can be handled properly by now */ + if (sec_sev == GHES_SEV_CORRECTED && + (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED)) + flags = MF_SOFT_OFFLINE; + if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE) + flags = 0; + + if (flags != -1) + memory_failure_queue(pfn, 0, flags); #endif } @@ -453,8 +457,7 @@ static void ghes_do_proc(struct ghes *ghes, ghes_edac_report_mem_error(ghes, sev, mem_err); #ifdef CONFIG_X86_MCE - apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED, - mem_err); + apei_mce_report_mem_error(sev, mem_err); #endif ghes_handle_memory_failure(gdata, sev); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index fbf1aceda8b8ab..5876a49dfd386a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy "); MODULE_DESCRIPTION("ACPI Battery Driver"); MODULE_LICENSE("GPL"); +static int battery_bix_broken_package; static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); @@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery) ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); return -ENODEV; } - if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) + + if (battery_bix_broken_package) + result = extract_package(battery, buffer.pointer, + extended_info_offsets + 1, + ARRAY_SIZE(extended_info_offsets) - 1); + else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) result = extract_package(battery, buffer.pointer, extended_info_offsets, ARRAY_SIZE(extended_info_offsets)); @@ -754,6 +760,17 
@@ static int battery_notify(struct notifier_block *nb, return 0; } +static struct dmi_system_id bat_dmi_table[] = { + { + .ident = "NEC LZ750/LS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"), + }, + }, + {}, +}; + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) { if (acpi_disabled) return; + + if (dmi_check_system(bat_dmi_table)) + battery_bix_broken_package = 1; acpi_bus_register_driver(&acpi_battery_driver); } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index bba9b72e25f823..0710004055c809 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data) } EXPORT_SYMBOL(acpi_bus_get_private_data); +void acpi_bus_no_hotplug(acpi_handle handle) +{ + struct acpi_device *adev = NULL; + + acpi_bus_get_device(handle, &adev); + if (adev) + adev->flags.no_hotplug = true; +} +EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug); + static void acpi_print_osc_error(acpi_handle handle, struct acpi_osc_context *context, char *error) { diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 644516d9bde6cf..f90c56c8379e8c 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, if (unlikely(!pr)) return -EINVAL; - if (cx->entry_method == ACPI_CSTATE_FFH) { - if (current_set_polling_and_test()) - return -EINVAL; - } - lapic_timer_state_broadcast(pr, cx, 1); acpi_idle_do_entry(cx); @@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (unlikely(!pr)) return -EINVAL; - if (cx->entry_method == ACPI_CSTATE_FFH) { - if (current_set_polling_and_test()) - return -EINVAL; - } - /* * Must be done before busmaster disable as we might need to * access HPET ! @@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, } } - if (cx->entry_method == ACPI_CSTATE_FFH) { - if (current_set_polling_and_test()) - return -EINVAL; - } - acpi_unlazy_tlb(smp_processor_id()); /* Tell the scheduler that we are going deep-idle: */ diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 14f1e95063380a..e3a92a6da39ae2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178, + PCI_VENDOR_ID_MARVELL_EXT, 0x9170), + .driver_data = board_ahci_yes_fbs }, /* 88se9170 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), @@ -1238,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - /* AHCI controllers often implement SFF compatible interface. - * Grab all PCI BARs just in case. 
- */ - rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); - if (rc == -EBUSY) - pcim_pin_device(pdev); - if (rc) - return rc; - if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == 0x2652 || pdev->device == 0x2653)) { u8 map; @@ -1263,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } + /* AHCI controllers often implement SFF compatible interface. + * Grab all PCI BARs just in case. + */ + rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index ae2d73fe321e2f..3e23e9941dad00 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) /* * set PHY Paremeters, two steps to configure the GPR13, * one write for rest of parameters, mask of first write - * is 0x07fffffd, and the other one write for setting + * is 0x07ffffff, and the other one write for setting * the mpll_clk_en. */ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK @@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) | IMX6Q_GPR13_SATA_TX_ATTEN_MASK | IMX6Q_GPR13_SATA_TX_BOOST_MASK | IMX6Q_GPR13_SATA_TX_LVL_MASK + | IMX6Q_GPR13_SATA_MPLL_CLK_EN | IMX6Q_GPR13_SATA_TX_EDGE_RATE , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 75b93678bbcd7f..1393a5890ed535 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev, "failed to get NCQ Send/Recv Log Emask 0x%x\n", err_mask); } else { + u8 *cmds = dev->ncq_send_recv_cmds; + dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; - memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, - ATA_LOG_NCQ_SEND_RECV_SIZE); + memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); + + if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { + ata_dev_dbg(dev, "disabling queued TRIM support\n"); + cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= + ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; + } } } @@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, + /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, @@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* devices that don't properly handle queued TRIM commands */ + { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + /* End Marker */ { } }; @@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur, { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, + { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, }; char *start = *cur, *p = *cur; char 
*id, *val, *endp; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index ab58556d347c19..377eb889f555dd 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work) return; } + /* + * XXX - UGLY HACK + * + * The block layer suspend/resume path is fundamentally broken due + * to freezable kthreads and workqueue and may deadlock if a block + * device gets removed while resume is in progress. I don't know + * what the solution is short of removing freezable kthreads and + * workqueues altogether. + * + * The following is an ugly hack to avoid kicking off device + * removal while freezer is active. This is a joke but does avoid + * this particular deadlock scenario. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=62801 + * http://marc.info/?l=linux-kernel&m=138695698516487 + */ +#ifdef CONFIG_FREEZER + while (pm_freezing) + msleep(10); +#endif + DPRINTK("ENTER\n"); mutex_lock(&ap->scsi_scan_mutex); diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index fe3ca0989b14ca..1ad2f62d34b98f 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c @@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = { .id_table = sis_pci_tbl, .probe = sis_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; static struct scsi_host_template sis_sht = { diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 94e8a80e87f87e..870ecfd503afe1 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -1,6 +1,6 @@ # Makefile for the Linux device tree -obj-y := core.o bus.o dd.o syscore.o \ +obj-y := component.o core.o bus.o dd.o syscore.o \ driver.o class.o platform.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o \ diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 73f6c2925281f4..59dc8086e4faa8 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -146,8 +146,19 @@ void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr) } EXPORT_SYMBOL_GPL(bus_remove_file); +static void bus_release(struct kobject *kobj) +{ + struct subsys_private *priv = + container_of(kobj, typeof(*priv), subsys.kobj); + struct bus_type *bus = priv->bus; + + kfree(priv); + bus->p = NULL; +} + static struct kobj_type bus_ktype = { .sysfs_ops = &bus_sysfs_ops, + .release = bus_release, }; static int bus_uevent_filter(struct kset *kset, struct kobject *kobj) @@ -953,8 +964,6 @@ void bus_unregister(struct bus_type *bus) kset_unregister(bus->p->devices_kset); bus_remove_file(bus, &bus_attr_uevent); kset_unregister(&bus->p->subsys); - kfree(bus->p); - bus->p = NULL; } EXPORT_SYMBOL_GPL(bus_unregister); diff --git a/drivers/base/component.c b/drivers/base/component.c new file mode 100644 index 00000000000000..c53efe6c6d8eb4 --- /dev/null +++ b/drivers/base/component.c @@ -0,0 +1,382 @@ +/* + * Componentized device handling. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This is work in progress. We gather up the component devices into a list, + * and bind them when instructed. At the moment, we're specific to the DRM + * subsystem, and only handles one master device, but this doesn't have to be + * the case. 
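Before the implementation that follows, a minimal usage sketch of the new API may help. Everything prefixed my_ (including my_child_key and my_shared_state) is hypothetical and not part of the patch; the sketch assumes the accompanying linux/component.h header. A master driver describes which children it expects and how to bind them, while each child driver simply registers itself as a component:

#include <linux/component.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static void *my_child_key;	/* something that identifies the expected child */
static void *my_shared_state;	/* data handed to every bind/unbind callback */

/* child side: bind/unbind run once the master assembles everything */
static int my_child_bind(struct device *dev, struct device *master_dev, void *data)
{
	return 0;	/* set up this child's part of the composite device */
}

static void my_child_unbind(struct device *dev, struct device *master_dev, void *data)
{
}

static const struct component_ops my_child_ops = {
	.bind	= my_child_bind,
	.unbind	= my_child_unbind,
};

static int my_child_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &my_child_ops);
}

/* master side */
static int my_compare(struct device *dev, void *data)
{
	return dev == data;	/* match an expected child by some key */
}

static int my_add_components(struct device *master_dev, struct master *master)
{
	/* one call per expected child; my_child_key identifies it */
	return component_master_add_child(master, my_compare, my_child_key);
}

static int my_master_bind(struct device *master_dev)
{
	/* calls ->bind(child_dev, master_dev, data) on every matched child */
	return component_bind_all(master_dev, my_shared_state);
}

static void my_master_unbind(struct device *master_dev)
{
	component_unbind_all(master_dev, my_shared_state);
}

static const struct component_master_ops my_master_ops = {
	.add_components	= my_add_components,
	.bind		= my_master_bind,
	.unbind		= my_master_unbind,
};

static int my_master_probe(struct platform_device *pdev)
{
	return component_master_add(&pdev->dev, &my_master_ops);
}

Once every child named by add_components() has called component_add(), the master is brought up and its bind() runs; component_del() or component_master_del() tears the assembly back down.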
+ */ +#include +#include +#include +#include +#include +#include +#include + +struct master { + struct list_head node; + struct list_head components; + bool bound; + + const struct component_master_ops *ops; + struct device *dev; +}; + +struct component { + struct list_head node; + struct list_head master_node; + struct master *master; + bool bound; + + const struct component_ops *ops; + struct device *dev; +}; + +static DEFINE_MUTEX(component_mutex); +static LIST_HEAD(component_list); +static LIST_HEAD(masters); + +static struct master *__master_find(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *m; + + list_for_each_entry(m, &masters, node) + if (m->dev == dev && (!ops || m->ops == ops)) + return m; + + return NULL; +} + +/* Attach an unattached component to a master. */ +static void component_attach_master(struct master *master, struct component *c) +{ + c->master = master; + + list_add_tail(&c->master_node, &master->components); +} + +/* Detach a component from a master. */ +static void component_detach_master(struct master *master, struct component *c) +{ + list_del(&c->master_node); + + c->master = NULL; +} + +int component_master_add_child(struct master *master, + int (*compare)(struct device *, void *), void *compare_data) +{ + struct component *c; + int ret = -ENXIO; + + list_for_each_entry(c, &component_list, node) { + if (c->master) + continue; + + if (compare(c->dev, compare_data)) { + component_attach_master(master, c); + ret = 0; + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(component_master_add_child); + +/* Detach all attached components from this master */ +static void master_remove_components(struct master *master) +{ + while (!list_empty(&master->components)) { + struct component *c = list_first_entry(&master->components, + struct component, master_node); + + WARN_ON(c->master != master); + + component_detach_master(master, c); + } +} + +/* + * Try to bring up a master. If component is NULL, we're interested in + * this master, otherwise it's a component which must be present to try + * and bring up the master. + * + * Returns 1 for successful bringup, 0 if not ready, or -ve errno. + */ +static int try_to_bring_up_master(struct master *master, + struct component *component) +{ + int ret = 0; + + if (!master->bound) { + /* + * Search the list of components, looking for components that + * belong to this master, and attach them to the master. 
+ */ + if (master->ops->add_components(master->dev, master)) { + /* Failed to find all components */ + master_remove_components(master); + ret = 0; + goto out; + } + + if (component && component->master != master) { + master_remove_components(master); + ret = 0; + goto out; + } + + /* Found all components */ + ret = master->ops->bind(master->dev); + if (ret < 0) { + master_remove_components(master); + goto out; + } + + master->bound = true; + ret = 1; + } +out: + + return ret; +} + +static int try_to_bring_up_masters(struct component *component) +{ + struct master *m; + int ret = 0; + + list_for_each_entry(m, &masters, node) { + ret = try_to_bring_up_master(m, component); + if (ret != 0) + break; + } + + return ret; +} + +static void take_down_master(struct master *master) +{ + if (master->bound) { + master->ops->unbind(master->dev); + master->bound = false; + } + + master_remove_components(master); +} + +int component_master_add(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *master; + int ret; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return -ENOMEM; + + master->dev = dev; + master->ops = ops; + INIT_LIST_HEAD(&master->components); + + /* Add to the list of available masters. */ + mutex_lock(&component_mutex); + list_add(&master->node, &masters); + + ret = try_to_bring_up_master(master, NULL); + + if (ret < 0) { + /* Delete off the list if we weren't successful */ + list_del(&master->node); + kfree(master); + } + mutex_unlock(&component_mutex); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(component_master_add); + +void component_master_del(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *master; + + mutex_lock(&component_mutex); + master = __master_find(dev, ops); + if (master) { + take_down_master(master); + + list_del(&master->node); + kfree(master); + } + mutex_unlock(&component_mutex); +} +EXPORT_SYMBOL_GPL(component_master_del); + +static void component_unbind(struct component *component, + struct master *master, void *data) +{ + WARN_ON(!component->bound); + + component->ops->unbind(component->dev, master->dev, data); + component->bound = false; + + /* Release all resources claimed in the binding of this component */ + devres_release_group(component->dev, component); +} + +void component_unbind_all(struct device *master_dev, void *data) +{ + struct master *master; + struct component *c; + + WARN_ON(!mutex_is_locked(&component_mutex)); + + master = __master_find(master_dev, NULL); + if (!master) + return; + + list_for_each_entry_reverse(c, &master->components, master_node) + component_unbind(c, master, data); +} +EXPORT_SYMBOL_GPL(component_unbind_all); + +static int component_bind(struct component *component, struct master *master, + void *data) +{ + int ret; + + /* + * Each component initialises inside its own devres group. + * This allows us to roll-back a failed component without + * affecting anything else. + */ + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + /* + * Also open a group for the device itself: this allows us + * to release the resources claimed against the sub-device + * at the appropriate moment. 
+ */ + if (!devres_open_group(component->dev, component, GFP_KERNEL)) { + devres_release_group(master->dev, NULL); + return -ENOMEM; + } + + dev_dbg(master->dev, "binding %s (ops %ps)\n", + dev_name(component->dev), component->ops); + + ret = component->ops->bind(component->dev, master->dev, data); + if (!ret) { + component->bound = true; + + /* + * Close the component device's group so that resources + * allocated in the binding are encapsulated for removal + * at unbind. Remove the group on the DRM device as we + * can clean those resources up independently. + */ + devres_close_group(component->dev, NULL); + devres_remove_group(master->dev, NULL); + + dev_info(master->dev, "bound %s (ops %ps)\n", + dev_name(component->dev), component->ops); + } else { + devres_release_group(component->dev, NULL); + devres_release_group(master->dev, NULL); + + dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", + dev_name(component->dev), component->ops, ret); + } + + return ret; +} + +int component_bind_all(struct device *master_dev, void *data) +{ + struct master *master; + struct component *c; + int ret = 0; + + WARN_ON(!mutex_is_locked(&component_mutex)); + + master = __master_find(master_dev, NULL); + if (!master) + return -EINVAL; + + list_for_each_entry(c, &master->components, master_node) { + ret = component_bind(c, master, data); + if (ret) + break; + } + + if (ret != 0) { + list_for_each_entry_continue_reverse(c, &master->components, + master_node) + component_unbind(c, master, data); + } + + return ret; +} +EXPORT_SYMBOL_GPL(component_bind_all); + +int component_add(struct device *dev, const struct component_ops *ops) +{ + struct component *component; + int ret; + + component = kzalloc(sizeof(*component), GFP_KERNEL); + if (!component) + return -ENOMEM; + + component->ops = ops; + component->dev = dev; + + dev_dbg(dev, "adding component (ops %ps)\n", ops); + + mutex_lock(&component_mutex); + list_add_tail(&component->node, &component_list); + + ret = try_to_bring_up_masters(component); + if (ret < 0) { + list_del(&component->node); + + kfree(component); + } + mutex_unlock(&component_mutex); + + return ret < 0 ? 
ret : 0; +} +EXPORT_SYMBOL_GPL(component_add); + +void component_del(struct device *dev, const struct component_ops *ops) +{ + struct component *c, *component = NULL; + + mutex_lock(&component_mutex); + list_for_each_entry(c, &component_list, node) + if (c->dev == dev && c->ops == ops) { + list_del(&c->node); + component = c; + break; + } + + if (component && component->master) + take_down_master(component->master); + + mutex_unlock(&component_mutex); + + WARN_ON(!component); + kfree(component); +} +EXPORT_SYMBOL_GPL(component_del); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/core.c b/drivers/base/core.c index 67b180d855b2c7..2b567177ef7829 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -491,11 +491,13 @@ static int device_add_attrs(struct device *dev) if (device_supports_offline(dev) && !dev->offline_disabled) { error = device_create_file(dev, &dev_attr_online); if (error) - goto err_remove_type_groups; + goto err_remove_dev_groups; } return 0; + err_remove_dev_groups: + device_remove_groups(dev, dev->groups); err_remove_type_groups: if (type) device_remove_groups(dev, type->groups); @@ -1603,6 +1605,7 @@ device_create_groups_vargs(struct class *class, struct device *parent, goto error; } + device_initialize(dev); dev->devt = devt; dev->class = class; dev->parent = parent; @@ -1614,7 +1617,7 @@ device_create_groups_vargs(struct class *class, struct device *parent, if (retval) goto error; - retval = device_register(dev); + retval = device_add(dev); if (retval) goto error; diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 0f3820121e02ef..25798db1455326 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -299,7 +299,7 @@ static int handle_remove(const char *nodename, struct device *dev) { struct path parent; struct dentry *dentry; - int deleted = 1; + int deleted = 0; int err; dentry = kern_path_locked(nodename, &parent); diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index eb8fb94ae2c527..8a97ddfa61222d 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -96,6 +96,15 @@ static inline long firmware_loading_timeout(void) return loading_timeout > 0 ? 
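The componentised-device core added above (drivers/base/component.c) is consumed from two sides: an aggregate "master" driver that declares how to find and bind its pieces, and the individual sub-device drivers that register themselves as components. The sketch below is a rough illustration using only the API introduced by that file and is not part of the patch; every foo_* identifier is hypothetical.

    static struct device_node *foo_child_of_node;   /* DT node of the expected sub-device (hypothetical) */

    static int foo_compare(struct device *dev, void *data)
    {
            return dev->of_node == data;    /* non-zero: this is the child we are waiting for */
    }

    static int foo_add_components(struct device *master_dev, struct master *m)
    {
            /* one call per expected child; fails with -ENXIO until the child has registered */
            return component_master_add_child(m, foo_compare, foo_child_of_node);
    }

    static int foo_master_bind(struct device *master_dev)
    {
            /* set up the aggregate device, then bind every attached component */
            return component_bind_all(master_dev, NULL);
    }

    static void foo_master_unbind(struct device *master_dev)
    {
            component_unbind_all(master_dev, NULL);
    }

    static const struct component_master_ops foo_master_ops = {
            .add_components = foo_add_components,
            .bind           = foo_master_bind,
            .unbind         = foo_master_unbind,
    };

    static int foo_child_bind(struct device *dev, struct device *master, void *data)
    {
            return 0;       /* claim per-child resources; released again on unbind */
    }

    static void foo_child_unbind(struct device *dev, struct device *master, void *data)
    {
    }

    static const struct component_ops foo_child_ops = {
            .bind   = foo_child_bind,
            .unbind = foo_child_unbind,
    };

    /* master driver probe/remove:     component_master_add(dev, &foo_master_ops);
     *                                 component_master_del(dev, &foo_master_ops);
     * sub-device driver probe/remove: component_add(dev, &foo_child_ops);
     *                                 component_del(dev, &foo_child_ops);
     */

The framework only calls the master's bind() once add_components() has attached every child (all under component_mutex), and component_bind_all() unwinds the already-bound children automatically if one bind fails.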
loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT; } +/* firmware behavior options */ +#define FW_OPT_UEVENT (1U << 0) +#define FW_OPT_NOWAIT (1U << 1) +#ifdef CONFIG_FW_LOADER_USER_HELPER +#define FW_OPT_FALLBACK (1U << 2) +#else +#define FW_OPT_FALLBACK 0 +#endif + struct firmware_cache { /* firmware_buf instance will be added into the below list */ spinlock_t lock; @@ -219,6 +228,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name, } static void __fw_free_buf(struct kref *ref) + __releases(&fwc->lock) { struct firmware_buf *buf = to_fwbuf(ref); struct firmware_cache *fwc = buf->fwc; @@ -270,21 +280,21 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); /* Don't inline this: 'struct kstat' is biggish */ -static noinline_for_stack long fw_file_size(struct file *file) +static noinline_for_stack int fw_file_size(struct file *file) { struct kstat st; if (vfs_getattr(&file->f_path, &st)) return -1; if (!S_ISREG(st.mode)) return -1; - if (st.size != (long)st.size) + if (st.size != (int)st.size) return -1; return st.size; } static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) { - long size; + int size; char *buf; int rc; @@ -820,7 +830,7 @@ static void firmware_class_timeout_work(struct work_struct *work) static struct firmware_priv * fw_create_instance(struct firmware *firmware, const char *fw_name, - struct device *device, bool uevent, bool nowait) + struct device *device, unsigned int opt_flags) { struct firmware_priv *fw_priv; struct device *f_dev; @@ -832,7 +842,7 @@ fw_create_instance(struct firmware *firmware, const char *fw_name, goto exit; } - fw_priv->nowait = nowait; + fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT); fw_priv->fw = firmware; INIT_DELAYED_WORK(&fw_priv->timeout_work, firmware_class_timeout_work); @@ -848,8 +858,8 @@ fw_create_instance(struct firmware *firmware, const char *fw_name, } /* load a firmware via user helper */ -static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, - long timeout) +static int _request_firmware_load(struct firmware_priv *fw_priv, + unsigned int opt_flags, long timeout) { int retval = 0; struct device *f_dev = &fw_priv->dev; @@ -885,7 +895,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, goto err_del_bin_attr; } - if (uevent) { + if (opt_flags & FW_OPT_UEVENT) { buf->need_uevent = true; dev_set_uevent_suppress(f_dev, false); dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id); @@ -911,16 +921,16 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, static int fw_load_from_user_helper(struct firmware *firmware, const char *name, struct device *device, - bool uevent, bool nowait, long timeout) + unsigned int opt_flags, long timeout) { struct firmware_priv *fw_priv; - fw_priv = fw_create_instance(firmware, name, device, uevent, nowait); + fw_priv = fw_create_instance(firmware, name, device, opt_flags); if (IS_ERR(fw_priv)) return PTR_ERR(fw_priv); fw_priv->buf = firmware->priv; - return _request_firmware_load(fw_priv, uevent, timeout); + return _request_firmware_load(fw_priv, opt_flags, timeout); } #ifdef CONFIG_PM_SLEEP @@ -942,7 +952,7 @@ static void kill_requests_without_uevent(void) #else /* CONFIG_FW_LOADER_USER_HELPER */ static inline int fw_load_from_user_helper(struct firmware *firmware, const char *name, - struct device *device, bool uevent, bool nowait, + struct device *device, unsigned int 
opt_flags, long timeout) { return -ENOENT; @@ -1023,7 +1033,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, } static int assign_firmware_buf(struct firmware *fw, struct device *device, - bool skip_cache) + unsigned int opt_flags) { struct firmware_buf *buf = fw->priv; @@ -1040,7 +1050,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, * device may has been deleted already, but the problem * should be fixed in devres or driver core. */ - if (device && !skip_cache) + /* don't cache firmware handled without uevent */ + if (device && (opt_flags & FW_OPT_UEVENT)) fw_add_devm_name(device, buf->fw_id); /* @@ -1061,7 +1072,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, - struct device *device, bool uevent, bool nowait) + struct device *device, unsigned int opt_flags) { struct firmware *fw; long timeout; @@ -1076,7 +1087,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, ret = 0; timeout = firmware_loading_timeout(); - if (nowait) { + if (opt_flags & FW_OPT_NOWAIT) { timeout = usermodehelper_read_lock_wait(timeout); if (!timeout) { dev_dbg(device, "firmware: %s loading timed out\n", @@ -1095,16 +1106,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name, ret = fw_get_filesystem_firmware(device, fw->priv); if (ret) { - dev_warn(device, "Direct firmware load failed with error %d\n", - ret); - dev_warn(device, "Falling back to user helper\n"); - ret = fw_load_from_user_helper(fw, name, device, - uevent, nowait, timeout); + if (opt_flags & FW_OPT_FALLBACK) { + dev_warn(device, + "Direct firmware load failed with error %d\n", + ret); + dev_warn(device, "Falling back to user helper\n"); + ret = fw_load_from_user_helper(fw, name, device, + opt_flags, timeout); + } } - /* don't cache firmware handled without uevent */ if (!ret) - ret = assign_firmware_buf(fw, device, !uevent); + ret = assign_firmware_buf(fw, device, opt_flags); usermodehelper_read_unlock(); @@ -1146,12 +1159,37 @@ request_firmware(const struct firmware **firmware_p, const char *name, /* Need to pin this module until return */ __module_get(THIS_MODULE); - ret = _request_firmware(firmware_p, name, device, true, false); + ret = _request_firmware(firmware_p, name, device, + FW_OPT_UEVENT | FW_OPT_FALLBACK); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL(request_firmware); +#ifdef CONFIG_FW_LOADER_USER_HELPER +/** + * request_firmware: - load firmware directly without usermode helper + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * This function works pretty much like request_firmware(), but this doesn't + * fall back to usermode helper even if the firmware couldn't be loaded + * directly from fs. Hence it's useful for loading optional firmwares, which + * aren't always present, without extra long timeouts of udev. 
+ **/ +int request_firmware_direct(const struct firmware **firmware_p, + const char *name, struct device *device) +{ + int ret; + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL_GPL(request_firmware_direct); +#endif + /** * release_firmware: - release the resource associated with a firmware image * @fw: firmware resource to release @@ -1174,7 +1212,7 @@ struct firmware_work { struct device *device; void *context; void (*cont)(const struct firmware *fw, void *context); - bool uevent; + unsigned int opt_flags; }; static void request_firmware_work_func(struct work_struct *work) @@ -1185,7 +1223,7 @@ static void request_firmware_work_func(struct work_struct *work) fw_work = container_of(work, struct firmware_work, work); _request_firmware(&fw, fw_work->name, fw_work->device, - fw_work->uevent, true); + fw_work->opt_flags); fw_work->cont(fw, fw_work->context); put_device(fw_work->device); /* taken in request_firmware_nowait() */ @@ -1233,7 +1271,8 @@ request_firmware_nowait( fw_work->device = device; fw_work->context = context; fw_work->cont = cont; - fw_work->uevent = uevent; + fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | + (uevent ? FW_OPT_UEVENT : 0); if (!try_module_get(module)) { kfree(fw_work); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index f370fc13aea5d7..83a598ebb65a4a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1,4 +1,5 @@ #include + #include #include #include @@ -65,7 +66,7 @@ enum { NULL_Q_MQ = 2, }; -static int submit_queues = 1; +static int submit_queues; module_param(submit_queues, int, S_IRUGO); MODULE_PARM_DESC(submit_queues, "Number of submission queues"); @@ -101,9 +102,9 @@ static int hw_queue_depth = 64; module_param(hw_queue_depth, int, S_IRUGO); MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); -static bool use_per_node_hctx = true; +static bool use_per_node_hctx = false; module_param(use_per_node_hctx, bool, S_IRUGO); -MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true"); +MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); static void put_tag(struct nullb_queue *nq, unsigned int tag) { @@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) { - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, - hctx_index); + int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); + int tip = (reg->nr_hw_queues % nr_online_nodes); + int node = 0, i, n; + + /* + * Split submit queues evenly wrt to the number of nodes. If uneven, + * fill the first buckets with one extra, until the rest is filled with + * no extra. + */ + for (i = 0, n = 1; i < hctx_index; i++, n++) { + if (n % b_size == 0) { + n = 0; + node++; + + tip--; + if (!tip) + b_size = reg->nr_hw_queues / nr_online_nodes; + } + } + + /* + * A node might not be online, therefore map the relative node id to the + * real node id. 
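To make the intended use of the new request_firmware_direct() concrete, here is a rough sketch of a driver loading an optional image without the udev fallback; the helper and firmware file name are invented for illustration.

    static void foo_load_optional_patch(struct device *dev)
    {
            const struct firmware *fw;
            int err;

            /* no usermode-helper fallback: a missing file fails fast */
            err = request_firmware_direct(&fw, "foo/optional-patch.bin", dev);
            if (err) {
                    dev_dbg(dev, "no optional patch firmware (%d), continuing\n", err);
                    return;
            }

            /* ... push fw->data / fw->size to the hardware ... */

            release_firmware(fw);
    }

Plain request_firmware() keeps its old behaviour by passing FW_OPT_UEVENT | FW_OPT_FALLBACK, so existing callers still get the usermode-helper fallback, and only uevent-handled firmware is added to the devres cache.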
+ */ + for_each_online_node(n) { + if (!node) + break; + node--; + } + + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); } static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) @@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) kfree(hctx); } +static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) +{ + BUG_ON(!nullb); + BUG_ON(!nq); + + init_waitqueue_head(&nq->wait); + nq->queue_depth = nullb->queue_depth; +} + static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int index) { struct nullb *nullb = data; struct nullb_queue *nq = &nullb->queues[index]; - init_waitqueue_head(&nq->wait); - nq->queue_depth = nullb->queue_depth; - nullb->nr_queues++; hctx->driver_data = nq; + null_init_queue(nullb, nq); + nullb->nr_queues++; return 0; } @@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb) list_del_init(&nullb->list); del_gendisk(nullb->disk); - if (queue_mode == NULL_Q_MQ) - blk_mq_free_queue(nullb->q); - else - blk_cleanup_queue(nullb->q); + blk_cleanup_queue(nullb->q); put_disk(nullb->disk); kfree(nullb); } @@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq) nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); if (!nq->cmds) - return 1; + return -ENOMEM; tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); if (!nq->tag_map) { kfree(nq->cmds); - return 1; + return -ENOMEM; } for (i = 0; i < nq->queue_depth; i++) { @@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb) static int setup_queues(struct nullb *nullb) { - struct nullb_queue *nq; - int i; - - nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL); + nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), + GFP_KERNEL); if (!nullb->queues) - return 1; + return -ENOMEM; nullb->nr_queues = 0; nullb->queue_depth = hw_queue_depth; - if (queue_mode == NULL_Q_MQ) - return 0; + return 0; +} + +static int init_driver_queues(struct nullb *nullb) +{ + struct nullb_queue *nq; + int i, ret = 0; for (i = 0; i < submit_queues; i++) { nq = &nullb->queues[i]; - init_waitqueue_head(&nq->wait); - nq->queue_depth = hw_queue_depth; - if (setup_commands(nq)) - break; + + null_init_queue(nullb, nq); + + ret = setup_commands(nq); + if (ret) + goto err_queue; nullb->nr_queues++; } - if (i == submit_queues) - return 0; - + return 0; +err_queue: cleanup_queues(nullb); - return 1; + return ret; } static int null_add_dev(void) @@ -518,11 +557,13 @@ static int null_add_dev(void) } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); blk_queue_make_request(nullb->q, null_queue_bio); + init_driver_queues(nullb); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); blk_queue_prep_rq(nullb->q, null_rq_prep_fn); if (nullb->q) blk_queue_softirq_done(nullb->q, null_softirq_done_fn); + init_driver_queues(nullb); } if (!nullb->q) @@ -534,10 +575,7 @@ static int null_add_dev(void) disk = nullb->disk = alloc_disk_node(1, home_node); if (!disk) { queue_fail: - if (queue_mode == NULL_Q_MQ) - blk_mq_free_queue(nullb->q); - else - blk_cleanup_queue(nullb->q); + blk_cleanup_queue(nullb->q); cleanup_queues(nullb); err: kfree(nullb); @@ -579,7 +617,13 @@ static int __init null_init(void) } #endif - if (submit_queues > nr_cpu_ids) + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { + if (submit_queues < 
nr_online_nodes) { + pr_warn("null_blk: submit_queues param is set to %u.", + nr_online_nodes); + submit_queues = nr_online_nodes; + } + } else if (submit_queues > nr_cpu_ids) submit_queues = nr_cpu_ids; else if (!submit_queues) submit_queues = 1; diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 9199c93be926ed..eb6e1e0e8db25f 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state) } } -const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) +static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) { switch (state) { case SKD_MSG_STATE_IDLE: @@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) } } -const char *skd_skreq_state_to_str(enum skd_req_state state) +static const char *skd_skreq_state_to_str(enum skd_req_state state) { switch (state) { case SKD_REQ_STATE_IDLE: diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 5a95baf4b104e4..27de5046708a23 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -43,9 +43,6 @@ #include -extern int m68k_realnum_memory; -extern struct mem_info m68k_memory[NUM_MEMINFO]; - #define Z2MINOR_COMBINED (0) #define Z2MINOR_Z2ONLY (1) #define Z2MINOR_CHIPONLY (2) @@ -116,8 +113,8 @@ get_z2ram( void ) if ( test_bit( i, zorro_unused_z2ram ) ) { z2_count++; - z2ram_map[ z2ram_size++ ] = - ZTWO_VADDR( Z2RAM_START ) + ( i << Z2RAM_CHUNKSHIFT ); + z2ram_map[z2ram_size++] = (unsigned long)ZTWO_VADDR(Z2RAM_START) + + (i << Z2RAM_CHUNKSHIFT); clear_bit( i, zorro_unused_z2ram ); } } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 6bfc1bb318f639..dceb85f8d9a825 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0CF3, 0xE005) }, { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0930, 0x0220) }, { USB_DEVICE(0x0489, 0xe057) }, { USB_DEVICE(0x13d3, 0x3393) }, { USB_DEVICE(0x0489, 0xe04e) }, @@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c0ff34f2d2df57..3980fd18f6eaee 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index d79d692d05b8f3..896413b59aaea9 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -735,7 +735,7 @@ static struct pci_device_id agp_amd64_pci_table[] 
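A worked example of the splitting arithmetic in null_alloc_hctx() above: with nr_hw_queues = 6 on 4 online nodes, b_size = DIV_ROUND_UP(6, 4) = 2 and tip = 6 % 4 = 2, so hardware contexts 0-1 land on the first node and 2-3 on the second; once tip reaches zero the bucket size drops to 6 / 4 = 1, leaving contexts 4 and 5 on the third and fourth nodes (a 2/2/1/1 split). The second loop then walks for_each_online_node() to translate that relative position into a real node id, since online node ids need not be contiguous.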
= { MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); -static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = { +static const struct pci_device_id agp_amd64_pci_promisc_table[] = { { PCI_DEVICE_CLASS(0, 0) }, { } }; diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index e6939e13e3388e..e210f858d3cbf8 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -1,12 +1,11 @@ /* * i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops. - * See http://www.debian.org/~dz/i8k/ for more information - * and for latest version of this driver. * * Copyright (C) 2001 Massimo Dal Zotto * * Hwmon integration: * Copyright (C) 2011 Jean Delvare + * Copyright (C) 2013 Guenter Roeck * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -19,6 +18,8 @@ * General Public License for more details. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -29,13 +30,12 @@ #include #include #include -#include -#include +#include +#include +#include #include -#define I8K_VERSION "1.14 21/02/2005" - #define I8K_SMM_FN_STATUS 0x0025 #define I8K_SMM_POWER_STATUS 0x0069 #define I8K_SMM_SET_FAN 0x01a3 @@ -44,7 +44,6 @@ #define I8K_SMM_GET_TEMP 0x10a3 #define I8K_SMM_GET_DELL_SIG1 0xfea3 #define I8K_SMM_GET_DELL_SIG2 0xffa3 -#define I8K_SMM_BIOS_VERSION 0x00a6 #define I8K_FAN_MULT 30 #define I8K_MAX_TEMP 127 @@ -64,6 +63,15 @@ static DEFINE_MUTEX(i8k_mutex); static char bios_version[4]; static struct device *i8k_hwmon_dev; +static u32 i8k_hwmon_flags; +static int i8k_fan_mult; + +#define I8K_HWMON_HAVE_TEMP1 (1 << 0) +#define I8K_HWMON_HAVE_TEMP2 (1 << 1) +#define I8K_HWMON_HAVE_TEMP3 (1 << 2) +#define I8K_HWMON_HAVE_TEMP4 (1 << 3) +#define I8K_HWMON_HAVE_FAN1 (1 << 4) +#define I8K_HWMON_HAVE_FAN2 (1 << 5) MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)"); MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops"); @@ -103,11 +111,11 @@ static const struct file_operations i8k_fops = { struct smm_regs { unsigned int eax; - unsigned int ebx __attribute__ ((packed)); - unsigned int ecx __attribute__ ((packed)); - unsigned int edx __attribute__ ((packed)); - unsigned int esi __attribute__ ((packed)); - unsigned int edi __attribute__ ((packed)); + unsigned int ebx __packed; + unsigned int ecx __packed; + unsigned int edx __packed; + unsigned int esi __packed; + unsigned int edi __packed; }; static inline const char *i8k_get_dmi_data(int field) @@ -124,6 +132,17 @@ static int i8k_smm(struct smm_regs *regs) { int rc; int eax = regs->eax; + cpumask_var_t old_mask; + + /* SMM requires CPU 0 */ + if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_copy(old_mask, &current->cpus_allowed); + set_cpus_allowed_ptr(current, cpumask_of(0)); + if (smp_processor_id() != 0) { + rc = -EBUSY; + goto out; + } #if defined(CONFIG_X86_64) asm volatile("pushq %%rax\n\t" @@ -148,7 +167,7 @@ static int i8k_smm(struct smm_regs *regs) "pushfq\n\t" "popq %%rax\n\t" "andl $1,%%eax\n" - :"=a"(rc) + : "=a"(rc) : "a"(regs) : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); #else @@ -174,25 +193,17 @@ static int i8k_smm(struct smm_regs *regs) "lahf\n\t" "shrl $8,%%eax\n\t" "andl $1,%%eax\n" - :"=a"(rc) + : "=a"(rc) : "a"(regs) : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); #endif if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax) - return -EINVAL; + rc = -EINVAL; - return 0; -} - -/* - * Read the bios version.
Return the version as an integer corresponding - * to the ascii value, for example "A17" is returned as 0x00413137. - */ -static int i8k_get_bios_version(void) -{ - struct smm_regs regs = { .eax = I8K_SMM_BIOS_VERSION, }; - - return i8k_smm(&regs) ? : regs.eax +out: + set_cpus_allowed_ptr(current, old_mask); + free_cpumask_var(old_mask); + return rc; } /* @@ -203,7 +214,8 @@ static int i8k_get_fn_status(void) struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, }; int rc; - if ((rc = i8k_smm(&regs)) < 0) + rc = i8k_smm(&regs); + if (rc < 0) return rc; switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) { @@ -226,7 +238,8 @@ static int i8k_get_power_status(void) struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, }; int rc; - if ((rc = i8k_smm(&regs)) < 0) + rc = i8k_smm(&regs); + if (rc < 0) return rc; return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY; @@ -251,7 +264,7 @@ static int i8k_get_fan_speed(int fan) struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, }; regs.ebx = fan & 0xff; - return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult; + return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult; } /* @@ -277,10 +290,11 @@ static int i8k_get_temp(int sensor) int temp; #ifdef I8K_TEMPERATURE_BUG - static int prev; + static int prev[4]; #endif regs.ebx = sensor & 0xff; - if ((rc = i8k_smm(&regs)) < 0) + rc = i8k_smm(&regs); + if (rc < 0) return rc; temp = regs.eax & 0xff; @@ -294,10 +308,10 @@ static int i8k_get_temp(int sensor) # 1003655139 00000054 00005c52 */ if (temp > I8K_MAX_TEMP) { - temp = prev; - prev = I8K_MAX_TEMP; + temp = prev[sensor]; + prev[sensor] = I8K_MAX_TEMP; } else { - prev = temp; + prev[sensor] = temp; } #endif @@ -309,7 +323,8 @@ static int i8k_get_dell_signature(int req_fn) struct smm_regs regs = { .eax = req_fn, }; int rc; - if ((rc = i8k_smm(&regs)) < 0) + rc = i8k_smm(&regs); + if (rc < 0) return rc; return regs.eax == 1145651527 && regs.edx == 1145392204 ?
0 : -1; @@ -328,12 +343,14 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) switch (cmd) { case I8K_BIOS_VERSION: - val = i8k_get_bios_version(); + val = (bios_version[0] << 16) | + (bios_version[1] << 8) | bios_version[2]; break; case I8K_MACHINE_ID: memset(buff, 0, 16); - strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), sizeof(buff)); + strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), + sizeof(buff)); break; case I8K_FN_STATUS: @@ -470,12 +487,13 @@ static ssize_t i8k_hwmon_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - int cpu_temp; + int index = to_sensor_dev_attr(devattr)->index; + int temp; - cpu_temp = i8k_get_temp(0); - if (cpu_temp < 0) - return cpu_temp; - return sprintf(buf, "%d\n", cpu_temp * 1000); + temp = i8k_get_temp(index); + if (temp < 0) + return temp; + return sprintf(buf, "%d\n", temp * 1000); } static ssize_t i8k_hwmon_show_fan(struct device *dev, @@ -491,12 +509,44 @@ static ssize_t i8k_hwmon_show_fan(struct device *dev, return sprintf(buf, "%d\n", fan_speed); } +static ssize_t i8k_hwmon_show_pwm(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + int index = to_sensor_dev_attr(devattr)->index; + int status; + + status = i8k_get_fan_status(index); + if (status < 0) + return -EIO; + return sprintf(buf, "%d\n", clamp_val(status * 128, 0, 255)); +} + +static ssize_t i8k_hwmon_set_pwm(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int index = to_sensor_dev_attr(attr)->index; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + val = clamp_val(DIV_ROUND_CLOSEST(val, 128), 0, 2); + + mutex_lock(&i8k_mutex); + err = i8k_set_fan(index, val); + mutex_unlock(&i8k_mutex); + + return err < 0 ? 
-EIO : count; +} + static ssize_t i8k_hwmon_show_label(struct device *dev, struct device_attribute *devattr, char *buf) { - static const char *labels[4] = { - "i8k", + static const char *labels[3] = { "CPU", "Left Fan", "Right Fan", @@ -506,108 +556,108 @@ static ssize_t i8k_hwmon_show_label(struct device *dev, return sprintf(buf, "%s\n", labels[index]); } -static DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL); +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 3); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL, I8K_FAN_LEFT); +static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm, + i8k_hwmon_set_pwm, I8K_FAN_LEFT); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL, I8K_FAN_RIGHT); -static SENSOR_DEVICE_ATTR(name, S_IRUGO, i8k_hwmon_show_label, NULL, 0); -static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1); -static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2); -static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 3); +static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm, + i8k_hwmon_set_pwm, I8K_FAN_RIGHT); +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 0); +static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1); +static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2); + +static struct attribute *i8k_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */ + &sensor_dev_attr_temp1_label.dev_attr.attr, /* 1 */ + &sensor_dev_attr_temp2_input.dev_attr.attr, /* 2 */ + &sensor_dev_attr_temp3_input.dev_attr.attr, /* 3 */ + &sensor_dev_attr_temp4_input.dev_attr.attr, /* 4 */ + &sensor_dev_attr_fan1_input.dev_attr.attr, /* 5 */ + &sensor_dev_attr_pwm1.dev_attr.attr, /* 6 */ + &sensor_dev_attr_fan1_label.dev_attr.attr, /* 7 */ + &sensor_dev_attr_fan2_input.dev_attr.attr, /* 8 */ + &sensor_dev_attr_pwm2.dev_attr.attr, /* 9 */ + &sensor_dev_attr_fan2_label.dev_attr.attr, /* 10 */ + NULL +}; -static void i8k_hwmon_remove_files(struct device *dev) +static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, + int index) { - device_remove_file(dev, &dev_attr_temp1_input); - device_remove_file(dev, &sensor_dev_attr_fan1_input.dev_attr); - device_remove_file(dev, &sensor_dev_attr_fan2_input.dev_attr); - device_remove_file(dev, &sensor_dev_attr_temp1_label.dev_attr); - device_remove_file(dev, &sensor_dev_attr_fan1_label.dev_attr); - device_remove_file(dev, &sensor_dev_attr_fan2_label.dev_attr); - device_remove_file(dev, &sensor_dev_attr_name.dev_attr); + if ((index == 0 || index == 1) && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) + return 0; + if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2)) + return 0; + if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3)) + return 0; + if (index == 4 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4)) + return 0; + if (index >= 5 && index <= 7 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1)) + return 0; + if (index >= 8 && index <= 10 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2)) + return 0; + + return attr->mode; } +static const struct attribute_group i8k_group = { + .attrs = i8k_attrs, + .is_visible = i8k_is_visible, +}; 
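For the new pwm attributes above, the mapping between sysfs values and the three SMM fan states works out as follows: a read multiplies the state returned by i8k_get_fan_status() (0 = off, 1 = low, 2 = high) by 128 and clamps it to 0-255, so the states read back as 0, 128 and 255; a write divides the user value by 128 with rounding and clamps to 0-2, so 0-63 selects off, 64-191 low, and 192 or anything larger selects high.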
+__ATTRIBUTE_GROUPS(i8k); + static int __init i8k_init_hwmon(void) { int err; - i8k_hwmon_dev = hwmon_device_register(NULL); - if (IS_ERR(i8k_hwmon_dev)) { - err = PTR_ERR(i8k_hwmon_dev); - i8k_hwmon_dev = NULL; - printk(KERN_ERR "i8k: hwmon registration failed (%d)\n", err); - return err; - } - - /* Required name attribute */ - err = device_create_file(i8k_hwmon_dev, - &sensor_dev_attr_name.dev_attr); - if (err) - goto exit_unregister; + i8k_hwmon_flags = 0; /* CPU temperature attributes, if temperature reading is OK */ err = i8k_get_temp(0); - if (err < 0) { - dev_dbg(i8k_hwmon_dev, - "Not creating temperature attributes (%d)\n", err); - } else { - err = device_create_file(i8k_hwmon_dev, &dev_attr_temp1_input); - if (err) - goto exit_remove_files; - err = device_create_file(i8k_hwmon_dev, - &sensor_dev_attr_temp1_label.dev_attr); - if (err) - goto exit_remove_files; - } + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1; + /* check for additional temperature sensors */ + err = i8k_get_temp(1); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2; + err = i8k_get_temp(2); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3; + err = i8k_get_temp(3); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; /* Left fan attributes, if left fan is present */ err = i8k_get_fan_status(I8K_FAN_LEFT); - if (err < 0) { - dev_dbg(i8k_hwmon_dev, - "Not creating %s fan attributes (%d)\n", "left", err); - } else { - err = device_create_file(i8k_hwmon_dev, - &sensor_dev_attr_fan1_input.dev_attr); - if (err) - goto exit_remove_files; - err = device_create_file(i8k_hwmon_dev, - &sensor_dev_attr_fan1_label.dev_attr); - if (err) - goto exit_remove_files; - } + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; /* Right fan attributes, if right fan is present */ err = i8k_get_fan_status(I8K_FAN_RIGHT); - if (err < 0) { - dev_dbg(i8k_hwmon_dev, - "Not creating %s fan attributes (%d)\n", "right", err); - } else { - err = device_create_file(i8k_hwmon_dev, - &sensor_dev_attr_fan2_input.dev_attr); - if (err) - goto exit_remove_files; - err = device_create_file(i8k_hwmon_dev, - &sensor_dev_attr_fan2_label.dev_attr); - if (err) - goto exit_remove_files; - } + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; + i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "i8k", NULL, + i8k_groups); + if (IS_ERR(i8k_hwmon_dev)) { + err = PTR_ERR(i8k_hwmon_dev); + i8k_hwmon_dev = NULL; + pr_err("hwmon registration failed (%d)\n", err); + return err; + } return 0; - - exit_remove_files: - i8k_hwmon_remove_files(i8k_hwmon_dev); - exit_unregister: - hwmon_device_unregister(i8k_hwmon_dev); - return err; } -static void __exit i8k_exit_hwmon(void) -{ - i8k_hwmon_remove_files(i8k_hwmon_dev); - hwmon_device_unregister(i8k_hwmon_dev); -} - -static struct dmi_system_id __initdata i8k_dmi_table[] = { +static struct dmi_system_id i8k_dmi_table[] __initdata = { { .ident = "Dell Inspiron", .matches = { @@ -671,7 +721,23 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"), }, }, - { } + { + .ident = "Dell Studio", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio"), + }, + .driver_data = (void *)1, /* fan multiplier override */ + }, + { + .ident = "Dell XPS M140", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"), + }, + .driver_data = (void *)1, /* fan multiplier override */ + }, + { } }; /* @@ -679,8 +745,7 @@ static struct dmi_system_id __initdata i8k_dmi_table[] 
= { */ static int __init i8k_probe(void) { - char buff[4]; - int version; + const struct dmi_system_id *id; /* * Get DMI information @@ -689,49 +754,30 @@ static int __init i8k_probe(void) if (!ignore_dmi && !force) return -ENODEV; - printk(KERN_INFO "i8k: not running on a supported Dell system.\n"); - printk(KERN_INFO "i8k: vendor=%s, model=%s, version=%s\n", + pr_info("not running on a supported Dell system.\n"); + pr_info("vendor=%s, model=%s, version=%s\n", i8k_get_dmi_data(DMI_SYS_VENDOR), i8k_get_dmi_data(DMI_PRODUCT_NAME), i8k_get_dmi_data(DMI_BIOS_VERSION)); } - strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), sizeof(bios_version)); + strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), + sizeof(bios_version)); /* * Get SMM Dell signature */ if (i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG1) && i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG2)) { - printk(KERN_ERR "i8k: unable to get SMM Dell signature\n"); + pr_err("unable to get SMM Dell signature\n"); if (!force) return -ENODEV; } - /* - * Get SMM BIOS version. - */ - version = i8k_get_bios_version(); - if (version <= 0) { - printk(KERN_WARNING "i8k: unable to get SMM BIOS version\n"); - } else { - buff[0] = (version >> 16) & 0xff; - buff[1] = (version >> 8) & 0xff; - buff[2] = (version) & 0xff; - buff[3] = '\0'; - /* - * If DMI BIOS version is unknown use SMM BIOS version. - */ - if (!dmi_get_system_info(DMI_BIOS_VERSION)) - strlcpy(bios_version, buff, sizeof(bios_version)); - - /* - * Check if the two versions match. - */ - if (strncmp(buff, bios_version, sizeof(bios_version)) != 0) - printk(KERN_WARNING "i8k: BIOS version mismatch: %s != %s\n", - buff, bios_version); - } + i8k_fan_mult = fan_mult; + id = dmi_first_match(i8k_dmi_table); + if (id && fan_mult == I8K_FAN_MULT && id->driver_data) + i8k_fan_mult = (unsigned long)id->driver_data; return 0; } @@ -754,10 +800,6 @@ static int __init i8k_init(void) if (err) goto exit_remove_proc; - printk(KERN_INFO - "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", - I8K_VERSION); - return 0; exit_remove_proc: @@ -767,7 +809,7 @@ static int __init i8k_init(void) static void __exit i8k_exit(void) { - i8k_exit_hwmon(); + hwmon_device_unregister(i8k_hwmon_dev); remove_proc_entry("i8k", NULL); } diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 0913d79424d3a3..c4094c4e22c11d 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -587,6 +587,8 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, return -ENODEV; switch ( cmd ) { case LPTIME: + if (arg > UINT_MAX / HZ) + return -EINVAL; LP_TIME(minor) = arg * HZ/100; break; case LPCHAR: diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index 1fd00dc0689721..76c490fa051103 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c @@ -168,7 +168,10 @@ static irqreturn_t button_handler (int irq, void *dev_id) static int button_read (struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - interruptible_sleep_on (&button_wait_queue); + DEFINE_WAIT(wait); + prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE); + schedule(); + finish_wait(&button_wait_queue, &wait); return (copy_to_user (buffer, &button_output_buffer, bcount)) ? 
-EFAULT : bcount; } diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index d39cca659a3f48..8320abd1ef1473 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -2511,8 +2511,8 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) /* If port is closing, signal caller to try again */ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){ - if (port->flags & ASYNC_CLOSING) - interruptible_sleep_on(&port->close_wait); + wait_event_interruptible_tty(tty, port->close_wait, + !(port->flags & ASYNC_CLOSING)); retval = ((port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); goto cleanup; diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index 8e562dc656016c..e1f3337a0cf9f7 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM"; static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context, void **return_value) { - acpi_status status; + acpi_status status = AE_OK; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - if (strstr(buffer.pointer, context) != NULL) { - *return_value = handle; + + if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) { + if (strstr(buffer.pointer, context) != NULL) { + *return_value = handle; + status = AE_CTRL_TERMINATE; + } kfree(buffer.pointer); - return AE_CTRL_TERMINATE; } - return AE_OK; + + return status; } static inline void ppi_assign_params(union acpi_object params[4], diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index d5d2e4a985aa86..daea84c4174321 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -216,4 +216,4 @@ static int __init ttyprintk_init(void) ttyprintk_driver = NULL; return ret; } -module_init(ttyprintk_init); +device_initcall(ttyprintk_init); diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 8d3009e44fba40..5543b7df8e16c7 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table, return 0; } -static unsigned int _get_val(struct clk_divider *divider, u8 div) +static unsigned int _get_val(struct clk_divider *divider, unsigned int div) { if (divider->flags & CLK_DIVIDER_ONE_BASED) return div; diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 39b40aaede2b36..68e515d093d864 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data; #define ASS_CLK_DIV 0x4 #define ASS_CLK_GATE 0x8 +/* list of all parent clock list */ +static const char *mout_audss_p[] = { "fin_pll", "fout_epll" }; +static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" }; + +#ifdef CONFIG_PM_SLEEP static unsigned long reg_save[][2] = { {ASS_CLK_SRC, 0}, {ASS_CLK_DIV, 0}, {ASS_CLK_GATE, 0}, }; -/* list of all parent clock list */ -static const char *mout_audss_p[] = { "fin_pll", "fout_epll" }; -static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" }; - -#ifdef CONFIG_PM_SLEEP static int exynos_audss_clk_suspend(void) { int i; diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index ad5ff50c5f281a..1a7c1b929c690b 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -39,7 +39,7 @@ #define 
SRC_TOP1 0xc214 #define SRC_CAM 0xc220 #define SRC_TV 0xc224 -#define SRC_MFC 0xcc28 +#define SRC_MFC 0xc228 #define SRC_G3D 0xc22c #define E4210_SRC_IMAGE 0xc230 #define SRC_LCD0 0xc234 diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index adf32343c9f9c4..e52359cf9b6fe7 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -25,6 +25,7 @@ #define MPLL_LOCK 0x4000 #define MPLL_CON0 0x4100 #define SRC_CORE1 0x4204 +#define GATE_IP_ACP 0x8800 #define CPLL_LOCK 0x10020 #define EPLL_LOCK 0x10030 #define VPLL_LOCK 0x10040 @@ -75,7 +76,6 @@ #define SRC_CDREX 0x20200 #define PLL_DIV2_SEL 0x20a24 #define GATE_IP_DISP1 0x10928 -#define GATE_IP_ACP 0x10000 /* list of PLLs to be registered */ enum exynos5250_plls { @@ -120,7 +120,8 @@ enum exynos5250_clks { spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2, hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1, tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct, - wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, + wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0, + smmu_mdma0, /* mux clocks */ mout_hdmi = 1024, @@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0), GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0), GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), - GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0), - GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0), + GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0), + GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0), GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0), GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), @@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0), GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), - GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), + GATE(sysreg, "sysreg", "aclk66", + GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0), GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), @@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0), GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0), GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0), + GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0), + GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0), }; static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 634c4d6dd45a48..cd6950fd8caf06 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -37,6 +37,10 @@ config SUN4I_TIMER select CLKSRC_MMIO bool +config SUN5I_HSTIMER + select CLKSRC_MMIO + bool + config VT8500_TIMER bool diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 33621efb914896..358358d87b6dc4 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_ARCH_MOXART) += 
moxart_timer.o obj-$(CONFIG_ARCH_MXS) += mxs_timer.o obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o +obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index c639b1a9e99686..0fc31d029e5222 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -202,7 +202,7 @@ static struct clocksource gt_clocksource = { }; #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK -static u32 notrace gt_sched_clock_read(void) +static u64 notrace gt_sched_clock_read(void) { return gt_counter_read(); } @@ -217,7 +217,7 @@ static void __init gt_clocksource_init(void) writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL); #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK - setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate); + sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate); #endif clocksource_register_hz(&gt_clocksource, gt_clk_rate); } diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 0d7d8c3ed6b222..5176e761166b27 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -98,12 +98,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) return; } -static const struct of_device_id bcm_timer_ids[] __initconst = { - {.compatible = "brcm,kona-timer"}, - {.compatible = "bcm,kona-timer"}, /* deprecated name */ - {}, -}; - static void __init kona_timers_init(struct device_node *node) { u32 freq; diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index b2bb3a4bc20542..63f176de0d0228 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -67,11 +67,13 @@ * struct ttc_timer - This definition defines local timer structure * * @base_addr: Base address of timer + * @freq: Timer input clock frequency * @clk: Associated clock source * @clk_rate_change_nb Notifier block for clock rate changes */ struct ttc_timer { void __iomem *base_addr; + unsigned long freq; struct clk *clk; struct notifier_block clk_rate_change_nb; }; @@ -158,7 +160,7 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs) TTC_COUNT_VAL_OFFSET); } -static u32 notrace ttc_sched_clock_read(void) +static u64 notrace ttc_sched_clock_read(void) { return __raw_readl(ttc_sched_clock_val_reg); } @@ -196,9 +198,8 @@ static void ttc_set_mode(enum clock_event_mode mode, switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - ttc_set_interval(timer, - DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk), - PRESCALE * HZ)); + ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq, + PRESCALE * HZ)); break; case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_UNUSED: @@ -273,6 +274,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) return; } + ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk); + ttccs->ttc.clk_rate_change_nb.notifier_call = ttc_rate_change_clocksource_cb; ttccs->ttc.clk_rate_change_nb.next = NULL; @@ -298,16 +301,14 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) __raw_writel(CNT_CNTRL_RESET, ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); - err = clocksource_register_hz(&ttccs->cs, - clk_get_rate(ttccs->ttc.clk) / PRESCALE); + err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); if
(WARN_ON(err)) { kfree(ttccs); return; } ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; - setup_sched_clock(ttc_sched_clock_read, 16, - clk_get_rate(ttccs->ttc.clk) / PRESCALE); + sched_clock_register(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE); } static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, @@ -334,6 +335,9 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, ndata->new_rate / PRESCALE); local_irq_restore(flags); + /* update cached frequency */ + ttc->freq = ndata->new_rate; + /* fall through */ } case PRE_RATE_CHANGE: @@ -367,6 +371,7 @@ static void __init ttc_setup_clockevent(struct clk *clk, if (clk_notifier_register(ttcce->ttc.clk, &ttcce->ttc.clk_rate_change_nb)) pr_warn("Unable to register clock notifier.\n"); + ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); ttcce->ttc.base_addr = base; ttcce->ce.name = "ttc_clockevent"; @@ -388,15 +393,14 @@ static void __init ttc_setup_clockevent(struct clk *clk, __raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET); err = request_irq(irq, ttc_clock_event_interrupt, - IRQF_DISABLED | IRQF_TIMER, - ttcce->ce.name, ttcce); + IRQF_TIMER, ttcce->ce.name, ttcce); if (WARN_ON(err)) { kfree(ttcce); return; } clockevents_config_and_register(&ttcce->ce, - clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe); + ttcce->ttc.freq / PRESCALE, 1, 0xfffe); } /** diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index b9ddd9e3a2f599..ae2e4278c42abd 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c @@ -28,6 +28,7 @@ void __init clocksource_of_init(void) struct device_node *np; const struct of_device_id *match; clocksource_of_init_fn init_func; + unsigned clocksources = 0; for_each_matching_node_and_match(np, __clksrc_of_table, &match) { if (!of_device_is_available(np)) @@ -35,5 +36,8 @@ void __init clocksource_of_init(void) init_func = match->data; init_func(np); + clocksources++; } + if (!clocksources) + pr_crit("%s: no matching clocksources found\n", __func__); } diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index ea210482dd2099..db21052908984c 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c @@ -131,7 +131,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) static struct irqaction mfgptirq = { .handler = mfgpt_tick, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED, + .flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED, .name = DRV_NAME, }; diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index e54ca1062d8e6a..f3656a6b0382c9 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -243,8 +243,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, dw_ced->irqaction.dev_id = &dw_ced->ced; dw_ced->irqaction.irq = irq; dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | - IRQF_NOBALANCING | - IRQF_DISABLED; + IRQF_NOBALANCING; dw_ced->eoi = apbt_eoi; err = setup_irq(irq, &dw_ced->irqaction); diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c index ed7b73b508e096..152a3f3875eeab 100644 --- a/drivers/clocksource/nomadik-mtu.c +++ b/drivers/clocksource/nomadik-mtu.c @@ -187,7 +187,7 @@ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id) static struct irqaction nmdk_timer_irq = { .name = "Nomadik Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER, + .flags = IRQF_TIMER, .handler = 
nmdk_timer_interrupt, .dev_id = &nmdk_clkevt, }; diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index 85082e8d305298..5645cfc90c415e 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -264,7 +264,7 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id) static struct irqaction samsung_clock_event_irq = { .name = "samsung_time_irq", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_TIMER | IRQF_IRQPOLL, .handler = samsung_clock_event_isr, .dev_id = &time_event_device, }; diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 0965e9848b3d18..0b1836a6c53975 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -634,12 +634,18 @@ static int sh_cmt_clock_event_next(unsigned long delta, static void sh_cmt_clock_event_suspend(struct clock_event_device *ced) { - pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev); + struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + + pm_genpd_syscore_poweroff(&p->pdev->dev); + clk_unprepare(p->clk); } static void sh_cmt_clock_event_resume(struct clock_event_device *ced) { - pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev); + struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + + clk_prepare(p->clk); + pm_genpd_syscore_poweron(&p->pdev->dev); } static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, @@ -726,8 +732,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) p->irqaction.name = dev_name(&p->pdev->dev); p->irqaction.handler = sh_cmt_interrupt; p->irqaction.dev_id = p; - p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_NOBALANCING; + p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "cmt_fck"); @@ -737,6 +742,10 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) goto err2; } + ret = clk_prepare(p->clk); + if (ret < 0) + goto err3; + if (res2 && (resource_size(res2) == 4)) { /* assume both CMSTR and CMCSR to be 32-bit */ p->read_control = sh_cmt_read32; @@ -773,19 +782,21 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) cfg->clocksource_rating); if (ret) { dev_err(&p->pdev->dev, "registration failed\n"); - goto err3; + goto err4; } p->cs_enabled = false; ret = setup_irq(irq, &p->irqaction); if (ret) { dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); - goto err3; + goto err4; } platform_set_drvdata(pdev, p); return 0; +err4: + clk_unprepare(p->clk); err3: clk_put(p->clk); err2: diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 3cf12834681e83..e30d76e0a6fae9 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -302,8 +302,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) p->irqaction.handler = sh_mtu2_interrupt; p->irqaction.dev_id = p; p->irqaction.irq = irq; - p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_NOBALANCING; + p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); @@ -358,7 +357,6 @@ static int sh_mtu2_probe(struct platform_device *pdev) ret = sh_mtu2_setup(p, pdev); if (ret) { kfree(p); - platform_set_drvdata(pdev, NULL); pm_runtime_idle(&pdev->dev); return ret; } diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 
63557cda0a7d59..ecd7b60bfdfa9d 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -462,8 +462,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) p->irqaction.handler = sh_tmu_interrupt; p->irqaction.dev_id = p; p->irqaction.irq = irq; - p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_NOBALANCING; + p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "tmu_fck"); @@ -523,7 +522,6 @@ static int sh_tmu_probe(struct platform_device *pdev) ret = sh_tmu_setup(p, pdev); if (ret) { kfree(p); - platform_set_drvdata(pdev, NULL); pm_runtime_idle(&pdev->dev); return ret; } diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index a4f6119aafd814..bf497afba9ad1e 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -114,7 +114,7 @@ static int sun4i_clkevt_next_event(unsigned long evt, static struct clock_event_device sun4i_clockevent = { .name = "sun4i_tick", - .rating = 300, + .rating = 350, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = sun4i_clkevt_mode, .set_next_event = sun4i_clkevt_next_event, @@ -138,7 +138,7 @@ static struct irqaction sun4i_timer_irq = { .dev_id = &sun4i_clockevent, }; -static u32 sun4i_timer_sched_read(void) +static u64 notrace sun4i_timer_sched_read(void) { return ~readl(timer_base + TIMER_CNTVAL_REG(1)); } @@ -170,9 +170,9 @@ static void __init sun4i_timer_init(struct device_node *node) TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), timer_base + TIMER_CTL_REG(1)); - setup_sched_clock(sun4i_timer_sched_read, 32, rate); + sched_clock_register(sun4i_timer_sched_read, 32, rate); clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, - rate, 300, 32, clocksource_mmio_readl_down); + rate, 350, 32, clocksource_mmio_readl_down); ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); @@ -190,7 +190,8 @@ static void __init sun4i_timer_init(struct device_node *node) val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); - sun4i_clockevent.cpumask = cpumask_of(0); + sun4i_clockevent.cpumask = cpu_possible_mask; + sun4i_clockevent.irq = irq; clockevents_config_and_register(&sun4i_clockevent, rate, TIMER_SYNC_TICKS, 0xffffffff); diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index 642849256d82ec..d1869f02051cbe 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -149,7 +149,7 @@ static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id) static struct irqaction tegra_timer_irq = { .name = "timer0", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH, + .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH, .handler = tegra_timer_interrupt, .dev_id = &tegra_clockevent, }; diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 4e7f6802e840ba..ee8691b89944e3 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -76,6 +76,7 @@ static void __iomem *timer_base, *local_base; static unsigned int timer_clk; static bool timer25Mhz = true; +static u32 enable_mask; /* * Number of timer ticks per jiffy. @@ -121,8 +122,7 @@ armada_370_xp_clkevt_next_event(unsigned long delta, /* * Enable the timer. 
*/ - local_timer_ctrl_clrset(TIMER0_RELOAD_EN, - TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); + local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask); return 0; } @@ -141,9 +141,7 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode, /* * Enable timer. */ - local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | - TIMER0_EN | - TIMER0_DIV(TIMER_DIVIDER_SHIFT)); + local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); } else { /* * Disable timer. @@ -240,10 +238,13 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) WARN_ON(!timer_base); local_base = of_iomap(np, 1); - if (timer25Mhz) + if (timer25Mhz) { set = TIMER0_25MHZ; - else + enable_mask = TIMER0_EN; + } else { clr = TIMER0_25MHZ; + enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT); + } timer_ctrl_clrset(clr, set); local_timer_ctrl_clrset(clr, set); @@ -262,8 +263,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) writel(0xffffffff, timer_base + TIMER0_VAL_OFF); writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); - timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | - TIMER0_DIV(TIMER_DIVIDER_SHIFT)); + timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); /* * Set scale and timer for sched_clock. diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c index 9c7f018a67cad3..20066222f3f29c 100644 --- a/drivers/clocksource/time-orion.c +++ b/drivers/clocksource/time-orion.c @@ -53,7 +53,7 @@ EXPORT_SYMBOL(orion_timer_ctrl_clrset); /* * Free-running clocksource handling. */ -static u32 notrace orion_read_sched_clock(void) +static u64 notrace orion_read_sched_clock(void) { return ~readl(timer_base + TIMER0_VAL); } @@ -135,7 +135,7 @@ static void __init orion_timer_init(struct device_node *np) clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", clk_get_rate(clk), 300, 32, clocksource_mmio_readl_down); - setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk)); + sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk)); /* setup timer1 as clockevent timer */ if (setup_irq(irq, &orion_clkevt_irq)) diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c new file mode 100644 index 00000000000000..deebcd6469fced --- /dev/null +++ b/drivers/clocksource/timer-sun5i.c @@ -0,0 +1,192 @@ +/* + * Allwinner SoCs hstimer driver. + * + * Copyright (C) 2013 Maxime Ripard + * + * Maxime Ripard + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TIMER_IRQ_EN_REG 0x00 +#define TIMER_IRQ_EN(val) BIT(val) +#define TIMER_IRQ_ST_REG 0x04 +#define TIMER_CTL_REG(val) (0x20 * (val) + 0x10) +#define TIMER_CTL_ENABLE BIT(0) +#define TIMER_CTL_RELOAD BIT(1) +#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4) +#define TIMER_CTL_ONESHOT BIT(7) +#define TIMER_INTVAL_LO_REG(val) (0x20 * (val) + 0x14) +#define TIMER_INTVAL_HI_REG(val) (0x20 * (val) + 0x18) +#define TIMER_CNTVAL_LO_REG(val) (0x20 * (val) + 0x1c) +#define TIMER_CNTVAL_HI_REG(val) (0x20 * (val) + 0x20) + +#define TIMER_SYNC_TICKS 3 + +static void __iomem *timer_base; +static u32 ticks_per_jiffy; + +/* + * When we disable a timer, we need to wait at least for 2 cycles of + * the timer source clock. 
We will use for that the clocksource timer + * that is already setup and runs at the same frequency than the other + * timers, and we never will be disabled. + */ +static void sun5i_clkevt_sync(void) +{ + u32 old = readl(timer_base + TIMER_CNTVAL_LO_REG(1)); + + while ((old - readl(timer_base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS) + cpu_relax(); +} + +static void sun5i_clkevt_time_stop(u8 timer) +{ + u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer)); + + sun5i_clkevt_sync(); +} + +static void sun5i_clkevt_time_setup(u8 timer, u32 delay) +{ + writel(delay, timer_base + TIMER_INTVAL_LO_REG(timer)); +} + +static void sun5i_clkevt_time_start(u8 timer, bool periodic) +{ + u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + + if (periodic) + val &= ~TIMER_CTL_ONESHOT; + else + val |= TIMER_CTL_ONESHOT; + + writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, + timer_base + TIMER_CTL_REG(timer)); +} + +static void sun5i_clkevt_mode(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + sun5i_clkevt_time_stop(0); + sun5i_clkevt_time_setup(0, ticks_per_jiffy); + sun5i_clkevt_time_start(0, true); + break; + case CLOCK_EVT_MODE_ONESHOT: + sun5i_clkevt_time_stop(0); + sun5i_clkevt_time_start(0, false); + break; + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + default: + sun5i_clkevt_time_stop(0); + break; + } +} + +static int sun5i_clkevt_next_event(unsigned long evt, + struct clock_event_device *unused) +{ + sun5i_clkevt_time_stop(0); + sun5i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS); + sun5i_clkevt_time_start(0, false); + + return 0; +} + +static struct clock_event_device sun5i_clockevent = { + .name = "sun5i_tick", + .rating = 340, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_mode = sun5i_clkevt_mode, + .set_next_event = sun5i_clkevt_next_event, +}; + + +static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = (struct clock_event_device *)dev_id; + + writel(0x1, timer_base + TIMER_IRQ_ST_REG); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct irqaction sun5i_timer_irq = { + .name = "sun5i_timer0", + .flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = sun5i_timer_interrupt, + .dev_id = &sun5i_clockevent, +}; + +static u64 sun5i_timer_sched_read(void) +{ + return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); +} + +static void __init sun5i_timer_init(struct device_node *node) +{ + unsigned long rate; + struct clk *clk; + int ret, irq; + u32 val; + + timer_base = of_iomap(node, 0); + if (!timer_base) + panic("Can't map registers"); + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) + panic("Can't parse IRQ"); + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) + panic("Can't get timer clock"); + clk_prepare_enable(clk); + rate = clk_get_rate(clk); + + writel(~0, timer_base + TIMER_INTVAL_LO_REG(1)); + writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, + timer_base + TIMER_CTL_REG(1)); + + sched_clock_register(sun5i_timer_sched_read, 32, rate); + clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, + rate, 340, 32, clocksource_mmio_readl_down); + + ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); + + ret = setup_irq(irq, &sun5i_timer_irq); + if (ret) + pr_warn("failed to setup irq %d\n", irq); + + /* Enable timer0 interrupt */ + val = readl(timer_base + TIMER_IRQ_EN_REG); + writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); + + 
sun5i_clockevent.cpumask = cpu_possible_mask; + sun5i_clockevent.irq = irq; + + clockevents_config_and_register(&sun5i_clockevent, rate, + TIMER_SYNC_TICKS, 0xffffffff); +} +CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", + sun5i_timer_init); +CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer", + sun5i_timer_init); diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c index ad3c0e83a77956..1098ed3b9b89f2 100644 --- a/drivers/clocksource/vt8500_timer.c +++ b/drivers/clocksource/vt8500_timer.c @@ -124,7 +124,7 @@ static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id) static struct irqaction irq = { .name = "vt8500_timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_TIMER | IRQF_IRQPOLL, .handler = vt8500_timer_interrupt, .dev_id = &clockevent, }; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 02d534da22dda0..8d19f7c06010c3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -828,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) int ret = 0; memcpy(&new_policy, policy, sizeof(*policy)); + + /* Use the default policy if its valid. */ + if (cpufreq_driver->setpolicy) + cpufreq_parse_governor(policy->governor->name, + &new_policy.policy, NULL); + /* assure that the starting sequence is run in cpufreq_set_policy */ policy->governor = NULL; /* set default policy */ ret = cpufreq_set_policy(policy, &new_policy); - policy->user_policy.policy = policy->policy; - policy->user_policy.governor = policy->governor; - if (ret) { pr_debug("setting policy failed\n"); if (cpufreq_driver->exit) @@ -845,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) #ifdef CONFIG_HOTPLUG_CPU static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, - unsigned int cpu, struct device *dev, - bool frozen) + unsigned int cpu, struct device *dev) { int ret = 0; unsigned long flags; @@ -877,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, } } - /* Don't touch sysfs links during light-weight init */ - if (!frozen) - ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); - - return ret; + return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); } #endif @@ -926,6 +924,27 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) return NULL; } +static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) +{ + struct kobject *kobj; + struct completion *cmp; + + down_read(&policy->rwsem); + kobj = &policy->kobj; + cmp = &policy->kobj_unregister; + up_read(&policy->rwsem); + kobject_put(kobj); + + /* + * We need to make sure that the underlying kobj is + * actually not referenced anymore by anybody before we + * proceed with unloading. 
+ */ + pr_debug("waiting for dropping of refcount\n"); + wait_for_completion(cmp); + pr_debug("wait complete\n"); +} + static void cpufreq_policy_free(struct cpufreq_policy *policy) { free_cpumask_var(policy->related_cpus); @@ -986,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { read_unlock_irqrestore(&cpufreq_driver_lock, flags); - ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); + ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); up_read(&cpufreq_rwsem); return ret; } @@ -994,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, read_unlock_irqrestore(&cpufreq_driver_lock, flags); #endif - if (frozen) - /* Restore the saved policy when doing light-weight init */ - policy = cpufreq_policy_restore(cpu); - else + /* + * Restore the saved policy when doing light-weight init and fall back + * to the full init if that fails. + */ + policy = frozen ? cpufreq_policy_restore(cpu) : NULL; + if (!policy) { + frozen = false; policy = cpufreq_policy_alloc(); - - if (!policy) - goto nomem_out; - + if (!policy) + goto nomem_out; + } /* * In the resume path, since we restore a saved policy, the assignment @@ -1047,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, */ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); - policy->user_policy.min = policy->min; - policy->user_policy.max = policy->max; + if (!frozen) { + policy->user_policy.min = policy->min; + policy->user_policy.max = policy->max; + } blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); @@ -1079,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, cpufreq_init_policy(policy); + if (!frozen) { + policy->user_policy.policy = policy->policy; + policy->user_policy.governor = policy->governor; + } + kobject_uevent(&policy->kobj, KOBJ_ADD); up_read(&cpufreq_rwsem); @@ -1096,7 +1124,13 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, if (cpufreq_driver->exit) cpufreq_driver->exit(policy); err_set_policy_cpu: + if (frozen) { + /* Do not leave stale fallback data behind. 
*/ + per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; + cpufreq_policy_put_kobj(policy); + } cpufreq_policy_free(policy); + nomem_out: up_read(&cpufreq_rwsem); @@ -1118,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) } static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, - unsigned int old_cpu, bool frozen) + unsigned int old_cpu) { struct device *cpu_dev; int ret; @@ -1126,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, /* first sibling now owns the new sysfs dir */ cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); - /* Don't touch sysfs files during light-weight tear-down */ - if (frozen) - return cpu_dev->id; - sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); ret = kobject_move(&policy->kobj, &cpu_dev->kobj); if (ret) { @@ -1196,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, if (!frozen) sysfs_remove_link(&dev->kobj, "cpufreq"); } else if (cpus > 1) { - new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); + new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); if (new_cpu >= 0) { update_policy_cpu(policy, new_cpu); @@ -1218,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, int ret; unsigned long flags; struct cpufreq_policy *policy; - struct kobject *kobj; - struct completion *cmp; read_lock_irqsave(&cpufreq_driver_lock, flags); policy = per_cpu(cpufreq_cpu_data, cpu); @@ -1249,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev, } } - if (!frozen) { - down_read(&policy->rwsem); - kobj = &policy->kobj; - cmp = &policy->kobj_unregister; - up_read(&policy->rwsem); - kobject_put(kobj); - - /* - * We need to make sure that the underlying kobj is - * actually not referenced anymore by anybody before we - * proceed with unloading. 
- */ - pr_debug("waiting for dropping of refcount\n"); - wait_for_completion(cmp); - pr_debug("wait complete\n"); - } + if (!frozen) + cpufreq_policy_put_kobj(policy); /* * Perform the ->exit() even during light-weight tear-down, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 5f1cbae3696110..d51f17ed691e02 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data) } #define ICPU(model, policy) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ + (unsigned long)&policy } static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x2a, core_params), @@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu = all_cpu_data[cpunum]; intel_pstate_get_cpu_pstates(cpu); + if (!cpu->pstate.current_pstate) { + all_cpu_data[cpunum] = NULL; + kfree(cpu); + return -ENODATA; + } cpu->cpu = cpunum; diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c index 36795639df0da2..6e51114057d096 100644 --- a/drivers/cpuidle/cpuidle-calxeda.c +++ b/drivers/cpuidle/cpuidle-calxeda.c @@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = { .state_count = 2, }; -static int __init calxeda_cpuidle_probe(struct platform_device *pdev) +static int calxeda_cpuidle_probe(struct platform_device *pdev) { return cpuidle_register(&calxeda_idle_driver, NULL); } diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 9dd6e01eac3305..f757a0f428bde8 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = { static int __init ixp_module_init(void) { int num = ARRAY_SIZE(ixp4xx_algos); - int i, err ; + int i, err; pdev = platform_device_register_full(&ixp_dev_info); if (IS_ERR(pdev)) return PTR_ERR(pdev); - dev = &pdev->dev; - spin_lock_init(&desc_lock); spin_lock_init(&emerg_lock); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a49c777607c50..87529181efccb9 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device) } dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_src)) { + dev_err(dev, "mapping src buffer failed\n"); + goto free_resources; + } dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_dest)) { + dev_err(dev, "mapping dest buffer failed\n"); + goto unmap_src; + } flags = DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); @@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device) } unmap_dma: - dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); +unmap_src: + dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); free_resources: dma->device_free_chan_resources(dma_chan); out: diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index b53d0de17e1552..98e14ee4833c4d 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1,7 +1,7 @@ #include "amd64_edac.h" #include -static struct edac_pci_ctl_info *amd64_ctl_pci; +static struct edac_pci_ctl_info *pci_ctl; static int report_gart_errors; 
module_param(report_gart_errors, int, 0644); @@ -162,7 +162,7 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, * scan the scrub rate mapping table for a close or matching bandwidth value to * issue. If requested is too big, then use last maximum value found. */ -static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) +static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) { u32 scrubval; int i; @@ -198,7 +198,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) return 0; } -static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) +static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw) { struct amd64_pvt *pvt = mci->pvt_info; u32 min_scrubrate = 0x5; @@ -210,10 +210,10 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) if (pvt->fam == 0x15 && pvt->model < 0x10) f15h_select_dct(pvt, 0); - return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); + return __set_scrub_rate(pvt->F3, bw, min_scrubrate); } -static int amd64_get_scrub_rate(struct mem_ctl_info *mci) +static int get_scrub_rate(struct mem_ctl_info *mci) { struct amd64_pvt *pvt = mci->pvt_info; u32 scrubval = 0; @@ -240,8 +240,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci) * returns true if the SysAddr given by sys_addr matches the * DRAM base/limit associated with node_id */ -static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, - u8 nid) +static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid) { u64 addr; @@ -285,7 +284,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, if (intlv_en == 0) { for (node_id = 0; node_id < DRAM_RANGES; node_id++) { - if (amd64_base_limit_match(pvt, sys_addr, node_id)) + if (base_limit_match(pvt, sys_addr, node_id)) goto found; } goto err_no_match; @@ -309,7 +308,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, } /* sanity test for sys_addr */ - if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { + if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) { amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" "range for node %d with node interleaving enabled.\n", __func__, sys_addr, node_id); @@ -660,7 +659,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs * are ECC capable. */ -static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) +static unsigned long determine_edac_cap(struct amd64_pvt *pvt) { u8 bit; unsigned long edac_cap = EDAC_FLAG_NONE; @@ -675,9 +674,9 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) return edac_cap; } -static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); +static void debug_display_dimm_sizes(struct amd64_pvt *, u8); -static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) +static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) { edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); @@ -711,7 +710,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt) (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", (pvt->nbcap & NBCAP_CHIPKILL) ? 
"yes" : "no"); - amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0); + debug_dump_dramcfg_low(pvt, pvt->dclr0, 0); edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); @@ -722,19 +721,19 @@ static void dump_misc_regs(struct amd64_pvt *pvt) edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); - amd64_debug_display_dimm_sizes(pvt, 0); + debug_display_dimm_sizes(pvt, 0); /* everything below this point is Fam10h and above */ if (pvt->fam == 0xf) return; - amd64_debug_display_dimm_sizes(pvt, 1); + debug_display_dimm_sizes(pvt, 1); amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4")); /* Only if NOT ganged does dclr1 have valid info */ if (!dct_ganging_enabled(pvt)) - amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1); + debug_dump_dramcfg_low(pvt, pvt->dclr1, 1); } /* @@ -800,7 +799,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) } } -static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) +static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs) { enum mem_type type; @@ -1578,7 +1577,7 @@ static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range, num_dcts_intlv, dct_sel); /* Verify we stay within the MAX number of channels allowed */ - if (channel > 4 || channel < 0) + if (channel > 3) return -EINVAL; leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0)); @@ -1702,7 +1701,7 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, * debug routine to display the memory sizes of all logical DIMMs and its * CSROWs */ -static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) +static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) { int dimm, size0, size1; u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; @@ -1744,7 +1743,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) } } -static struct amd64_family_type amd64_family_types[] = { +static struct amd64_family_type family_types[] = { [K8_CPUS] = { .ctl_name = "K8", .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, @@ -2005,9 +2004,9 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err, string, ""); } -static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, - struct mce *m) +static inline void decode_bus_error(int node_id, struct mce *m) { + struct mem_ctl_info *mci = mcis[node_id]; struct amd64_pvt *pvt = mci->pvt_info; u8 ecc_type = (m->status >> 45) & 0x3; u8 xec = XEC(m->status, 0x1f); @@ -2035,11 +2034,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, __log_bus_error(mci, &err, ecc_type); } -void amd64_decode_bus_error(int node_id, struct mce *m) -{ - __amd64_decode_bus_error(mcis[node_id], m); -} - /* * Use pvt->F2 which contains the F2 CPU PCI device to get the related * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error. @@ -2196,7 +2190,7 @@ static void read_mc_regs(struct amd64_pvt *pvt) * encompasses * */ -static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) +static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) { u32 cs_mode, nr_pages; u32 dbam = dct ? 
pvt->dbam1 : pvt->dbam0; @@ -2263,19 +2257,19 @@ static int init_csrows(struct mem_ctl_info *mci) pvt->mc_node_id, i); if (row_dct0) { - nr_pages = amd64_csrow_nr_pages(pvt, 0, i); + nr_pages = get_csrow_nr_pages(pvt, 0, i); csrow->channels[0]->dimm->nr_pages = nr_pages; } /* K8 has only one DCT */ if (pvt->fam != 0xf && row_dct1) { - int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); + int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i); csrow->channels[1]->dimm->nr_pages = row_dct1_pages; nr_pages += row_dct1_pages; } - mtype = amd64_determine_memory_type(pvt, i); + mtype = determine_memory_type(pvt, i); edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); @@ -2309,7 +2303,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid) } /* check MCG_CTL on all the cpus on this node */ -static bool amd64_nb_mce_bank_enabled_on_node(u16 nid) +static bool nb_mce_bank_enabled_on_node(u16 nid) { cpumask_var_t mask; int cpu, nbe; @@ -2482,7 +2476,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid) ecc_en = !!(value & NBCFG_ECC_ENABLE); amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); - nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); + nb_mce_en = nb_mce_bank_enabled_on_node(nid); if (!nb_mce_en) amd64_notice("NB MCE bank disabled, set MSR " "0x%08x[4] on node %d to enable.\n", @@ -2537,7 +2531,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, if (pvt->nbcap & NBCAP_CHIPKILL) mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; - mci->edac_cap = amd64_determine_edac_cap(pvt); + mci->edac_cap = determine_edac_cap(pvt); mci->mod_name = EDAC_MOD_STR; mci->mod_ver = EDAC_AMD64_VERSION; mci->ctl_name = fam->ctl_name; @@ -2545,14 +2539,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, mci->ctl_page_to_phys = NULL; /* memory scrubber interface */ - mci->set_sdram_scrub_rate = amd64_set_scrub_rate; - mci->get_sdram_scrub_rate = amd64_get_scrub_rate; + mci->set_sdram_scrub_rate = set_scrub_rate; + mci->get_sdram_scrub_rate = get_scrub_rate; } /* * returns a pointer to the family descriptor on success, NULL otherwise. 
*/ -static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) +static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) { struct amd64_family_type *fam_type = NULL; @@ -2563,29 +2557,29 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) switch (pvt->fam) { case 0xf: - fam_type = &amd64_family_types[K8_CPUS]; - pvt->ops = &amd64_family_types[K8_CPUS].ops; + fam_type = &family_types[K8_CPUS]; + pvt->ops = &family_types[K8_CPUS].ops; break; case 0x10: - fam_type = &amd64_family_types[F10_CPUS]; - pvt->ops = &amd64_family_types[F10_CPUS].ops; + fam_type = &family_types[F10_CPUS]; + pvt->ops = &family_types[F10_CPUS].ops; break; case 0x15: if (pvt->model == 0x30) { - fam_type = &amd64_family_types[F15_M30H_CPUS]; - pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops; + fam_type = &family_types[F15_M30H_CPUS]; + pvt->ops = &family_types[F15_M30H_CPUS].ops; break; } - fam_type = &amd64_family_types[F15_CPUS]; - pvt->ops = &amd64_family_types[F15_CPUS].ops; + fam_type = &family_types[F15_CPUS]; + pvt->ops = &family_types[F15_CPUS].ops; break; case 0x16: - fam_type = &amd64_family_types[F16_CPUS]; - pvt->ops = &amd64_family_types[F16_CPUS].ops; + fam_type = &family_types[F16_CPUS]; + pvt->ops = &family_types[F16_CPUS].ops; break; default: @@ -2601,7 +2595,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) return fam_type; } -static int amd64_init_one_instance(struct pci_dev *F2) +static int init_one_instance(struct pci_dev *F2) { struct amd64_pvt *pvt = NULL; struct amd64_family_type *fam_type = NULL; @@ -2619,7 +2613,7 @@ static int amd64_init_one_instance(struct pci_dev *F2) pvt->F2 = F2; ret = -EINVAL; - fam_type = amd64_per_family_init(pvt); + fam_type = per_family_init(pvt); if (!fam_type) goto err_free; @@ -2680,7 +2674,7 @@ static int amd64_init_one_instance(struct pci_dev *F2) if (report_gart_errors) amd_report_gart_errors(true); - amd_register_ecc_decoder(amd64_decode_bus_error); + amd_register_ecc_decoder(decode_bus_error); mcis[nid] = mci; @@ -2703,8 +2697,8 @@ static int amd64_init_one_instance(struct pci_dev *F2) return ret; } -static int amd64_probe_one_instance(struct pci_dev *pdev, - const struct pci_device_id *mc_type) +static int probe_one_instance(struct pci_dev *pdev, + const struct pci_device_id *mc_type) { u16 nid = amd_get_node_id(pdev); struct pci_dev *F3 = node_to_amd_nb(nid)->misc; @@ -2736,7 +2730,7 @@ static int amd64_probe_one_instance(struct pci_dev *pdev, goto err_enable; } - ret = amd64_init_one_instance(pdev); + ret = init_one_instance(pdev); if (ret < 0) { amd64_err("Error probing instance: %d\n", nid); restore_ecc_error_reporting(s, nid, F3); @@ -2752,7 +2746,7 @@ static int amd64_probe_one_instance(struct pci_dev *pdev, return ret; } -static void amd64_remove_one_instance(struct pci_dev *pdev) +static void remove_one_instance(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct amd64_pvt *pvt; @@ -2777,7 +2771,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev) /* unregister from EDAC MCE */ amd_report_gart_errors(false); - amd_unregister_ecc_decoder(amd64_decode_bus_error); + amd_unregister_ecc_decoder(decode_bus_error); kfree(ecc_stngs[nid]); ecc_stngs[nid] = NULL; @@ -2795,7 +2789,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev) * PCI core identifies what devices are on a system during boot, and then * inquiry this table to see if this driver is for a given device found. 
*/ -static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { +static const struct pci_device_id amd64_pci_table[] = { { .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, @@ -2843,8 +2837,8 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table); static struct pci_driver amd64_pci_driver = { .name = EDAC_MOD_STR, - .probe = amd64_probe_one_instance, - .remove = amd64_remove_one_instance, + .probe = probe_one_instance, + .remove = remove_one_instance, .id_table = amd64_pci_table, }; @@ -2853,23 +2847,18 @@ static void setup_pci_device(void) struct mem_ctl_info *mci; struct amd64_pvt *pvt; - if (amd64_ctl_pci) + if (pci_ctl) return; mci = mcis[0]; - if (mci) { - - pvt = mci->pvt_info; - amd64_ctl_pci = - edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); - - if (!amd64_ctl_pci) { - pr_warning("%s(): Unable to create PCI control\n", - __func__); + if (!mci) + return; - pr_warning("%s(): PCI error report via EDAC not set\n", - __func__); - } + pvt = mci->pvt_info; + pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); + if (!pci_ctl) { + pr_warn("%s(): Unable to create PCI control\n", __func__); + pr_warn("%s(): PCI error report via EDAC not set\n", __func__); } } @@ -2925,8 +2914,8 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { - if (amd64_ctl_pci) - edac_pci_release_generic_ctl(amd64_ctl_pci); + if (pci_ctl) + edac_pci_release_generic_ctl(pci_ctl); pci_unregister_driver(&amd64_pci_driver); diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index 96e3ee3460a566..3a501b530e1134 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c @@ -333,7 +333,7 @@ static void amd76x_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = { +static const struct pci_device_id amd76x_pci_tbl[] = { { PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, AMD762}, diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 644fec54681fcb..92d54fa65f939d 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c @@ -1182,9 +1182,11 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, pvt->dev_info->err_dev, pvt->bridge_ck); - if (pvt->bridge_ck == NULL) + if (pvt->bridge_ck == NULL) { pvt->bridge_ck = pci_scan_single_device(pdev->bus, PCI_DEVFN(0, 1)); + pci_dev_get(pvt->bridge_ck); + } if (pvt->bridge_ck == NULL) { e752x_printk(KERN_ERR, "error reporting device not found:" @@ -1421,7 +1423,7 @@ static void e752x_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = { +static const struct pci_device_id e752x_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7520}, diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 1c4056a5038396..3cda79bc8b0034 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c @@ -555,7 +555,7 @@ static void e7xxx_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = { +static const struct pci_device_id e7xxx_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7205}, diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 102674346035e1..592af5f0cf391d 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -437,6 +437,9 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) { int status; + if 
(!edac_dev->edac_check) + return; + status = cancel_delayed_work(&edac_dev->work); if (status == 0) { /* workq instance might be running, wait for it */ diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 9f7e0e609516c5..51c0362acf5c45 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -914,7 +914,7 @@ void __exit edac_debugfs_exit(void) debugfs_remove(edac_debugfs); } -int edac_create_debug_nodes(struct mem_ctl_info *mci) +static int edac_create_debug_nodes(struct mem_ctl_info *mci) { struct dentry *d, *parent; char name[80]; diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index 351945fa2ecdca..9d9e18aefaaa49 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c @@ -29,6 +29,25 @@ EXPORT_SYMBOL_GPL(edac_err_assert); static atomic_t edac_subsys_valid = ATOMIC_INIT(0); +int edac_report_status = EDAC_REPORTING_ENABLED; +EXPORT_SYMBOL_GPL(edac_report_status); + +static int __init edac_report_setup(char *str) +{ + if (!str) + return -EINVAL; + + if (!strncmp(str, "on", 2)) + set_edac_report_status(EDAC_REPORTING_ENABLED); + else if (!strncmp(str, "off", 3)) + set_edac_report_status(EDAC_REPORTING_DISABLED); + else if (!strncmp(str, "force", 5)) + set_edac_report_status(EDAC_REPORTING_FORCE); + + return 0; +} +__setup("edac_report=", edac_report_setup); + /* * called to determine if there is an EDAC driver interested in * knowing an event (such as NMI) occurred diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c index 694efcbf19c097..cd28b968e5c7a1 100644 --- a/drivers/edac/i3000_edac.c +++ b/drivers/edac/i3000_edac.c @@ -487,7 +487,7 @@ static void i3000_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = { +static const struct pci_device_id i3000_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I3000}, diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index be10a74b16ea71..fa1326e5a4b06d 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c @@ -466,7 +466,7 @@ static void i3200_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = { +static const struct pci_device_id i3200_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I3200}, diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 63b2194e8c2068..72e07e3cf718fb 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -1530,7 +1530,7 @@ static void i5000_remove_one(struct pci_dev *pdev) * * The "E500P" device is the first device supported. */ -static DEFINE_PCI_DEVICE_TABLE(i5000_pci_tbl) = { +static const struct pci_device_id i5000_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16), .driver_data = I5000P}, diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 157b934e8ce3a5..36a38ee94fa8a5 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -1213,7 +1213,7 @@ static void i5100_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = { +static const struct pci_device_id i5100_pci_tbl[] = { /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... 
*/ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, { 0, } diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index 0a05bbceb08f4e..e080cbfa8fc9d0 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c @@ -1416,7 +1416,7 @@ static void i5400_remove_one(struct pci_dev *pdev) * * The "E500P" device is the first device supported. */ -static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = { +static const struct pci_device_id i5400_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, {0,} /* 0 terminated list. */ }; diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 9004c64b169e05..d63f4798f7d091 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c @@ -1160,7 +1160,7 @@ static void i7300_remove_one(struct pci_dev *pdev) * * Has only 8086:360c PCI ID */ -static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = { +static const struct pci_device_id i7300_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, {0,} /* 0 terminated list. */ }; diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 80a963d64e58c1..87533ca7752e01 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -394,7 +394,7 @@ static const struct pci_id_table pci_dev_table[] = { /* * pci_device_id table for which devices we are looking for */ -static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = { +static const struct pci_device_id i7core_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, {0,} /* 0 terminated list. */ diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index 57fdb77903ba9a..d730e276d1a843 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c @@ -386,7 +386,7 @@ static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); -static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = { +static const struct pci_device_id i82443bxgx_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index 3e3e431c830113..3382f6344e428b 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c @@ -288,7 +288,7 @@ static void i82860_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = { +static const struct pci_device_id i82860_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I82860}, diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 2f8535fc451e64..80573df0a4d77d 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c @@ -527,7 +527,7 @@ static void i82875p_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = { +static const struct pci_device_id i82875p_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I82875P}, diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 0c8d4b0eaa3287..10b10521f62e19 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c @@ -628,7 +628,7 @@ static void i82975x_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = 
{ +static const struct pci_device_id i82975x_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I82975X diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index fd46b0bd5f2ae2..8f9182179a7c80 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -1,6 +1,8 @@ /* * Freescale MPC85xx Memory Controller kenel module * + * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc. + * * Author: Dave Jiang * * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under @@ -196,6 +198,42 @@ static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci) edac_pci_handle_npe(pci, pci->ctl_name); } +static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci) +{ + struct mpc85xx_pci_pdata *pdata = pci->pvt_info; + u32 err_detect; + + err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR); + + pr_err("PCIe error(s) detected\n"); + pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect); + pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n", + in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR)); + pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n", + in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0)); + pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n", + in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1)); + pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n", + in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2)); + pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n", + in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3)); + + /* clear error bits */ + out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect); +} + +static int mpc85xx_pcie_find_capability(struct device_node *np) +{ + struct pci_controller *hose; + + if (!np) + return -EINVAL; + + hose = pci_find_hose_for_OF_device(np); + + return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP); +} + static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id) { struct edac_pci_ctl_info *pci = dev_id; @@ -207,7 +245,10 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id) if (!err_detect) return IRQ_NONE; - mpc85xx_pci_check(pci); + if (pdata->is_pcie) + mpc85xx_pcie_check(pci); + else + mpc85xx_pci_check(pci); return IRQ_HANDLED; } @@ -239,14 +280,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op) pdata = pci->pvt_info; pdata->name = "mpc85xx_pci_err"; pdata->irq = NO_IRQ; + + if (mpc85xx_pcie_find_capability(op->dev.of_node) > 0) + pdata->is_pcie = true; + dev_set_drvdata(&op->dev, pci); pci->dev = &op->dev; pci->mod_name = EDAC_MOD_STR; pci->ctl_name = pdata->name; pci->dev_name = dev_name(&op->dev); - if (edac_op_state == EDAC_OPSTATE_POLL) - pci->edac_check = mpc85xx_pci_check; + if (edac_op_state == EDAC_OPSTATE_POLL) { + if (pdata->is_pcie) + pci->edac_check = mpc85xx_pcie_check; + else + pci->edac_check = mpc85xx_pci_check; + } pdata->edac_idx = edac_pci_idx++; @@ -275,16 +324,26 @@ int mpc85xx_pci_err_probe(struct platform_device *op) goto err; } - orig_pci_err_cap_dr = - in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR); + if (pdata->is_pcie) { + orig_pci_err_cap_dr = + in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR); + out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0); + orig_pci_err_en = + in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN); + out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0); + } else { + orig_pci_err_cap_dr = + in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR); - /* PCI master abort is expected during config cycles */ - out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40); + /* PCI master abort is expected during config cycles */ + 
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40); - orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN); + orig_pci_err_en = + in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN); - /* disable master abort reporting */ - out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40); + /* disable master abort reporting */ + out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40); + } /* clear error bits */ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); @@ -297,7 +356,8 @@ int mpc85xx_pci_err_probe(struct platform_device *op) if (edac_op_state == EDAC_OPSTATE_INT) { pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); res = devm_request_irq(&op->dev, pdata->irq, - mpc85xx_pci_isr, IRQF_DISABLED, + mpc85xx_pci_isr, + IRQF_DISABLED | IRQF_SHARED, "[EDAC] PCI err", pci); if (res < 0) { printk(KERN_ERR @@ -312,6 +372,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op) pdata->irq); } + if (pdata->is_pcie) { + /* + * Enable all PCIe error interrupts and error detection, except for + * the invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA access interrupt + * generation enable bit and the corresponding detection enable bit. + * The PCI bus code performs some invalid PEX_CONFIG_ADDR/ + * PEX_CONFIG_DATA accesses while initializing and configuring the + * PCIe devices at boot, which would make the EDAC driver print a + * flood of notices, so keep that particular detection disabled. + */ + out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0 + & ~PEX_ERR_ICCAIE_EN_BIT); + out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0 + | PEX_ERR_ICCAD_DISR_BIT); + } + devres_remove_group(&op->dev, mpc85xx_pci_err_probe); edac_dbg(3, "success\n"); printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h index 932016f2cf0603..8c6256436227be 100644 --- a/drivers/edac/mpc85xx_edac.h +++ b/drivers/edac/mpc85xx_edac.h @@ -134,13 +134,19 @@ #define MPC85XX_PCI_ERR_DR 0x0000 #define MPC85XX_PCI_ERR_CAP_DR 0x0004 #define MPC85XX_PCI_ERR_EN 0x0008 +#define PEX_ERR_ICCAIE_EN_BIT 0x00020000 #define MPC85XX_PCI_ERR_ATTRIB 0x000c #define MPC85XX_PCI_ERR_ADDR 0x0010 +#define PEX_ERR_ICCAD_DISR_BIT 0x00020000 #define MPC85XX_PCI_ERR_EXT_ADDR 0x0014 #define MPC85XX_PCI_ERR_DL 0x0018 #define MPC85XX_PCI_ERR_DH 0x001c #define MPC85XX_PCI_GAS_TIMR 0x0020 #define MPC85XX_PCI_PCIX_TIMR 0x0024 +#define MPC85XX_PCIE_ERR_CAP_R0 0x0028 +#define MPC85XX_PCIE_ERR_CAP_R1 0x002c +#define MPC85XX_PCIE_ERR_CAP_R2 0x0030 +#define MPC85XX_PCIE_ERR_CAP_R3 0x0034 struct mpc85xx_mc_pdata { char *name; @@ -158,6 +164,7 @@ struct mpc85xx_l2_pdata { struct mpc85xx_pci_pdata { char *name; + bool is_pcie; int edac_idx; void __iomem *pci_vbase; int irq; diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index 2fd6a549090584..8f936bc7a010cb 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c @@ -383,7 +383,7 @@ static void r82600_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = { +static const struct pci_device_id r82600_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) }, diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index d7f1b57bd3be07..54e2abe671f70a 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -461,7 +461,7 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = { /* * pci_device_id table for which devices we are looking for */ -static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = { +static const struct pci_device_id 
sbridge_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, {0,} /* 0 terminated list. */ @@ -915,7 +915,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci) } } -struct mem_ctl_info *get_mci_for_node_id(u8 node_id) +static struct mem_ctl_info *get_mci_for_node_id(u8 node_id) { struct sbridge_dev *sbridge_dev; @@ -1829,6 +1829,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, struct mem_ctl_info *mci; struct sbridge_pvt *pvt; + if (get_edac_report_status() == EDAC_REPORTING_DISABLED) + return NOTIFY_DONE; + mci = get_mci_for_node_id(mce->socketid); if (!mci) return NOTIFY_BAD; @@ -2142,9 +2145,10 @@ static int __init sbridge_init(void) opstate_init(); pci_rc = pci_register_driver(&sbridge_driver); - if (pci_rc >= 0) { mce_register_decode_chain(&sbridge_mce_dec); + if (get_edac_report_status() == EDAC_REPORTING_DISABLED) + sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n"); return 0; } diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index 1a4df82376ba42..4891b450830bdd 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c @@ -448,7 +448,7 @@ static void x38_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } -static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = { +static const struct pci_device_id x38_pci_tbl[] = { { PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, X38}, diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index f1d54a3985bdfe..bdb5a00f1dfa9e 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -31,6 +31,16 @@ config EXTCON_ADC_JACK help Say Y here to enable extcon device driver based on ADC values. +config EXTCON_MAX14577 + tristate "MAX14577 EXTCON Support" + depends on MFD_MAX14577 + select IRQ_DOMAIN + select REGMAP_I2C + help + If you say yes here you get support for the MUIC device of + Maxim MAX14577 PMIC. The MAX14577 MUIC is a USB port accessory + detector and switch. 
+ config EXTCON_MAX77693 tristate "MAX77693 EXTCON Support" depends on MFD_MAX77693 && INPUT diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile index 759fdae46f9574..43eccc0e3448de 100644 --- a/drivers/extcon/Makefile +++ b/drivers/extcon/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_OF_EXTCON) += of_extcon.o obj-$(CONFIG_EXTCON) += extcon-class.o obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o +obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index a287cece0593c3..c20602f601ee22 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c @@ -44,6 +44,15 @@ #define HPDET_DEBOUNCE 500 #define DEFAULT_MICD_TIMEOUT 2000 +#define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \ + ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \ + ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \ + ARIZONA_MICD_LVL_7) + +#define MICD_LVL_0_TO_7 (ARIZONA_MICD_LVL_0 | MICD_LVL_1_TO_7) + +#define MICD_LVL_0_TO_8 (MICD_LVL_0_TO_7 | ARIZONA_MICD_LVL_8) + struct arizona_extcon_info { struct device *dev; struct arizona *arizona; @@ -426,26 +435,15 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info) } val &= ARIZONA_HP_LVL_B_MASK; + /* Convert to ohms, the value is in 0.5 ohm increments */ + val /= 2; regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1, &range); range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK) >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT; - /* Skip up or down a range? */ - if (range && (val < arizona_hpdet_c_ranges[range].min)) { - range--; - dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n", - arizona_hpdet_c_ranges[range].min, - arizona_hpdet_c_ranges[range].max); - regmap_update_bits(arizona->regmap, - ARIZONA_HEADPHONE_DETECT_1, - ARIZONA_HP_IMPEDANCE_RANGE_MASK, - range << - ARIZONA_HP_IMPEDANCE_RANGE_SHIFT); - return -EAGAIN; - } - + /* Skip up a range, or report? 
*/ if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 && (val >= arizona_hpdet_c_ranges[range].max)) { range++; @@ -459,6 +457,12 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info) ARIZONA_HP_IMPEDANCE_RANGE_SHIFT); return -EAGAIN; } + + if (range && (val < arizona_hpdet_c_ranges[range].min)) { + dev_dbg(arizona->dev, "Reporting range boundary %d\n", + arizona_hpdet_c_ranges[range].min); + val = arizona_hpdet_c_ranges[range].min; + } } dev_dbg(arizona->dev, "HP impedance %d ohms\n", val); @@ -594,9 +598,15 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) dev_err(arizona->dev, "Failed to report HP/line: %d\n", ret); +done: + /* Reset back to starting range */ + regmap_update_bits(arizona->regmap, + ARIZONA_HEADPHONE_DETECT_1, + ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL, + 0); + arizona_extcon_do_magic(info, 0); -done: if (id_gpio) gpio_set_value_cansleep(id_gpio, 0); @@ -765,7 +775,20 @@ static void arizona_micd_detect(struct work_struct *work) mutex_lock(&info->lock); - for (i = 0; i < 10 && !(val & 0x7fc); i++) { + /* If the cable was removed while measuring ignore the result */ + ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL); + if (ret < 0) { + dev_err(arizona->dev, "Failed to check cable state: %d\n", + ret); + mutex_unlock(&info->lock); + return; + } else if (!ret) { + dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n"); + mutex_unlock(&info->lock); + return; + } + + for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) { ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val); if (ret != 0) { dev_err(arizona->dev, @@ -784,7 +807,7 @@ static void arizona_micd_detect(struct work_struct *work) } } - if (i == 10 && !(val & 0x7fc)) { + if (i == 10 && !(val & MICD_LVL_0_TO_8)) { dev_err(arizona->dev, "Failed to get valid MICDET value\n"); mutex_unlock(&info->lock); return; @@ -798,7 +821,7 @@ static void arizona_micd_detect(struct work_struct *work) } /* If we got a high impedence we should have a headset, report it. */ - if (info->detecting && (val & 0x400)) { + if (info->detecting && (val & ARIZONA_MICD_LVL_8)) { arizona_identify_headphone(info); ret = extcon_update_state(&info->edev, @@ -827,7 +850,7 @@ static void arizona_micd_detect(struct work_struct *work) * plain headphones. If both polarities report a low * impedence then give up and report headphones. */ - if (info->detecting && (val & 0x3f8)) { + if (info->detecting && (val & MICD_LVL_1_TO_7)) { if (info->jack_flips >= info->micd_num_modes * 10) { dev_dbg(arizona->dev, "Detected HP/line\n"); arizona_identify_headphone(info); @@ -851,7 +874,7 @@ static void arizona_micd_detect(struct work_struct *work) * If we're still detecting and we detect a short then we've * got a headphone. Otherwise it's a button press. */ - if (val & 0x3fc) { + if (val & MICD_LVL_0_TO_7) { if (info->mic) { dev_dbg(arizona->dev, "Mic button detected\n"); @@ -1126,6 +1149,16 @@ static int arizona_extcon_probe(struct platform_device *pdev) break; } break; + case WM5110: + switch (arizona->rev) { + case 0 ... 
2: + break; + default: + info->micd_clamp = true; + info->hpdet_ip = 2; + break; + } + break; default: break; } diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c index 7e0dff58e4943e..a63a6b21c9ad4a 100644 --- a/drivers/extcon/extcon-gpio.c +++ b/drivers/extcon/extcon-gpio.c @@ -40,6 +40,7 @@ struct gpio_extcon_data { int irq; struct delayed_work work; unsigned long debounce_jiffies; + bool check_on_resume; }; static void gpio_extcon_work(struct work_struct *work) @@ -103,8 +104,15 @@ static int gpio_extcon_probe(struct platform_device *pdev) extcon_data->gpio_active_low = pdata->gpio_active_low; extcon_data->state_on = pdata->state_on; extcon_data->state_off = pdata->state_off; + extcon_data->check_on_resume = pdata->check_on_resume; if (pdata->state_on && pdata->state_off) extcon_data->edev.print_state = extcon_gpio_print_state; + + ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, + pdev->name); + if (ret < 0) + return ret; + if (pdata->debounce) { ret = gpio_set_debounce(extcon_data->gpio, pdata->debounce * 1000); @@ -117,11 +125,6 @@ static int gpio_extcon_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, - pdev->name); - if (ret < 0) - goto err; - INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); extcon_data->irq = gpio_to_irq(extcon_data->gpio); @@ -159,12 +162,31 @@ static int gpio_extcon_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int gpio_extcon_resume(struct device *dev) +{ + struct gpio_extcon_data *extcon_data; + + extcon_data = dev_get_drvdata(dev); + if (extcon_data->check_on_resume) + queue_delayed_work(system_power_efficient_wq, + &extcon_data->work, extcon_data->debounce_jiffies); + + return 0; +} +#endif + +static const struct dev_pm_ops gpio_extcon_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume) +}; + static struct platform_driver gpio_extcon_driver = { .probe = gpio_extcon_probe, .remove = gpio_extcon_remove, .driver = { .name = "extcon-gpio", .owner = THIS_MODULE, + .pm = &gpio_extcon_pm_ops, }, }; diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c new file mode 100644 index 00000000000000..3846941801b8fd --- /dev/null +++ b/drivers/extcon/extcon-max14577.c @@ -0,0 +1,752 @@ +/* + * extcon-max14577.c - MAX14577 extcon driver to support MAX14577 MUIC + * + * Copyright (C) 2013 Samsung Electrnoics + * Chanwoo Choi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEV_NAME "max14577-muic" +#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */ + +enum max14577_muic_adc_debounce_time { + ADC_DEBOUNCE_TIME_5MS = 0, + ADC_DEBOUNCE_TIME_10MS, + ADC_DEBOUNCE_TIME_25MS, + ADC_DEBOUNCE_TIME_38_62MS, +}; + +enum max14577_muic_status { + MAX14577_MUIC_STATUS1 = 0, + MAX14577_MUIC_STATUS2 = 1, + MAX14577_MUIC_STATUS_END, +}; + +struct max14577_muic_info { + struct device *dev; + struct max14577 *max14577; + struct extcon_dev *edev; + int prev_cable_type; + int prev_chg_type; + u8 status[MAX14577_MUIC_STATUS_END]; + + bool irq_adc; + bool irq_chg; + struct work_struct irq_work; + struct mutex mutex; + + /* + * Use delayed workqueue to detect cable state and then + * notify cable state to notifiee/platform through uevent. + * After completing the booting of platform, the extcon provider + * driver should notify cable state to upper layer. + */ + struct delayed_work wq_detcable; + + /* + * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB + * h/w path of COMP2/COMN1 on CONTROL1 register. + */ + int path_usb; + int path_uart; +}; + +enum max14577_muic_cable_group { + MAX14577_CABLE_GROUP_ADC = 0, + MAX14577_CABLE_GROUP_CHG, +}; + +/** + * struct max14577_muic_irq + * @irq: the index of irq list of MUIC device. + * @name: the name of irq. + * @virq: the virtual irq to use irq domain + */ +struct max14577_muic_irq { + unsigned int irq; + const char *name; + unsigned int virq; +}; + +static struct max14577_muic_irq muic_irqs[] = { + { MAX14577_IRQ_INT1_ADC, "muic-ADC" }, + { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" }, + { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" }, + { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" }, + { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" }, + { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" }, + { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" }, + { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" }, +}; + +/* Define supported accessory type */ +enum max14577_muic_acc_type { + MAX14577_MUIC_ADC_GROUND = 0x0, + MAX14577_MUIC_ADC_SEND_END_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S1_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S2_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S3_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S4_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S5_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S6_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S7_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S8_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S9_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S10_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S11_BUTTON, + MAX14577_MUIC_ADC_REMOTE_S12_BUTTON, + MAX14577_MUIC_ADC_RESERVED_ACC_1, + MAX14577_MUIC_ADC_RESERVED_ACC_2, + MAX14577_MUIC_ADC_RESERVED_ACC_3, + MAX14577_MUIC_ADC_RESERVED_ACC_4, + MAX14577_MUIC_ADC_RESERVED_ACC_5, + MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2, + MAX14577_MUIC_ADC_PHONE_POWERED_DEV, + MAX14577_MUIC_ADC_TTY_CONVERTER, + MAX14577_MUIC_ADC_UART_CABLE, + MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG, + MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF, + MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON, + MAX14577_MUIC_ADC_AV_CABLE_NOLOAD, + MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG, + MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF, + MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON, + MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1, /* with Remote and Simple Ctrl */ + MAX14577_MUIC_ADC_OPEN, +}; + +/* max14577 MUIC device support below list of accessories(external connector) */ +enum { + EXTCON_CABLE_USB = 0, + EXTCON_CABLE_TA, + EXTCON_CABLE_FAST_CHARGER, + EXTCON_CABLE_SLOW_CHARGER, + EXTCON_CABLE_CHARGE_DOWNSTREAM, + EXTCON_CABLE_JIG_USB_ON, + 
EXTCON_CABLE_JIG_USB_OFF, + EXTCON_CABLE_JIG_UART_OFF, + EXTCON_CABLE_JIG_UART_ON, + + _EXTCON_CABLE_NUM, +}; + +static const char *max14577_extcon_cable[] = { + [EXTCON_CABLE_USB] = "USB", + [EXTCON_CABLE_TA] = "TA", + [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger", + [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger", + [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream", + [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON", + [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF", + [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF", + [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON", + + NULL, +}; + +/* + * max14577_muic_set_debounce_time - Set the debounce time of ADC + * @info: the instance including private data of max14577 MUIC + * @time: the debounce time of ADC + */ +static int max14577_muic_set_debounce_time(struct max14577_muic_info *info, + enum max14577_muic_adc_debounce_time time) +{ + u8 ret; + + switch (time) { + case ADC_DEBOUNCE_TIME_5MS: + case ADC_DEBOUNCE_TIME_10MS: + case ADC_DEBOUNCE_TIME_25MS: + case ADC_DEBOUNCE_TIME_38_62MS: + ret = max14577_update_reg(info->max14577->regmap, + MAX14577_MUIC_REG_CONTROL3, + CTRL3_ADCDBSET_MASK, + time << CTRL3_ADCDBSET_SHIFT); + if (ret) { + dev_err(info->dev, "failed to set ADC debounce time\n"); + return ret; + } + break; + default: + dev_err(info->dev, "invalid ADC debounce time\n"); + return -EINVAL; + } + + return 0; +}; + +/* + * max14577_muic_set_path - Set hardware line according to attached cable + * @info: the instance including private data of max14577 MUIC + * @value: the path according to attached cable + * @attached: the state of cable (true:attached, false:detached) + * + * The max14577 MUIC device share outside H/W line among a varity of cables + * so, this function set internal path of H/W line according to the type of + * attached cable. + */ +static int max14577_muic_set_path(struct max14577_muic_info *info, + u8 val, bool attached) +{ + int ret = 0; + u8 ctrl1, ctrl2 = 0; + + /* Set open state to path before changing hw path */ + ret = max14577_update_reg(info->max14577->regmap, + MAX14577_MUIC_REG_CONTROL1, + CLEAR_IDBEN_MICEN_MASK, CTRL1_SW_OPEN); + if (ret < 0) { + dev_err(info->dev, "failed to update MUIC register\n"); + return ret; + } + + if (attached) + ctrl1 = val; + else + ctrl1 = CTRL1_SW_OPEN; + + ret = max14577_update_reg(info->max14577->regmap, + MAX14577_MUIC_REG_CONTROL1, + CLEAR_IDBEN_MICEN_MASK, ctrl1); + if (ret < 0) { + dev_err(info->dev, "failed to update MUIC register\n"); + return ret; + } + + if (attached) + ctrl2 |= CTRL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */ + else + ctrl2 |= CTRL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */ + + ret = max14577_update_reg(info->max14577->regmap, + MAX14577_REG_CONTROL2, + CTRL2_LOWPWR_MASK | CTRL2_CPEN_MASK, ctrl2); + if (ret < 0) { + dev_err(info->dev, "failed to update MUIC register\n"); + return ret; + } + + dev_dbg(info->dev, + "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n", + ctrl1, ctrl2, attached ? "attached" : "detached"); + + return 0; +} + +/* + * max14577_muic_get_cable_type - Return cable type and check cable state + * @info: the instance including private data of max14577 MUIC + * @group: the path according to attached cable + * @attached: store cable state and return + * + * This function check the cable state either attached or detached, + * and then divide precise type of cable according to cable group. 
+ * - max14577_CABLE_GROUP_ADC + * - max14577_CABLE_GROUP_CHG + */ +static int max14577_muic_get_cable_type(struct max14577_muic_info *info, + enum max14577_muic_cable_group group, bool *attached) +{ + int cable_type = 0; + int adc; + int chg_type; + + switch (group) { + case MAX14577_CABLE_GROUP_ADC: + /* + * Read ADC value to check cable type and decide cable state + * according to cable type + */ + adc = info->status[MAX14577_MUIC_STATUS1] & STATUS1_ADC_MASK; + adc >>= STATUS1_ADC_SHIFT; + + /* + * Check current cable state/cable type and store cable type + * (info->prev_cable_type) for handling cable when cable is + * detached. + */ + if (adc == MAX14577_MUIC_ADC_OPEN) { + *attached = false; + + cable_type = info->prev_cable_type; + info->prev_cable_type = MAX14577_MUIC_ADC_OPEN; + } else { + *attached = true; + + cable_type = info->prev_cable_type = adc; + } + break; + case MAX14577_CABLE_GROUP_CHG: + /* + * Read charger type to check cable type and decide cable state + * according to type of charger cable. + */ + chg_type = info->status[MAX14577_MUIC_STATUS2] & + STATUS2_CHGTYP_MASK; + chg_type >>= STATUS2_CHGTYP_SHIFT; + + if (chg_type == MAX14577_CHARGER_TYPE_NONE) { + *attached = false; + + cable_type = info->prev_chg_type; + info->prev_chg_type = MAX14577_CHARGER_TYPE_NONE; + } else { + *attached = true; + + /* + * Check current cable state/cable type and store cable + * type(info->prev_chg_type) for handling cable when + * charger cable is detached. + */ + cable_type = info->prev_chg_type = chg_type; + } + + break; + default: + dev_err(info->dev, "Unknown cable group (%d)\n", group); + cable_type = -EINVAL; + break; + } + + return cable_type; +} + +static int max14577_muic_jig_handler(struct max14577_muic_info *info, + int cable_type, bool attached) +{ + char cable_name[32]; + int ret = 0; + u8 path = CTRL1_SW_OPEN; + + dev_dbg(info->dev, + "external connector is %s (adc:0x%02x)\n", + attached ? "attached" : "detached", cable_type); + + switch (cable_type) { + case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */ + /* PATH:AP_USB */ + strcpy(cable_name, "JIG-USB-OFF"); + path = CTRL1_SW_USB; + break; + case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */ + /* PATH:AP_USB */ + strcpy(cable_name, "JIG-USB-ON"); + path = CTRL1_SW_USB; + break; + case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */ + /* PATH:AP_UART */ + strcpy(cable_name, "JIG-UART-OFF"); + path = CTRL1_SW_UART; + break; + default: + dev_err(info->dev, "failed to detect %s jig cable\n", + attached ? "attached" : "detached"); + return -EINVAL; + } + + ret = max14577_muic_set_path(info, path, attached); + if (ret < 0) + return ret; + + extcon_set_cable_state(info->edev, cable_name, attached); + + return 0; +} + +static int max14577_muic_adc_handler(struct max14577_muic_info *info) +{ + int cable_type; + bool attached; + int ret = 0; + + /* Check accessory state which is either detached or attached */ + cable_type = max14577_muic_get_cable_type(info, + MAX14577_CABLE_GROUP_ADC, &attached); + + dev_dbg(info->dev, + "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n", + attached ? 
"attached" : "detached", cable_type, + info->prev_cable_type); + + switch (cable_type) { + case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: + case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: + case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: + /* JIG */ + ret = max14577_muic_jig_handler(info, cable_type, attached); + if (ret < 0) + return ret; + break; + case MAX14577_MUIC_ADC_GROUND: + case MAX14577_MUIC_ADC_SEND_END_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S1_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S2_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S3_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S4_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S5_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S6_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S7_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S8_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S9_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S10_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S11_BUTTON: + case MAX14577_MUIC_ADC_REMOTE_S12_BUTTON: + case MAX14577_MUIC_ADC_RESERVED_ACC_1: + case MAX14577_MUIC_ADC_RESERVED_ACC_2: + case MAX14577_MUIC_ADC_RESERVED_ACC_3: + case MAX14577_MUIC_ADC_RESERVED_ACC_4: + case MAX14577_MUIC_ADC_RESERVED_ACC_5: + case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2: + case MAX14577_MUIC_ADC_PHONE_POWERED_DEV: + case MAX14577_MUIC_ADC_TTY_CONVERTER: + case MAX14577_MUIC_ADC_UART_CABLE: + case MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG: + case MAX14577_MUIC_ADC_AV_CABLE_NOLOAD: + case MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG: + case MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON: + case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1: + /* + * This accessory isn't used in general case if it is specially + * needed to detect additional accessory, should implement + * proper operation when this accessory is attached/detached. + */ + dev_info(info->dev, + "accessory is %s but it isn't used (adc:0x%x)\n", + attached ? "attached" : "detached", cable_type); + return -EAGAIN; + default: + dev_err(info->dev, + "failed to detect %s accessory (adc:0x%x)\n", + attached ? "attached" : "detached", cable_type); + return -EINVAL; + } + + return 0; +} + +static int max14577_muic_chg_handler(struct max14577_muic_info *info) +{ + int chg_type; + bool attached; + int ret = 0; + + chg_type = max14577_muic_get_cable_type(info, + MAX14577_CABLE_GROUP_CHG, &attached); + + dev_dbg(info->dev, + "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n", + attached ? "attached" : "detached", + chg_type, info->prev_chg_type); + + switch (chg_type) { + case MAX14577_CHARGER_TYPE_USB: + /* PATH:AP_USB */ + ret = max14577_muic_set_path(info, info->path_usb, attached); + if (ret < 0) + return ret; + + extcon_set_cable_state(info->edev, "USB", attached); + break; + case MAX14577_CHARGER_TYPE_DEDICATED_CHG: + extcon_set_cable_state(info->edev, "TA", attached); + break; + case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT: + extcon_set_cable_state(info->edev, + "Charge-downstream", attached); + break; + case MAX14577_CHARGER_TYPE_SPECIAL_500MA: + extcon_set_cable_state(info->edev, "Slow-charger", attached); + break; + case MAX14577_CHARGER_TYPE_SPECIAL_1A: + extcon_set_cable_state(info->edev, "Fast-charger", attached); + break; + case MAX14577_CHARGER_TYPE_NONE: + case MAX14577_CHARGER_TYPE_DEAD_BATTERY: + break; + default: + dev_err(info->dev, + "failed to detect %s accessory (chg_type:0x%x)\n", + attached ? 
"attached" : "detached", chg_type); + return -EINVAL; + } + + return 0; +} + +static void max14577_muic_irq_work(struct work_struct *work) +{ + struct max14577_muic_info *info = container_of(work, + struct max14577_muic_info, irq_work); + int ret = 0; + + if (!info->edev) + return; + + mutex_lock(&info->mutex); + + ret = max14577_bulk_read(info->max14577->regmap, + MAX14577_MUIC_REG_STATUS1, info->status, 2); + if (ret) { + dev_err(info->dev, "failed to read MUIC register\n"); + mutex_unlock(&info->mutex); + return; + } + + if (info->irq_adc) { + ret = max14577_muic_adc_handler(info); + info->irq_adc = false; + } + if (info->irq_chg) { + ret = max14577_muic_chg_handler(info); + info->irq_chg = false; + } + + if (ret < 0) + dev_err(info->dev, "failed to handle MUIC interrupt\n"); + + mutex_unlock(&info->mutex); + + return; +} + +static irqreturn_t max14577_muic_irq_handler(int irq, void *data) +{ + struct max14577_muic_info *info = data; + int i, irq_type = -1; + + /* + * We may be called multiple times for different nested IRQ-s. + * Including changes in INT1_ADC and INT2_CGHTYP at once. + * However we only need to know whether it was ADC, charger + * or both interrupts so decode IRQ and turn on proper flags. + */ + for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) + if (irq == muic_irqs[i].virq) + irq_type = muic_irqs[i].irq; + + switch (irq_type) { + case MAX14577_IRQ_INT1_ADC: + case MAX14577_IRQ_INT1_ADCLOW: + case MAX14577_IRQ_INT1_ADCERR: + /* Handle all of accessory except for + type of charger accessory */ + info->irq_adc = true; + break; + case MAX14577_IRQ_INT2_CHGTYP: + case MAX14577_IRQ_INT2_CHGDETRUN: + case MAX14577_IRQ_INT2_DCDTMR: + case MAX14577_IRQ_INT2_DBCHG: + case MAX14577_IRQ_INT2_VBVOLT: + /* Handle charger accessory */ + info->irq_chg = true; + break; + default: + dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n", + irq_type); + return IRQ_HANDLED; + } + schedule_work(&info->irq_work); + + return IRQ_HANDLED; +} + +static int max14577_muic_detect_accessory(struct max14577_muic_info *info) +{ + int ret = 0; + int adc; + int chg_type; + bool attached; + + mutex_lock(&info->mutex); + + /* Read STATUSx register to detect accessory */ + ret = max14577_bulk_read(info->max14577->regmap, + MAX14577_MUIC_REG_STATUS1, info->status, 2); + if (ret) { + dev_err(info->dev, "failed to read MUIC register\n"); + mutex_unlock(&info->mutex); + return ret; + } + + adc = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_ADC, + &attached); + if (attached && adc != MAX14577_MUIC_ADC_OPEN) { + ret = max14577_muic_adc_handler(info); + if (ret < 0) { + dev_err(info->dev, "Cannot detect accessory\n"); + mutex_unlock(&info->mutex); + return ret; + } + } + + chg_type = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_CHG, + &attached); + if (attached && chg_type != MAX14577_CHARGER_TYPE_NONE) { + ret = max14577_muic_chg_handler(info); + if (ret < 0) { + dev_err(info->dev, "Cannot detect charger accessory\n"); + mutex_unlock(&info->mutex); + return ret; + } + } + + mutex_unlock(&info->mutex); + + return 0; +} + +static void max14577_muic_detect_cable_wq(struct work_struct *work) +{ + struct max14577_muic_info *info = container_of(to_delayed_work(work), + struct max14577_muic_info, wq_detcable); + + max14577_muic_detect_accessory(info); +} + +static int max14577_muic_probe(struct platform_device *pdev) +{ + struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); + struct max14577_muic_info *info; + int delay_jiffies; + int ret; + int i; + u8 id; + + info = 
devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + info->dev = &pdev->dev; + info->max14577 = max14577; + + platform_set_drvdata(pdev, info); + mutex_init(&info->mutex); + + INIT_WORK(&info->irq_work, max14577_muic_irq_work); + + /* Support irq domain for max14577 MUIC device */ + for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) { + struct max14577_muic_irq *muic_irq = &muic_irqs[i]; + unsigned int virq = 0; + + virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq); + if (!virq) + return -EINVAL; + muic_irq->virq = virq; + + ret = devm_request_threaded_irq(&pdev->dev, virq, NULL, + max14577_muic_irq_handler, + IRQF_NO_SUSPEND, + muic_irq->name, info); + if (ret) { + dev_err(&pdev->dev, + "failed: irq request (IRQ: %d," + " error :%d)\n", + muic_irq->irq, ret); + return ret; + } + } + + /* Initialize extcon device */ + info->edev = devm_kzalloc(&pdev->dev, sizeof(*info->edev), GFP_KERNEL); + if (!info->edev) { + dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); + return -ENOMEM; + } + info->edev->name = DEV_NAME; + info->edev->supported_cable = max14577_extcon_cable; + ret = extcon_dev_register(info->edev); + if (ret) { + dev_err(&pdev->dev, "failed to register extcon device\n"); + return ret; + } + + /* Default h/w line path */ + info->path_usb = CTRL1_SW_USB; + info->path_uart = CTRL1_SW_UART; + delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); + + /* Set initial path for UART */ + max14577_muic_set_path(info, info->path_uart, true); + + /* Check revision number of MUIC device*/ + ret = max14577_read_reg(info->max14577->regmap, + MAX14577_REG_DEVICEID, &id); + if (ret < 0) { + dev_err(&pdev->dev, "failed to read revision number\n"); + goto err_extcon; + } + dev_info(info->dev, "device ID : 0x%x\n", id); + + /* Set ADC debounce time */ + max14577_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS); + + /* + * Detect accessory after completing the initialization of platform + * + * - Use delayed workqueue to detect cable state and then + * notify cable state to notifiee/platform through uevent. + * After completing the booting of platform, the extcon provider + * driver should notify cable state to upper layer. 
+ */ + INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq); + ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, + delay_jiffies); + if (ret < 0) { + dev_err(&pdev->dev, + "failed to schedule delayed work for cable detect\n"); + goto err_extcon; + } + + return ret; + +err_extcon: + extcon_dev_unregister(info->edev); + return ret; +} + +static int max14577_muic_remove(struct platform_device *pdev) +{ + struct max14577_muic_info *info = platform_get_drvdata(pdev); + + cancel_work_sync(&info->irq_work); + extcon_dev_unregister(info->edev); + + return 0; +} + +static struct platform_driver max14577_muic_driver = { + .driver = { + .name = DEV_NAME, + .owner = THIS_MODULE, + }, + .probe = max14577_muic_probe, + .remove = max14577_muic_remove, +}; + +module_platform_driver(max14577_muic_driver); + +MODULE_DESCRIPTION("MAXIM 14577 Extcon driver"); +MODULE_AUTHOR("Chanwoo Choi "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:extcon-max14577"); diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 6c91976dd82371..2aea4bcdd7f394 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -78,20 +78,24 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb) static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb) { - unsigned int set; + unsigned int set, id_src; struct palmas_usb *palmas_usb = _palmas_usb; palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE, PALMAS_USB_ID_INT_LATCH_SET, &set); + palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE, + PALMAS_USB_ID_INT_SRC, &id_src); - if (set & PALMAS_USB_ID_INT_SRC_ID_GND) { + if ((set & PALMAS_USB_ID_INT_SRC_ID_GND) && + (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) { palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, PALMAS_USB_ID_INT_LATCH_CLR, PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND); palmas_usb->linkstat = PALMAS_USB_STATE_ID; extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true); dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); - } else if (set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) { + } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) && + (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) { palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, PALMAS_USB_ID_INT_LATCH_CLR, PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT); @@ -103,6 +107,11 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb) palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false); dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); + } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) && + (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) { + palmas_usb->linkstat = PALMAS_USB_STATE_ID; + extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true); + dev_info(palmas_usb->dev, " USB-HOST cable is attached\n"); } return IRQ_HANDLED; @@ -269,7 +278,9 @@ static const struct dev_pm_ops palmas_pm_ops = { static struct of_device_id of_palmas_match_tbl[] = { { .compatible = "ti,palmas-usb", }, + { .compatible = "ti,palmas-usb-vid", }, { .compatible = "ti,twl6035-usb", }, + { .compatible = "ti,twl6035-usb-vid", }, { /* end */ } }; diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 299fad6b586728..5373dc5b60114f 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-$(CONFIG_EFI) += efi/ +obj-$(CONFIG_UEFI_CPER) += efi/ diff --git 
a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index eb26d62e5188a9..e0f1cb3d3598da 100644 --- a/drivers/firmware/dmi-sysfs.c +++ b/drivers/firmware/dmi-sysfs.c @@ -553,7 +553,7 @@ static const struct bin_attribute dmi_entry_raw_attr = { static void dmi_sysfs_entry_release(struct kobject *kobj) { struct dmi_sysfs_entry *entry = to_entry(kobj); - sysfs_remove_bin_file(&entry->kobj, &dmi_entry_raw_attr); + spin_lock(&entry_list_lock); list_del(&entry->list); spin_unlock(&entry_list_lock); @@ -685,6 +685,7 @@ static void __exit dmi_sysfs_exit(void) pr_debug("dmi-sysfs: unloading.\n"); cleanup_entry_list(); kset_unregister(dmi_kset); + kobject_del(dmi_kobj); kobject_put(dmi_kobj); } diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 3150aa4874e8f6..1e75f48b61f8a1 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -36,7 +36,18 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE backend for pstore by default. This setting can be overridden using the efivars module's pstore_disable parameter. -config UEFI_CPER - def_bool n +config EFI_RUNTIME_MAP + bool "Export efi runtime maps to sysfs" + depends on X86 && EFI && KEXEC + default y + help + Export efi runtime memory maps to /sys/firmware/efi/runtime-map. + That memory map is used for example by kexec to set up efi virtual + mapping the 2nd kernel, but can also be used for debugging purposes. + + See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. endmenu + +config UEFI_CPER + bool diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 9ba156d3c7755e..9553496b0f43ed 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -1,7 +1,8 @@ # # Makefile for linux kernel # -obj-y += efi.o vars.o +obj-$(CONFIG_EFI) += efi.o vars.o obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o +obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2e2fbdec084541..4753bac6527985 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -32,6 +32,9 @@ struct efi __read_mostly efi = { .hcdp = EFI_INVALID_TABLE_ADDR, .uga = EFI_INVALID_TABLE_ADDR, .uv_systab = EFI_INVALID_TABLE_ADDR, + .fw_vendor = EFI_INVALID_TABLE_ADDR, + .runtime = EFI_INVALID_TABLE_ADDR, + .config_table = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); @@ -71,13 +74,49 @@ static ssize_t systab_show(struct kobject *kobj, static struct kobj_attribute efi_attr_systab = __ATTR(systab, 0400, systab_show, NULL); +#define EFI_FIELD(var) efi.var + +#define EFI_ATTR_SHOW(name) \ +static ssize_t name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \ +} + +EFI_ATTR_SHOW(fw_vendor); +EFI_ATTR_SHOW(runtime); +EFI_ATTR_SHOW(config_table); + +static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor); +static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime); +static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table); + static struct attribute *efi_subsys_attrs[] = { &efi_attr_systab.attr, - NULL, /* maybe more in the future? 
*/ + &efi_attr_fw_vendor.attr, + &efi_attr_runtime.attr, + &efi_attr_config_table.attr, + NULL, }; +static umode_t efi_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + umode_t mode = attr->mode; + + if (attr == &efi_attr_fw_vendor.attr) + return (efi.fw_vendor == EFI_INVALID_TABLE_ADDR) ? 0 : mode; + else if (attr == &efi_attr_runtime.attr) + return (efi.runtime == EFI_INVALID_TABLE_ADDR) ? 0 : mode; + else if (attr == &efi_attr_config_table.attr) + return (efi.config_table == EFI_INVALID_TABLE_ADDR) ? 0 : mode; + + return mode; +} + static struct attribute_group efi_subsys_attr_group = { .attrs = efi_subsys_attrs, + .is_visible = efi_attr_is_visible, }; static struct efivars generic_efivars; @@ -128,6 +167,10 @@ static int __init efisubsys_init(void) goto err_unregister; } + error = efi_runtime_map_init(efi_kobj); + if (error) + goto err_remove_group; + /* and the standard mountpoint for efivarfs */ efivars_kobj = kobject_create_and_add("efivars", efi_kobj); if (!efivars_kobj) { diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c new file mode 100644 index 00000000000000..97cdd16a2169a1 --- /dev/null +++ b/drivers/firmware/efi/runtime-map.c @@ -0,0 +1,181 @@ +/* + * linux/drivers/efi/runtime-map.c + * Copyright (C) 2013 Red Hat, Inc., Dave Young + * + * This file is released under the GPLv2. + */ + +#include +#include +#include +#include +#include +#include + +#include + +static void *efi_runtime_map; +static int nr_efi_runtime_map; +static u32 efi_memdesc_size; + +struct efi_runtime_map_entry { + efi_memory_desc_t md; + struct kobject kobj; /* kobject for each entry */ +}; + +static struct efi_runtime_map_entry **map_entries; + +struct map_attribute { + struct attribute attr; + ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf); +}; + +static inline struct map_attribute *to_map_attr(struct attribute *attr) +{ + return container_of(attr, struct map_attribute, attr); +} + +static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); +} + +#define EFI_RUNTIME_FIELD(var) entry->md.var + +#define EFI_RUNTIME_U64_ATTR_SHOW(name) \ +static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \ +{ \ + return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \ +} + +EFI_RUNTIME_U64_ATTR_SHOW(phys_addr); +EFI_RUNTIME_U64_ATTR_SHOW(virt_addr); +EFI_RUNTIME_U64_ATTR_SHOW(num_pages); +EFI_RUNTIME_U64_ATTR_SHOW(attribute); + +static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj) +{ + return container_of(kobj, struct efi_runtime_map_entry, kobj); +} + +static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct efi_runtime_map_entry *entry = to_map_entry(kobj); + struct map_attribute *map_attr = to_map_attr(attr); + + return map_attr->show(entry, buf); +} + +static struct map_attribute map_type_attr = __ATTR_RO(type); +static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr); +static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr); +static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages); +static struct map_attribute map_attribute_attr = __ATTR_RO(attribute); + +/* + * These are default attributes that are added for every memmap entry. 
+ */ +static struct attribute *def_attrs[] = { + &map_type_attr.attr, + &map_phys_addr_attr.attr, + &map_virt_addr_attr.attr, + &map_num_pages_attr.attr, + &map_attribute_attr.attr, + NULL +}; + +static const struct sysfs_ops map_attr_ops = { + .show = map_attr_show, +}; + +static void map_release(struct kobject *kobj) +{ + struct efi_runtime_map_entry *entry; + + entry = to_map_entry(kobj); + kfree(entry); +} + +static struct kobj_type __refdata map_ktype = { + .sysfs_ops = &map_attr_ops, + .default_attrs = def_attrs, + .release = map_release, +}; + +static struct kset *map_kset; + +static struct efi_runtime_map_entry * +add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) +{ + int ret; + struct efi_runtime_map_entry *entry; + + if (!map_kset) { + map_kset = kset_create_and_add("runtime-map", NULL, kobj); + if (!map_kset) + return ERR_PTR(-ENOMEM); + } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + kset_unregister(map_kset); + return entry; + } + + memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, + sizeof(efi_memory_desc_t)); + + kobject_init(&entry->kobj, &map_ktype); + entry->kobj.kset = map_kset; + ret = kobject_add(&entry->kobj, NULL, "%d", nr); + if (ret) { + kobject_put(&entry->kobj); + kset_unregister(map_kset); + return ERR_PTR(ret); + } + + return entry; +} + +void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) +{ + efi_runtime_map = map; + nr_efi_runtime_map = nr_entries; + efi_memdesc_size = desc_size; +} + +int __init efi_runtime_map_init(struct kobject *efi_kobj) +{ + int i, j, ret = 0; + struct efi_runtime_map_entry *entry; + + if (!efi_runtime_map) + return 0; + + map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL); + if (!map_entries) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < nr_efi_runtime_map; i++) { + entry = add_sysfs_runtime_map_entry(efi_kobj, i); + if (IS_ERR(entry)) { + ret = PTR_ERR(entry); + goto out_add_entry; + } + *(map_entries + i) = entry; + } + + return 0; +out_add_entry: + for (j = i - 1; j > 0; j--) { + entry = *(map_entries + j); + kobject_put(&entry->kobj); + } + if (map_kset) + kset_unregister(map_kset); +out: + return ret; +} diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 85f772c0b26a61..c8a7c810bade26 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -393,7 +393,7 @@ static const DEVICE_ATTR(value, 0644, static irqreturn_t gpio_sysfs_irq(int irq, void *priv) { - struct sysfs_dirent *value_sd = priv; + struct kernfs_node *value_sd = priv; sysfs_notify_dirent(value_sd); return IRQ_HANDLED; @@ -402,7 +402,7 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv) static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, unsigned long gpio_flags) { - struct sysfs_dirent *value_sd; + struct kernfs_node *value_sd; unsigned long irq_flags; int ret, irq, id; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 85071a1c454792..b0733153dfd294 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector) /* if equal delete the probed mode */ mode->status = pmode->status; /* Merge type bits together */ - mode->type = pmode->type; + mode->type |= pmode->type; list_del(&pmode->head); drm_mode_destroy(connector->dev, pmode); break; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c79dd2b1f70ecc..d3c3b5b15824ee 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ 
b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -906,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, WARN_ON(readq(>t_entries[i-1]) != gen8_pte_encode(addr, level, true)); -#if 0 /* TODO: Still needed on GEN8? */ /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates * have finished. */ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); -#endif } /* diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5d1dedc02f159c..f13d5edc39d56c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2713,6 +2713,8 @@ static void gen8_irq_preinstall(struct drm_device *dev) #undef GEN8_IRQ_INIT_NDX POSTING_READ(GEN8_PCU_IIR); + + ibx_irq_preinstall(dev); } static void ibx_hpd_irq_setup(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 526c8ded16b03b..b69dc3e66c165a 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1057,12 +1057,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev) enum pipe pipe; struct intel_crtc *intel_crtc; + dev_priv->ddi_plls.spll_refcount = 0; + dev_priv->ddi_plls.wrpll1_refcount = 0; + dev_priv->ddi_plls.wrpll2_refcount = 0; + for_each_pipe(pipe) { intel_crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - if (!intel_crtc->active) + if (!intel_crtc->active) { + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; continue; + } intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, pipe); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 54e82a80cf507f..2bde35d34eb99d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = { /* Sony Vaio Y cannot use SSC on LVDS */ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, - /* - * All GM45 Acer (and its brands eMachines and Packard Bell) laptops - * seem to use inverted backlight PWM. 
- */ - { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, + /* Acer Aspire 5734Z must invert backlight brightness */ + { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, + + /* Acer/eMachines G725 */ + { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, + + /* Acer/eMachines e725 */ + { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, + + /* Acer/Packard Bell NCL20 */ + { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, + + /* Acer Aspire 4736Z */ + { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, /* Dell XPS13 HD Sandy Bridge */ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, @@ -11044,10 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_setup_overlay(dev); - drm_modeset_lock_all(dev); + mutex_lock(&dev->mode_config.mutex); drm_mode_config_reset(dev); intel_modeset_setup_hw_state(dev, false); - drm_modeset_unlock_all(dev); + mutex_unlock(&dev->mode_config.mutex); } void intel_modeset_cleanup(struct drm_device *dev) diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c index 48f06378d3f9d5..2ea5568b6cf59b 100644 --- a/drivers/gpu/drm/nouveau/core/core/subdev.c +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c @@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent, if (parent) { struct nouveau_device *device = nv_device(parent); - int subidx = nv_hclass(subdev) & 0xff; - subdev->debug = nouveau_dbgopt(device->dbgopt, subname); subdev->mmio = nv_subdev(device)->mmio; - device->subdev[subidx] = *pobject; } return 0; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c index 9135b25a29d09a..dd01c6c435d6e2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c @@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent, if (ret) return ret; + device->subdev[i] = devobj->subdev[i]; + /* note: can't init *any* subdevs until devinit has been run * due to not knowing exactly what the vbios init tables will * mess with. 
devinit also can't be run until all of its diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index 8d06eef2b9ee02..dbc5e33de94f8b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index 434bb4b0fa2e65..5c8a63dc506aaf 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c @@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds) while ((mthd = &mthds[i++]) && (init = mthd->init)) { u32 addr = 0x80000000 | mthd->oclass; for (data = 0; init->count; init++) { - if (data != init->data) { + if (init == mthd->init || data != init->data) { nv_wr32(priv, 0x40448c, init->data); data = init->data; } diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h index 8541aa382ff224..d89dbdf39b0db5 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h @@ -75,6 +75,11 @@ struct nouveau_fb { static inline struct nouveau_fb * nouveau_fb(void *obj) { + /* fbram uses this before device subdev pointer is valid */ + if (nv_iclass(obj, NV_SUBDEV_CLASS) && + nv_subidx(obj) == NVDEV_SUBDEV_FB) + return obj; + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; } diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h index 9fa5da72387192..7f50a858b16f68 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h @@ -73,7 +73,7 @@ struct nouveau_i2c { int (*identify)(struct nouveau_i2c *, int index, const char *what, struct nouveau_i2c_board_info *, bool (*match)(struct nouveau_i2c_port *, - struct i2c_board_info *)); + struct i2c_board_info *, void *), void *); struct list_head ports; }; diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h index ec7a54e91a087c..4aca33887aaa63 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h @@ -50,6 +50,13 @@ struct nouveau_instmem { static inline struct nouveau_instmem * nouveau_instmem(void *obj) { + /* nv04/nv40 impls need to create objects in their constructor, + * which is before the subdev pointer is valid + */ + if (nv_iclass(obj, NV_SUBDEV_CLASS) && + nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM) + return obj; + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index 420908cb82b613..df1b1b42309337 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -365,13 +365,13 @@ static 
u16 init_script(struct nouveau_bios *bios, int index) { struct nvbios_init init = { .bios = bios }; - u16 data; + u16 bmp_ver = bmp_version(bios), data; - if (bmp_version(bios) && bmp_version(bios) < 0x0510) { - if (index > 1) + if (bmp_ver && bmp_ver < 0x0510) { + if (index > 1 || bmp_ver < 0x0100) return 0x0000; - data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); + data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18); return nv_ro16(bios, data + (index * 2)); } @@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init) u16 offset = nv_ro16(bios, init->offset + 1); trace("JUMP\t0x%04x\n", offset); - init->offset = offset; + + if (init_exec(init)) + init->offset = offset; + else + init->offset += 3; } /** diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c index 041fd5edaebf7d..c33c03d2f4af2f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c @@ -197,7 +197,7 @@ static int nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, struct nouveau_i2c_board_info *info, bool (*match)(struct nouveau_i2c_port *, - struct i2c_board_info *)) + struct i2c_board_info *, void *), void *data) { struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); int i; @@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, } if (nv_probe_i2c(port, info[i].dev.addr) && - (!match || match(port, &info[i].dev))) { + (!match || match(port, &info[i].dev, data))) { nv_info(i2c, "detected %s: %s\n", what, info[i].dev.type); return i; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c index af129c2e811388..64f8b4702bf727 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c @@ -100,7 +100,7 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info) static int mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb) { - struct nouveau_mxm *mxm = nouveau_mxm(bios); + struct nouveau_mxm *mxm = data; struct context ctx = { .outp = (u32 *)(bios->data + pdcb) }; u8 type, i2cidx, link, ver, len; u8 *conn; @@ -199,7 +199,7 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm) return; } - dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry); + dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry); mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index e44ed7b93c6d88..7610fc5f8fa256 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c @@ -29,9 +29,9 @@ static bool probe_monitoring_device(struct nouveau_i2c_port *i2c, - struct i2c_board_info *info) + struct i2c_board_info *info, void *data) { - struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); + struct nouveau_therm_priv *priv = data; struct nvbios_therm_sensor *sensor = &priv->bios_sensor; struct i2c_client *client; @@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm) }; i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", - board, probe_monitoring_device); + board, probe_monitoring_device, therm); if (priv->ic) return; } @@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm) }; i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", - board, probe_monitoring_device); + board, probe_monitoring_device, therm); if (priv->ic) return; } @@ 
-117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm) device. Let's try our static list. */ i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", - nv_board_infos, probe_monitoring_device); + nv_board_infos, probe_monitoring_device, therm); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 936a71c5908076..7fdc51e2a571be 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) get_tmds_slave(encoder)) return; - type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); + type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL); if (type < 0) return; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index cc4b208ce546d5..244822df8ffc73 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index) struct nouveau_i2c *i2c = nouveau_i2c(drm->device); return i2c->identify(i2c, i2c_index, "TV encoder", - nv04_tv_encoder_info, NULL); + nv04_tv_encoder_info, NULL, NULL); } diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 6828d81ed7b99d..900fae01793e78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; + info->offset = ntfy->node->offset; + done: if (ret) nouveau_abi16_ntfy_fini(chan, ntfy); diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 95c740454049ad..ba0183fb84f3b8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -51,6 +51,7 @@ static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; acpi_handle dhandle; + acpi_handle other_handle; acpi_handle rom_handle; } nouveau_dsm_priv; @@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) if (!dhandle) return false; - if (!acpi_has_method(dhandle, "_DSM")) + if (!acpi_has_method(dhandle, "_DSM")) { + nouveau_dsm_priv.other_handle = dhandle; return false; - + } if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) retval |= NOUVEAU_DSM_HAS_MUX; @@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void) printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); nouveau_dsm_priv.dsm_detected = true; + /* + * On some systems hotplug events are generated for the device + * being switched off when _DSM is executed. They cause ACPI + * hotplug to trigger and attempt to remove the device from + * the system, which causes it to break down. Prevent that from + * happening by setting the no_hotplug flag for the involved + * ACPI device objects. 
+ */ + acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle); + acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle); ret = true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 29c3efdfc7dd71..25ea82f8def3cb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, ret = nouveau_fence_sync(fence, chan); nouveau_fence_unref(&fence); if (ret) - return ret; + goto fail_free; if (new_bo != old_bo) { ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b1970596a782a4..0b9621c9aeea3b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, } if (tiling_flags & RADEON_TILING_MACRO) { - if (rdev->family >= CHIP_BONAIRE) - tmp = rdev->config.cik.tile_config; - else if (rdev->family >= CHIP_TAHITI) - tmp = rdev->config.si.tile_config; - else if (rdev->family >= CHIP_CAYMAN) - tmp = rdev->config.cayman.tile_config; - else - tmp = rdev->config.evergreen.tile_config; + evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); - switch ((tmp & 0xf0) >> 4) { - case 0: /* 4 banks */ - fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); - break; - case 1: /* 8 banks */ - default: - fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); - break; - case 2: /* 16 banks */ - fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); - break; + /* Set NUM_BANKS. */ + if (rdev->family >= CHIP_BONAIRE) { + unsigned tileb, index, num_banks, tile_split_bytes; + + /* Calculate the macrotile mode index. */ + tile_split_bytes = 64 << tile_split; + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = min(tile_split_bytes, tileb); + + for (index = 0; tileb > 64; index++) { + tileb >>= 1; + } + + if (index >= 16) { + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", + target_fb->bits_per_pixel, tile_split); + return -EINVAL; + } + + num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); + } else { + /* SI and older. 
*/ + if (rdev->family >= CHIP_TAHITI) + tmp = rdev->config.si.tile_config; + else if (rdev->family >= CHIP_CAYMAN) + tmp = rdev->config.cayman.tile_config; + else + tmp = rdev->config.evergreen.tile_config; + + switch ((tmp & 0xf0) >> 4) { + case 0: /* 4 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); + break; + case 1: /* 8 banks */ + default: + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); + break; + case 2: /* 16 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); + break; + } } fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); - - evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); @@ -1180,19 +1202,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); if (rdev->family >= CHIP_BONAIRE) { - u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; - u32 num_rb = rdev->config.cik.max_backends_per_se; - if (num_pipe_configs > 8) - num_pipe_configs = 8; - if (num_pipe_configs == 8) - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); - else if (num_pipe_configs == 4) { - if (num_rb == 4) - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16); - else if (num_rb < 4) - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16); - } else if (num_pipe_configs == 2) - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2); + /* Read the pipe config from the 2D TILED SCANOUT mode. + * It should be the same for the other modes too, but not all + * modes set the pipe config field. */ + u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f; + + fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config); } else if ((rdev->family == CHIP_TAHITI) || (rdev->family == CHIP_PITCAIRN)) fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b43a3a3c906719..e950fabd7f5e47 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width) * Returns the disabled RB bitmask. 
*/ static u32 cik_get_rb_disabled(struct radeon_device *rdev, - u32 max_rb_num, u32 se_num, + u32 max_rb_num_per_se, u32 sh_per_se) { u32 data, mask; @@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev, data >>= BACKEND_DISABLE_SHIFT; - mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); + mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se); return data & mask; } @@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev, */ static void cik_setup_rb(struct radeon_device *rdev, u32 se_num, u32 sh_per_se, - u32 max_rb_num) + u32 max_rb_num_per_se) { int i, j; u32 data, mask; @@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev, for (i = 0; i < se_num; i++) { for (j = 0; j < sh_per_se; j++) { cik_select_se_sh(rdev, i, j); - data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); + data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); if (rdev->family == CHIP_HAWAII) disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); else @@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev, cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); mask = 1; - for (i = 0; i < max_rb_num; i++) { + for (i = 0; i < max_rb_num_per_se * se_num; i++) { if (!(disabled_rbs & mask)) enabled_rbs |= mask; mask <<= 1; } + rdev->config.cik.backend_enable_mask = enabled_rbs; + for (i = 0; i < se_num; i++) { cik_select_se_sh(rdev, i, 0xffffffff); data = 0; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b1f990d0eaa101..45e1f447bc794c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1940,7 +1940,7 @@ struct si_asic { unsigned sc_earlyz_tile_fifo_size; unsigned num_tile_pipes; - unsigned num_backends_per_se; + unsigned backend_enable_mask; unsigned backend_disable_mask_per_asic; unsigned backend_map; unsigned num_texture_channel_caches; @@ -1970,7 +1970,7 @@ struct cik_asic { unsigned sc_earlyz_tile_fifo_size; unsigned num_tile_pipes; - unsigned num_backends_per_se; + unsigned backend_enable_mask; unsigned backend_disable_mask_per_asic; unsigned backend_map; unsigned num_texture_channel_caches; diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 9d302eaeea1587..485848f889f55c 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -33,6 +33,7 @@ static struct radeon_atpx_priv { bool atpx_detected; /* handle for device - and atpx */ acpi_handle dhandle; + acpi_handle other_handle; struct radeon_atpx atpx; } radeon_atpx_priv; @@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) return false; status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { + radeon_atpx_priv.other_handle = dhandle; return false; - + } radeon_atpx_priv.dhandle = dhandle; radeon_atpx_priv.atpx.handle = atpx_handle; return true; @@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void) printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", acpi_method_name); radeon_atpx_priv.atpx_detected = true; + /* + * On some systems hotplug events are generated for the device + * being switched off when ATPX is executed. They cause ACPI + * hotplug to trigger and attempt to remove the device from + * the system, which causes it to break down. 
Prevent that from + * happening by setting the no_hotplug flag for the involved + * ACPI device objects. + */ + acpi_bus_no_hotplug(radeon_atpx_priv.dhandle); + acpi_bus_no_hotplug(radeon_atpx_priv.other_handle); return true; } return false; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 1958b36ad0e5cd..db39ea36bf22fc 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -77,9 +77,10 @@ * 2.33.0 - Add SI tiling mode array query * 2.34.0 - Add CIK tiling mode array query * 2.35.0 - Add CIK macrotile mode array query + * 2.36.0 - Fix CIK DCE tiling setup */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 35 +#define KMS_DRIVER_MINOR 36 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 55d0b474bd371a..21d593c0ecaf4e 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case RADEON_INFO_SI_CP_DMA_COMPUTE: *value = 1; break; + case RADEON_INFO_SI_BACKEND_ENABLED_MASK: + if (rdev->family >= CHIP_BONAIRE) { + *value = rdev->config.cik.backend_enable_mask; + } else if (rdev->family >= CHIP_TAHITI) { + *value = rdev->config.si.backend_enable_mask; + } else { + DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); + } + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 373d088bac66db..b9c0529b4a2e1e 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, return -EINVAL; } - if ((start >> 28) != (end >> 28)) { + if ((start >> 28) != ((end - 1) >> 28)) { DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", start, end); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index a36736dab5e069..85e1edfaa3bed0 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev, } static u32 si_get_rb_disabled(struct radeon_device *rdev, - u32 max_rb_num, u32 se_num, + u32 max_rb_num_per_se, u32 sh_per_se) { u32 data, mask; @@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev, data >>= BACKEND_DISABLE_SHIFT; - mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); + mask = si_create_bitmask(max_rb_num_per_se / sh_per_se); return data & mask; } static void si_setup_rb(struct radeon_device *rdev, u32 se_num, u32 sh_per_se, - u32 max_rb_num) + u32 max_rb_num_per_se) { int i, j; u32 data, mask; @@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev, for (i = 0; i < se_num; i++) { for (j = 0; j < sh_per_se; j++) { si_select_se_sh(rdev, i, j); - data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); + data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); } } si_select_se_sh(rdev, 0xffffffff, 0xffffffff); mask = 1; - for (i = 0; i < max_rb_num; i++) { + for (i = 0; i < max_rb_num_per_se * se_num; i++) { if (!(disabled_rbs & mask)) enabled_rbs |= mask; mask <<= 1; } + 
rdev->config.si.backend_enable_mask = enabled_rbs; + for (i = 0; i < se_num; i++) { si_select_se_sh(rdev, i, 0xffffffff); data = 0; diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index f0c5e07c25ec9f..bcb49502c3bf22 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -301,7 +301,7 @@ int hv_synic_alloc(void) return -ENOMEM; } -void hv_synic_free_cpu(int cpu) +static void hv_synic_free_cpu(int cpu) { kfree(hv_context.event_dpc[cpu]); if (hv_context.synic_event_page[cpu]) diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 78be66176840d1..bbb0b0d463f7e0 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -52,7 +53,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ -#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ +#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) @@ -176,20 +177,33 @@ static ssize_t show_temp(struct device *dev, /* Check whether the time interval has elapsed */ if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) { rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); - tdata->valid = 0; - /* Check whether the data is valid */ - if (eax & 0x80000000) { - tdata->temp = tdata->tjmax - - ((eax >> 16) & 0x7f) * 1000; - tdata->valid = 1; - } + /* + * Ignore the valid bit. In all observed cases the register + * value is either low or zero if the valid bit is 0. + * Return it instead of reporting an error which doesn't + * really help at all. + */ + tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; + tdata->valid = 1; tdata->last_updated = jiffies; } mutex_unlock(&tdata->update_lock); - return tdata->valid ? 
sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; + return sprintf(buf, "%d\n", tdata->temp); } +struct tjmax_pci { + unsigned int device; + int tjmax; +}; + +static const struct tjmax_pci tjmax_pci_table[] = { + { 0x0708, 110000 }, /* CE41x0 (Sodaville ) */ + { 0x0c72, 102000 }, /* Atom S1240 (Centerton) */ + { 0x0c73, 95000 }, /* Atom S1220 (Centerton) */ + { 0x0c75, 95000 }, /* Atom S1260 (Centerton) */ +}; + struct tjmax { char const *id; int tjmax; @@ -198,9 +212,6 @@ struct tjmax { static const struct tjmax tjmax_table[] = { { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */ { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */ - { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */ - { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */ - { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */ }; struct tjmax_model { @@ -222,8 +233,11 @@ static const struct tjmax_model tjmax_model_table[] = { * is undetectable by software */ { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */ - { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */ - { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */ + { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */ + { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) + * Also matches S12x0 (stepping 9), covered by + * PCI table + */ }; static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) @@ -236,8 +250,20 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) int err; u32 eax, edx; int i; + struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + + /* + * Explicit tjmax table entries override heuristics. + * First try PCI host bridge IDs, followed by model ID strings + * and model/stepping information. 
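The comment above sums up the new TjMax lookup order. As an illustration only, the user-space fragment below replays the first stage, the PCI host-bridge match, against the values taken from tjmax_pci_table in this hunk; the real driver then falls back to the model-ID string table, the model/stepping table, and finally the MSR heuristic.

#include <stdio.h>

struct tjmax_pci { unsigned int device; int tjmax; };

/* values copied from tjmax_pci_table above (millidegrees Celsius) */
static const struct tjmax_pci tjmax_pci_table[] = {
	{ 0x0708, 110000 },	/* CE41x0 (Sodaville) */
	{ 0x0c72, 102000 },	/* Atom S1240 (Centerton) */
	{ 0x0c73,  95000 },	/* Atom S1220 (Centerton) */
	{ 0x0c75,  95000 },	/* Atom S1260 (Centerton) */
};

static int tjmax_from_host_bridge(unsigned int device)
{
	unsigned int i;

	for (i = 0; i < sizeof(tjmax_pci_table) / sizeof(tjmax_pci_table[0]); i++)
		if (device == tjmax_pci_table[i].device)
			return tjmax_pci_table[i].tjmax;
	return 0;	/* no match: fall through to the later tables */
}

int main(void)
{
	/* an Atom S1240 system reports host-bridge device ID 0x0c72 */
	printf("TjMax: %d millidegrees C\n", tjmax_from_host_bridge(0x0c72));
	return 0;
}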
+ */ + if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) { + for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) { + if (host_bridge->device == tjmax_pci_table[i].device) + return tjmax_pci_table[i].tjmax; + } + } - /* explicit tjmax table entries override heuristics */ for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) { if (strstr(c->x86_model_id, tjmax_table[i].id)) return tjmax_table[i].tjmax; @@ -343,12 +369,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) if (cpu_has_tjmax(c)) dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); } else { - val = (eax >> 16) & 0xff; + val = (eax >> 16) & 0x7f; /* * If the TjMax is not plausible, an assumption * will be used */ - if (val) { + if (val >= 85) { dev_dbg(dev, "TjMax is %d degrees C\n", val); return val * 1000; } diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index 960fac3fb16648..afd31042b45207 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c @@ -45,7 +45,7 @@ static const char * const input_names[] = { /* Conversion function for VDDOUT and VBAT */ static inline int volt_reg_to_mv(int value) { - return DIV_ROUND_CLOSEST(value * 1000, 512) + 2500; + return DIV_ROUND_CLOSEST(value * 2000, 1023) + 2500; } /* Conversion function for ADC channels 4, 5 and 6 */ @@ -57,7 +57,7 @@ static inline int input_reg_to_mv(int value) /* Conversion function for VBBAT */ static inline int vbbat_reg_to_mv(int value) { - return DIV_ROUND_CLOSEST(value * 2500, 512); + return DIV_ROUND_CLOSEST(value * 5000, 1023); } static inline int da9052_enable_vddout_channel(struct da9052 *da9052) diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index dff841085bafc9..6040121a405a2c 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -249,7 +249,7 @@ static void fam15h_power_remove(struct pci_dev *pdev) sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group); } -static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = { +static const struct pci_device_id fam15h_power_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, {} diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index d65f3fd895ddda..baf375b5ab0db6 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -204,12 +204,13 @@ static void k10temp_remove(struct pci_dev *pdev) &sensor_dev_attr_temp1_crit_hyst.dev_attr); } -static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = { +static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, {} }; diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 5b50e9e4f96bb2..734d55d48cc88f 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -135,7 +135,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0); static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1); static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); -static DEFINE_PCI_DEVICE_TABLE(k8temp_ids) = { +static const struct pci_device_id k8temp_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, { 0 }, }; diff --git a/drivers/hwmon/nct6775.c 
b/drivers/hwmon/nct6775.c index cf811c1a14756f..8686e966fa28e2 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -3936,6 +3936,18 @@ static int nct6775_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(hwmon_dev); } +static void nct6791_enable_io_mapping(int sioaddr) +{ + int val; + + val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE); + if (val & 0x10) { + pr_info("Enabling hardware monitor logical device mappings.\n"); + superio_outb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE, + val & ~0x10); + } +} + #ifdef CONFIG_PM static int nct6775_suspend(struct device *dev) { @@ -3955,11 +3967,20 @@ static int nct6775_suspend(struct device *dev) static int nct6775_resume(struct device *dev) { struct nct6775_data *data = dev_get_drvdata(dev); - int i, j; + int i, j, err = 0; mutex_lock(&data->update_lock); data->bank = 0xff; /* Force initial bank selection */ + if (data->kind == nct6791) { + err = superio_enter(data->sioreg); + if (err) + goto abort; + + nct6791_enable_io_mapping(data->sioreg); + superio_exit(data->sioreg); + } + /* Restore limits */ for (i = 0; i < data->in_num; i++) { if (!(data->have_in & (1 << i))) @@ -3996,11 +4017,12 @@ static int nct6775_resume(struct device *dev) nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2); } +abort: /* Force re-reading all values */ data->valid = false; mutex_unlock(&data->update_lock); - return 0; + return err; } static const struct dev_pm_ops nct6775_dev_pm_ops = { @@ -4088,15 +4110,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n"); superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); } - if (sio_data->kind == nct6791) { - val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE); - if (val & 0x10) { - pr_info("Enabling hardware monitor logical device mappings.\n"); - superio_outb(sioaddr, - NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE, - val & ~0x10); - } - } + + if (sio_data->kind == nct6791) + nct6791_enable_io_mapping(sioaddr); superio_exit(sioaddr); pr_info("Found %s or compatible chip at %#x:%#x\n", diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c index 72a889702f0dc0..e74bd7e620e814 100644 --- a/drivers/hwmon/sis5595.c +++ b/drivers/hwmon/sis5595.c @@ -754,7 +754,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev) return data; } -static DEFINE_PCI_DEVICE_TABLE(sis5595_pci_ids) = { +static const struct pci_device_id sis5595_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { 0, } }; diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c index c9dcce8c3dc34f..babd732b4e1843 100644 --- a/drivers/hwmon/via686a.c +++ b/drivers/hwmon/via686a.c @@ -824,7 +824,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) return data; } -static DEFINE_PCI_DEVICE_TABLE(via686a_pci_ids) = { +static const struct pci_device_id via686a_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, { } }; diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index aee14e2192f881..b3babe3326fb2d 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c @@ -766,7 +766,7 @@ static struct platform_driver vt8231_driver = { .remove = vt8231_remove, }; -static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = { +static const struct pci_device_id vt8231_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, { 0, } }; diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c index 
b1d38590ac0196..46eaf58d881b4e 100644 --- a/drivers/ide/buddha.c +++ b/drivers/ide/buddha.c @@ -198,7 +198,7 @@ static int __init buddha_init(void) continue; } } - buddha_board = ZTWO_VADDR(board); + buddha_board = (unsigned long)ZTWO_VADDR(board); /* write to BUDDHA_IRQ_MR to enable the board IRQ */ /* X-Surf doesn't have this. IRQs are always on */ diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 92d1206482a62c..6c0e0452dd9b48 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table; * which is also the index into the MWAIT hint array. * Thus C0 is a dummy. */ -static struct cpuidle_state nehalem_cstates[] __initdata = { +static struct cpuidle_state nehalem_cstates[] = { { .name = "C1-NHM", .desc = "MWAIT 0x00", @@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = { .enter = NULL } }; -static struct cpuidle_state snb_cstates[] __initdata = { +static struct cpuidle_state snb_cstates[] = { { .name = "C1-SNB", .desc = "MWAIT 0x00", @@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { .enter = NULL } }; -static struct cpuidle_state ivb_cstates[] __initdata = { +static struct cpuidle_state ivb_cstates[] = { { .name = "C1-IVB", .desc = "MWAIT 0x00", @@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { .enter = NULL } }; -static struct cpuidle_state hsw_cstates[] __initdata = { +static struct cpuidle_state hsw_cstates[] = { { .name = "C1-HSW", .desc = "MWAIT 0x00", @@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .enter = NULL } }; -static struct cpuidle_state atom_cstates[] __initdata = { +static struct cpuidle_state atom_cstates[] = { { .name = "C1E-ATM", .desc = "MWAIT 0x00", @@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = { { .enter = NULL } }; -static struct cpuidle_state avn_cstates[] __initdata = { +static struct cpuidle_state avn_cstates[] = { { .name = "C1-AVN", .desc = "MWAIT 0x00", @@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = { .exit_latency = 15, .target_residency = 45, .enter = &intel_idle }, + { + .enter = NULL } }; /** @@ -375,13 +377,7 @@ static int intel_idle(struct cpuidle_device *dev, if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); - if (!current_set_polling_and_test()) { - - __monitor((void *)¤t_thread_info()->flags, 0, 0); - smp_mb(); - if (!need_resched()) - __mwait(eax, ecx); - } + mwait_idle_with_hints(eax, ecx); if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index 90cf0cda50c49e..5dd0e120a504cd 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -65,9 +65,11 @@ source "drivers/iio/common/Kconfig" source "drivers/iio/dac/Kconfig" source "drivers/iio/frequency/Kconfig" source "drivers/iio/gyro/Kconfig" +source "drivers/iio/humidity/Kconfig" source "drivers/iio/imu/Kconfig" source "drivers/iio/light/Kconfig" source "drivers/iio/magnetometer/Kconfig" +source "drivers/iio/orientation/Kconfig" if IIO_TRIGGER source "drivers/iio/trigger/Kconfig" endif #IIO_TRIGGER diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index bcf7e9e3b053b0..887d39090d7568 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -18,9 +18,11 @@ obj-y += common/ obj-y += dac/ obj-y += gyro/ obj-y += frequency/ +obj-y += humidity/ 
obj-y += imu/ obj-y += light/ obj-y += magnetometer/ +obj-y += orientation/ obj-y += pressure/ obj-y += temperature/ obj-y += trigger/ diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 28b39283bccffa..3bec9220df04e2 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -455,7 +455,12 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = { BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .scan_index = (_index), \ - .scan_type = IIO_ST('s', 14, 16, 2), \ + .scan_type = { \ + .sign = 's', \ + .realbits = 14, \ + .storagebits = 16, \ + .shift = 2, \ + }, \ .ext_info = bma180_ext_info, \ } diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index 1cae4e920c9ba9..3dcdbad654565f 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -262,6 +262,18 @@ static int accel_3d_parse_report(struct platform_device *pdev, st->accel[1].index, st->accel[1].report_id, st->accel[2].index, st->accel[2].report_id); + /* Set Sensitivity field ids, when there is no individual modifier */ + if (st->common_attributes.sensitivity.index < 0) { + sensor_hub_input_get_attribute_info(hsdev, + HID_FEATURE_REPORT, usage_id, + HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | + HID_USAGE_SENSOR_DATA_ACCELERATION, + &st->common_attributes.sensitivity); + dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n", + st->common_attributes.sensitivity.index, + st->common_attributes.sensitivity.report_id); + } + return ret; } diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 58e945594c7bb7..70f78c3062a718 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -43,19 +43,22 @@ struct ad7266_state { * The buffer needs to be large enough to hold two samples (4 bytes) and * the naturally aligned timestamp (8 bytes). 
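To make the alignment remark above concrete, here is a throwaway user-space check of the layout that the new sample/timestamp struct (introduced just below) gives the driver. __be16 and s64 are replaced by plain fixed-width types, and the exact padding depends on the ABI, so the printed numbers are only what one would expect on common 64-bit builds.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* mirrors the anonymous struct added to struct ad7266_state below */
struct ad7266_scan {
	uint16_t sample[2];	/* two 16-bit conversion results */
	int64_t timestamp;	/* must end up naturally aligned */
};

int main(void)
{
	printf("timestamp offset: %zu\n", offsetof(struct ad7266_scan, timestamp));
	printf("total size:       %zu\n", sizeof(struct ad7266_scan));
	/* typically 8 and 16: 4 bytes of samples, 4 of padding, 8 of timestamp */
	return 0;
}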
*/ - uint8_t data[ALIGN(4, sizeof(s64)) + sizeof(s64)] ____cacheline_aligned; + struct { + __be16 sample[2]; + s64 timestamp; + } data ____cacheline_aligned; }; static int ad7266_wakeup(struct ad7266_state *st) { /* Any read with >= 2 bytes will wake the device */ - return spi_read(st->spi, st->data, 2); + return spi_read(st->spi, &st->data.sample[0], 2); } static int ad7266_powerdown(struct ad7266_state *st) { /* Any read with < 2 bytes will powerdown the device */ - return spi_read(st->spi, st->data, 1); + return spi_read(st->spi, &st->data.sample[0], 1); } static int ad7266_preenable(struct iio_dev *indio_dev) @@ -84,9 +87,9 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p) struct ad7266_state *st = iio_priv(indio_dev); int ret; - ret = spi_read(st->spi, st->data, 4); + ret = spi_read(st->spi, st->data.sample, 4); if (ret == 0) { - iio_push_to_buffers_with_timestamp(indio_dev, st->data, + iio_push_to_buffers_with_timestamp(indio_dev, &st->data, pf->timestamp); } @@ -137,7 +140,7 @@ static int ad7266_read_single(struct ad7266_state *st, int *val, ad7266_select_input(st, address); ret = spi_sync(st->spi, &st->single_msg); - *val = be16_to_cpu(st->data[address % 2]); + *val = be16_to_cpu(st->data.sample[address % 2]); return ret; } @@ -442,15 +445,15 @@ static int ad7266_probe(struct spi_device *spi) ad7266_init_channels(indio_dev); /* wakeup */ - st->single_xfer[0].rx_buf = &st->data; + st->single_xfer[0].rx_buf = &st->data.sample[0]; st->single_xfer[0].len = 2; st->single_xfer[0].cs_change = 1; /* conversion */ - st->single_xfer[1].rx_buf = &st->data; + st->single_xfer[1].rx_buf = st->data.sample; st->single_xfer[1].len = 4; st->single_xfer[1].cs_change = 1; /* powerdown */ - st->single_xfer[2].tx_buf = &st->data; + st->single_xfer[2].tx_buf = &st->data.sample[0]; st->single_xfer[2].len = 1; spi_message_init(&st->single_msg); diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index 6118dced02b6ee..e283f2f2ee2fb8 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -1039,10 +1039,10 @@ static const struct iio_info max1238_info = { }; static const struct iio_info max1363_info = { - .read_event_value_new = &max1363_read_thresh, - .write_event_value_new = &max1363_write_thresh, - .read_event_config_new = &max1363_read_event_config, - .write_event_config_new = &max1363_write_event_config, + .read_event_value = &max1363_read_thresh, + .write_event_value = &max1363_write_thresh, + .read_event_config = &max1363_read_event_config, + .write_event_config = &max1363_write_event_config, .read_raw = &max1363_read_raw, .update_scan_mode = &max1363_update_scan_mode, .driver_module = THIS_MODULE, diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index c8c1baaec6c1bf..47dcb34ff44c47 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -362,7 +362,7 @@ static int mcp3422_probe(struct i2c_client *client, | MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240)); mcp3422_update_config(adc, config); - err = iio_device_register(indio_dev); + err = devm_iio_device_register(&client->dev, indio_dev); if (err < 0) return err; @@ -371,12 +371,6 @@ static int mcp3422_probe(struct i2c_client *client, return 0; } -static int mcp3422_remove(struct i2c_client *client) -{ - iio_device_unregister(i2c_get_clientdata(client)); - return 0; -} - static const struct i2c_device_id mcp3422_id[] = { { "mcp3422", 2 }, { "mcp3423", 3 }, @@ -400,7 +394,6 @@ static struct i2c_driver mcp3422_driver = { .of_match_table = of_match_ptr(mcp3422_of_match), }, 
.probe = mcp3422_probe, - .remove = mcp3422_remove, .id_table = mcp3422_id, }; module_i2c_driver(mcp3422_driver); diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c index 09727a71e9fa15..d0add8f9416ba8 100644 --- a/drivers/iio/adc/viperboard_adc.c +++ b/drivers/iio/adc/viperboard_adc.c @@ -42,12 +42,6 @@ struct vprbrd_adc { .indexed = 1, \ .channel = _index, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ - .scan_index = _index, \ - .scan_type = { \ - .sign = 'u', \ - .realbits = 8, \ - .storagebits = 8, \ - }, \ } static struct iio_chan_spec const vprbrd_adc_iio_channels[] = { @@ -73,7 +67,7 @@ static int vprbrd_iio_read_raw(struct iio_dev *iio_dev, mutex_lock(&vb->lock); admsg->cmd = VPRBRD_ADC_CMD_GET; - admsg->chan = chan->scan_index; + admsg->chan = chan->channel; admsg->val = 0x00; ret = usb_control_msg(vb->usb_dev, @@ -139,7 +133,7 @@ static int vprbrd_adc_probe(struct platform_device *pdev) indio_dev->channels = vprbrd_adc_iio_channels; indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels); - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(&pdev->dev, indio_dev); if (ret) { dev_err(&pdev->dev, "could not register iio (adc)"); return ret; @@ -150,22 +144,12 @@ static int vprbrd_adc_probe(struct platform_device *pdev) return 0; } -static int vprbrd_adc_remove(struct platform_device *pdev) -{ - struct iio_dev *indio_dev = platform_get_drvdata(pdev); - - iio_device_unregister(indio_dev); - - return 0; -} - static struct platform_driver vprbrd_adc_driver = { .driver = { .name = "viperboard-adc", .owner = THIS_MODULE, }, .probe = vprbrd_adc_probe, - .remove = vprbrd_adc_remove, }; module_platform_driver(vprbrd_adc_driver); diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index cb9c6366032cd2..f03b92fd38031e 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -299,7 +299,12 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ .address = addr, \ - .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 16, \ + .shift = 20 - bits, \ + }, \ .ext_info = ad5064_ext_info, \ } diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c index b968af50db0a1c..64634d7f578e2a 100644 --- a/drivers/iio/dac/ad5360.c +++ b/drivers/iio/dac/ad5360.c @@ -107,7 +107,12 @@ enum ad5360_type { BIT(IIO_CHAN_INFO_OFFSET) | \ BIT(IIO_CHAN_INFO_CALIBSCALE) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ - .scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 16, \ + .shift = 16 - (bits), \ + }, \ } static const struct ad5360_chip_info ad5360_chip_info_tbl[] = { diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c index a59ff0e7b88821..9de4c4d3828030 100644 --- a/drivers/iio/dac/ad5380.c +++ b/drivers/iio/dac/ad5380.c @@ -261,7 +261,12 @@ static struct iio_chan_spec_ext_info ad5380_ext_info[] = { BIT(IIO_CHAN_INFO_CALIBSCALE) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ - .scan_type = IIO_ST('u', (_bits), 16, 14 - (_bits)), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_bits), \ + .storagebits = 16, \ + .shift = 14 - (_bits), \ + }, \ .ext_info = ad5380_ext_info, \ } diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c index 3eeaa82075f7bc..787ef1d859c630 100644 --- a/drivers/iio/dac/ad5421.c +++ 
b/drivers/iio/dac/ad5421.c @@ -75,7 +75,7 @@ struct ad5421_state { * transfer buffers to live in their own cache lines. */ union { - u32 d32; + __be32 d32; u8 d8[4]; } data[2] ____cacheline_aligned; }; @@ -114,7 +114,11 @@ static const struct iio_chan_spec ad5421_channels[] = { BIT(IIO_CHAN_INFO_CALIBBIAS), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), - .scan_type = IIO_ST('u', 16, 16, 0), + .scan_type = { + .sign = 'u', + .realbits = 16, + .storagebits = 16, + }, .event_spec = ad5421_current_event, .num_event_specs = ARRAY_SIZE(ad5421_current_event), }, @@ -458,9 +462,9 @@ static int ad5421_read_event_value(struct iio_dev *indio_dev, static const struct iio_info ad5421_info = { .read_raw = ad5421_read_raw, .write_raw = ad5421_write_raw, - .read_event_config_new = ad5421_read_event_config, - .write_event_config_new = ad5421_write_event_config, - .read_event_value_new = ad5421_read_event_value, + .read_event_config = ad5421_read_event_config, + .write_event_config = ad5421_write_event_config, + .read_event_value = ad5421_read_event_value, .driver_module = THIS_MODULE, }; @@ -514,16 +518,7 @@ static int ad5421_probe(struct spi_device *spi) return ret; } - return iio_device_register(indio_dev); -} - -static int ad5421_remove(struct spi_device *spi) -{ - struct iio_dev *indio_dev = spi_get_drvdata(spi); - - iio_device_unregister(indio_dev); - - return 0; + return devm_iio_device_register(&spi->dev, indio_dev); } static struct spi_driver ad5421_driver = { @@ -532,7 +527,6 @@ static struct spi_driver ad5421_driver = { .owner = THIS_MODULE, }, .probe = ad5421_probe, - .remove = ad5421_remove, }; module_spi_driver(ad5421_driver); diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 1263b0e5ad84fd..46bb62a5c1d4d6 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -139,14 +139,19 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = { { }, }; -#define _AD5446_CHANNEL(bits, storage, shift, ext) { \ +#define _AD5446_CHANNEL(bits, storage, _shift, ext) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ .channel = 0, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ - .scan_type = IIO_ST('u', (bits), (storage), (shift)), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = (storage), \ + .shift = (_shift), \ + }, \ .ext_info = (ext), \ } diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c index 82e208f6cde202..64d7256cbb6d0a 100644 --- a/drivers/iio/dac/ad5449.c +++ b/drivers/iio/dac/ad5449.c @@ -204,7 +204,12 @@ static const struct iio_info ad5449_info = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ .address = (chan), \ - .scan_type = IIO_ST('u', (bits), 16, 12 - (bits)), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 16, \ + .shift = 12 - (bits), \ + }, \ } #define DECLARE_AD5449_CHANNELS(name, bits) \ diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index c0957a918e1712..1e6449346b5086 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -47,14 +47,16 @@ * @vref_mv: actual reference voltage used * @pwr_down_mask power down mask * @pwr_down_mode current power down mode + * @data: transfer buffer */ - struct ad5504_state { struct spi_device *spi; struct regulator *reg; unsigned short vref_mv; unsigned pwr_down_mask; unsigned pwr_down_mode; + + __be16 data[2] ____cacheline_aligned; }; /** @@ -66,31 +68,29 @@ 
enum ad5504_supported_device_ids { ID_AD5501, }; -static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val) +static int ad5504_spi_write(struct ad5504_state *st, u8 addr, u16 val) { - u16 tmp = cpu_to_be16(AD5504_CMD_WRITE | - AD5504_ADDR(addr) | + st->data[0] = cpu_to_be16(AD5504_CMD_WRITE | AD5504_ADDR(addr) | (val & AD5504_RES_MASK)); - return spi_write(spi, (u8 *)&tmp, 2); + return spi_write(st->spi, &st->data[0], 2); } -static int ad5504_spi_read(struct spi_device *spi, u8 addr) +static int ad5504_spi_read(struct ad5504_state *st, u8 addr) { - u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr)); - u16 val; int ret; - struct spi_transfer t = { - .tx_buf = &tmp, - .rx_buf = &val, - .len = 2, - }; - ret = spi_sync_transfer(spi, &t, 1); - + struct spi_transfer t = { + .tx_buf = &st->data[0], + .rx_buf = &st->data[1], + .len = 2, + }; + + st->data[0] = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr)); + ret = spi_sync_transfer(st->spi, &t, 1); if (ret < 0) return ret; - return be16_to_cpu(val) & AD5504_RES_MASK; + return be16_to_cpu(st->data[1]) & AD5504_RES_MASK; } static int ad5504_read_raw(struct iio_dev *indio_dev, @@ -104,7 +104,7 @@ static int ad5504_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - ret = ad5504_spi_read(st->spi, chan->address); + ret = ad5504_spi_read(st, chan->address); if (ret < 0) return ret; @@ -133,7 +133,7 @@ static int ad5504_write_raw(struct iio_dev *indio_dev, if (val >= (1 << chan->scan_type.realbits) || val < 0) return -EINVAL; - return ad5504_spi_write(st->spi, chan->address, val); + return ad5504_spi_write(st, chan->address, val); default: ret = -EINVAL; } @@ -197,12 +197,12 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev, else st->pwr_down_mask &= ~(1 << chan->channel); - ret = ad5504_spi_write(st->spi, AD5504_ADDR_CTRL, + ret = ad5504_spi_write(st, AD5504_ADDR_CTRL, AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) | AD5504_DAC_PWR(st->pwr_down_mask)); /* writes to the CTRL register must be followed by a NOOP */ - ad5504_spi_write(st->spi, AD5504_ADDR_NOOP, 0); + ad5504_spi_write(st, AD5504_ADDR_NOOP, 0); return ret ? 
ret : len; } @@ -261,7 +261,11 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .address = AD5504_ADDR_DAC(_chan), \ - .scan_type = IIO_ST('u', 12, 16, 0), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 12, \ + .storagebits = 16, \ + }, \ .ext_info = ad5504_ext_info, \ } diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 774dd968145bd0..e8199cce2aeaed 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -176,7 +176,12 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .address = (_chan), \ - .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_bits), \ + .storagebits = 16, \ + .shift = 16 - (_bits), \ + }, \ .ext_info = ad5624r_ext_info, \ } diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index 30e506e37dd26a..17aca4d9bd06e5 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c @@ -78,7 +78,7 @@ struct ad5686_state { */ union { - u32 d32; + __be32 d32; u8 d8[4]; } data[3] ____cacheline_aligned; }; @@ -267,7 +267,7 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = { { }, }; -#define AD5868_CHANNEL(chan, bits, shift) { \ +#define AD5868_CHANNEL(chan, bits, _shift) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ @@ -275,7 +275,12 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\ .address = AD5686_ADDR_DAC(chan), \ - .scan_type = IIO_ST('u', bits, 16, shift), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 16, \ + .shift = (_shift), \ + }, \ .ext_info = ad5686_ext_info, \ } diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c index 9a78d5abb2f664..a7c851f62d7c20 100644 --- a/drivers/iio/dac/ad5755.c +++ b/drivers/iio/dac/ad5755.c @@ -97,7 +97,7 @@ struct ad5755_state { */ union { - u32 d32; + __be32 d32; u8 d8[4]; } data[2] ____cacheline_aligned; }; @@ -392,7 +392,12 @@ static const struct iio_chan_spec_ext_info ad5755_ext_info[] = { BIT(IIO_CHAN_INFO_OFFSET) | \ BIT(IIO_CHAN_INFO_CALIBSCALE) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ - .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_bits), \ + .storagebits = 16, \ + .shift = 16 - (_bits), \ + }, \ .ext_info = ad5755_ext_info, \ } @@ -589,16 +594,7 @@ static int ad5755_probe(struct spi_device *spi) if (ret) return ret; - return iio_device_register(indio_dev); -} - -static int ad5755_remove(struct spi_device *spi) -{ - struct iio_dev *indio_dev = spi_get_drvdata(spi); - - iio_device_unregister(indio_dev); - - return 0; + return devm_iio_device_register(&spi->dev, indio_dev); } static const struct spi_device_id ad5755_id[] = { @@ -617,7 +613,6 @@ static struct spi_driver ad5755_driver = { .owner = THIS_MODULE, }, .probe = ad5755_probe, - .remove = ad5755_remove, .id_table = ad5755_id, }; module_spi_driver(ad5755_driver); diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c index a8ff5b2ed13ef3..d0d38165339d5b 100644 --- a/drivers/iio/dac/ad5764.c +++ b/drivers/iio/dac/ad5764.c @@ -83,7 +83,12 @@ enum ad5764_type { BIT(IIO_CHAN_INFO_CALIBSCALE) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ .info_mask_shared_by_type = 
BIT(IIO_CHAN_INFO_OFFSET), \ - .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)) \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_bits), \ + .storagebits = 16, \ + .shift = 16 - (_bits), \ + }, \ } #define DECLARE_AD5764_CHANNELS(_name, _bits) \ diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c index d64acbd89482b8..ae49afe2b3808c 100644 --- a/drivers/iio/dac/ad5791.c +++ b/drivers/iio/dac/ad5791.c @@ -91,6 +91,11 @@ struct ad5791_state { unsigned ctrl; unsigned pwr_down_mode; bool pwr_down; + + union { + __be32 d32; + u8 d8[4]; + } data[3] ____cacheline_aligned; }; /** @@ -104,48 +109,39 @@ enum ad5791_supported_device_ids { ID_AD5791, }; -static int ad5791_spi_write(struct spi_device *spi, u8 addr, u32 val) +static int ad5791_spi_write(struct ad5791_state *st, u8 addr, u32 val) { - union { - u32 d32; - u8 d8[4]; - } data; - - data.d32 = cpu_to_be32(AD5791_CMD_WRITE | + st->data[0].d32 = cpu_to_be32(AD5791_CMD_WRITE | AD5791_ADDR(addr) | (val & AD5791_DAC_MASK)); - return spi_write(spi, &data.d8[1], 3); + return spi_write(st->spi, &st->data[0].d8[1], 3); } -static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val) +static int ad5791_spi_read(struct ad5791_state *st, u8 addr, u32 *val) { - union { - u32 d32; - u8 d8[4]; - } data[3]; int ret; struct spi_transfer xfers[] = { { - .tx_buf = &data[0].d8[1], + .tx_buf = &st->data[0].d8[1], .bits_per_word = 8, .len = 3, .cs_change = 1, }, { - .tx_buf = &data[1].d8[1], - .rx_buf = &data[2].d8[1], + .tx_buf = &st->data[1].d8[1], + .rx_buf = &st->data[2].d8[1], .bits_per_word = 8, .len = 3, }, }; - data[0].d32 = cpu_to_be32(AD5791_CMD_READ | + st->data[0].d32 = cpu_to_be32(AD5791_CMD_READ | AD5791_ADDR(addr)); - data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP)); + st->data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP)); - ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers)); + ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers)); - *val = be32_to_cpu(data[2].d32); + *val = be32_to_cpu(st->data[2].d32); return ret; } @@ -210,7 +206,7 @@ static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev, } st->pwr_down = pwr_down; - ret = ad5791_spi_write(st->spi, AD5791_ADDR_CTRL, st->ctrl); + ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl); return ret ? 
ret : len; } @@ -263,7 +259,7 @@ static int ad5791_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - ret = ad5791_spi_read(st->spi, chan->address, val); + ret = ad5791_spi_read(st, chan->address, val); if (ret) return ret; *val &= AD5791_DAC_MASK; @@ -297,7 +293,7 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = { { }, }; -#define AD5791_CHAN(bits, shift) { \ +#define AD5791_CHAN(bits, _shift) { \ .type = IIO_VOLTAGE, \ .output = 1, \ .indexed = 1, \ @@ -306,7 +302,12 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_OFFSET), \ - .scan_type = IIO_ST('u', bits, 24, shift), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 24, \ + .shift = (_shift), \ + }, \ .ext_info = ad5791_ext_info, \ } @@ -330,7 +331,7 @@ static int ad5791_write_raw(struct iio_dev *indio_dev, val &= AD5791_RES_MASK(chan->scan_type.realbits); val <<= chan->scan_type.shift; - return ad5791_spi_write(st->spi, chan->address, val); + return ad5791_spi_write(st, chan->address, val); default: return -EINVAL; @@ -393,7 +394,7 @@ static int ad5791_probe(struct spi_device *spi) dev_warn(&spi->dev, "reference voltage unspecified\n"); } - ret = ad5791_spi_write(spi, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET); + ret = ad5791_spi_write(st, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET); if (ret) goto error_disable_reg_neg; @@ -405,7 +406,7 @@ static int ad5791_probe(struct spi_device *spi) | ((pdata && pdata->use_rbuf_gain2) ? 0 : AD5791_CTRL_RBUF) | AD5791_CTRL_BIN2SC; - ret = ad5791_spi_write(spi, AD5791_ADDR_CTRL, st->ctrl | + ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl | AD5791_CTRL_OPGND | AD5791_CTRL_DACTRI); if (ret) goto error_disable_reg_neg; diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c index 6e1903537950d9..de76e6a34c1ea9 100644 --- a/drivers/iio/dac/max517.c +++ b/drivers/iio/dac/max517.c @@ -146,7 +146,6 @@ static const struct iio_info max517_info = { .channel = (chan), \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ - .scan_type = IIO_ST('u', 8, 8, 0), \ } static const struct iio_chan_spec max517_channels[] = { diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 9f57ae84ab89c6..7d9f5c31d2fcc8 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -209,7 +209,6 @@ static const struct iio_chan_spec mcp4725_channel = { .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), - .scan_type = IIO_ST('u', 12, 16, 0), .ext_info = mcp4725_ext_info, }; diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c index 445c2aecfadde2..8d08c7ed1ea64d 100644 --- a/drivers/iio/gyro/adis16130.c +++ b/drivers/iio/gyro/adis16130.c @@ -161,13 +161,7 @@ static int adis16130_probe(struct spi_device *spi) indio_dev->info = &adis16130_info; indio_dev->modes = INDIO_DIRECT_MODE; - return iio_device_register(indio_dev); -} - -static int adis16130_remove(struct spi_device *spi) -{ - iio_device_unregister(spi_get_drvdata(spi)); - return 0; + return devm_iio_device_register(&spi->dev, indio_dev); } static struct spi_driver adis16130_driver = { @@ -176,7 +170,6 @@ static struct spi_driver adis16130_driver = { .owner = THIS_MODULE, }, .probe = adis16130_probe, - .remove = adis16130_remove, }; module_spi_driver(adis16130_driver); diff --git a/drivers/iio/gyro/adxrs450.c 
b/drivers/iio/gyro/adxrs450.c index 1e546ba7ba45e1..eb0e08ec9e20d4 100644 --- a/drivers/iio/gyro/adxrs450.c +++ b/drivers/iio/gyro/adxrs450.c @@ -434,23 +434,14 @@ static int adxrs450_probe(struct spi_device *spi) indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels); indio_dev->name = spi->dev.driver->name; - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(&spi->dev, indio_dev); if (ret) return ret; /* Get the device into a sane initial state */ ret = adxrs450_initial_setup(indio_dev); if (ret) - goto error_initial; - return 0; -error_initial: - iio_device_unregister(indio_dev); - return ret; -} - -static int adxrs450_remove(struct spi_device *spi) -{ - iio_device_unregister(spi_get_drvdata(spi)); + return ret; return 0; } @@ -468,7 +459,6 @@ static struct spi_driver adxrs450_driver = { .owner = THIS_MODULE, }, .probe = adxrs450_probe, - .remove = adxrs450_remove, .id_table = adxrs450_id, }; module_spi_driver(adxrs450_driver); diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c index e54f0f4959d37a..59d6bc3e04df87 100644 --- a/drivers/iio/gyro/hid-sensor-gyro-3d.c +++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c @@ -262,6 +262,17 @@ static int gyro_3d_parse_report(struct platform_device *pdev, st->gyro[1].index, st->gyro[1].report_id, st->gyro[2].index, st->gyro[2].report_id); + /* Set Sensitivity field ids, when there is no individual modifier */ + if (st->common_attributes.sensitivity.index < 0) { + sensor_hub_input_get_attribute_info(hsdev, + HID_FEATURE_REPORT, usage_id, + HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | + HID_USAGE_SENSOR_DATA_ANGL_VELOCITY, + &st->common_attributes.sensitivity); + dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n", + st->common_attributes.sensitivity.index, + st->common_attributes.sensitivity.report_id); + } return ret; } diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig new file mode 100644 index 00000000000000..463c4d9da79ec5 --- /dev/null +++ b/drivers/iio/humidity/Kconfig @@ -0,0 +1,15 @@ +# +# humidity sensor drivers +# +menu "Humidity sensors" + +config DHT11 + tristate "DHT11 (and compatible sensors) driver" + depends on GPIOLIB + help + This driver supports reading data via a single interrupt + generating GPIO line. Currently tested are DHT11 and DHT22. + Other sensors should work as well as long as they speak the + same protocol. + +endmenu diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile new file mode 100644 index 00000000000000..d5d36c0c95f952 --- /dev/null +++ b/drivers/iio/humidity/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for IIO humidity sensor drivers +# + +obj-$(CONFIG_DHT11) += dht11.o diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c new file mode 100644 index 00000000000000..d8771f546bf2d3 --- /dev/null +++ b/drivers/iio/humidity/dht11.c @@ -0,0 +1,294 @@ +/* + * DHT11/DHT22 bit banging GPIO driver + * + * Copyright (c) Harald Geyer + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
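Before the driver body that follows, a toy version of its core decoding step may help: dht11_decode_byte() further down turns eight measured high-pulse lengths into one byte, MSB first, by comparing each pulse against a threshold. The program below is plain user-space C with invented microsecond-scale timings; the real driver works on edge timestamps scaled by the measured time resolution.

#include <stdio.h>

/* same logic as dht11_decode_byte(): long pulse = 1, short pulse = 0, MSB first */
static unsigned char decode_byte(const int *timing, int threshold)
{
	unsigned char ret = 0;
	int i;

	for (i = 0; i < 8; ++i) {
		ret <<= 1;
		if (timing[i] >= threshold)
			++ret;
	}
	return ret;
}

int main(void)
{
	/* ~27us pulses encode 0, ~70us pulses encode 1 (made-up sample data) */
	int timing[8] = { 27, 70, 27, 27, 70, 70, 27, 70 };

	printf("decoded byte: 0x%02x\n", decode_byte(timing, 49));	/* 0x4d */
	return 0;
}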
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRIVER_NAME "dht11" + +#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */ + +#define DHT11_EDGES_PREAMBLE 4 +#define DHT11_BITS_PER_READ 40 +#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1) + +/* Data transmission timing (nano seconds) */ +#define DHT11_START_TRANSMISSION 18 /* ms */ +#define DHT11_SENSOR_RESPONSE 80000 +#define DHT11_START_BIT 50000 +#define DHT11_DATA_BIT_LOW 27000 +#define DHT11_DATA_BIT_HIGH 70000 + +struct dht11 { + struct device *dev; + + int gpio; + int irq; + + struct completion completion; + + s64 timestamp; + int temperature; + int humidity; + + /* num_edges: -1 means "no transmission in progress" */ + int num_edges; + struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ]; +}; + +static unsigned char dht11_decode_byte(int *timing, int threshold) +{ + unsigned char ret = 0; + int i; + + for (i = 0; i < 8; ++i) { + ret <<= 1; + if (timing[i] >= threshold) + ++ret; + } + + return ret; +} + +static int dht11_decode(struct dht11 *dht11, int offset) +{ + int i, t, timing[DHT11_BITS_PER_READ], threshold, + timeres = DHT11_SENSOR_RESPONSE; + unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; + + /* Calculate timestamp resolution */ + for (i = 0; i < dht11->num_edges; ++i) { + t = dht11->edges[i].ts - dht11->edges[i-1].ts; + if (t > 0 && t < timeres) + timeres = t; + } + if (2*timeres > DHT11_DATA_BIT_HIGH) { + pr_err("dht11: timeresolution %d too bad for decoding\n", + timeres); + return -EIO; + } + threshold = DHT11_DATA_BIT_HIGH / timeres; + if (DHT11_DATA_BIT_LOW/timeres + 1 >= threshold) + pr_err("dht11: WARNING: decoding ambiguous\n"); + + /* scale down with timeres and check validity */ + for (i = 0; i < DHT11_BITS_PER_READ; ++i) { + t = dht11->edges[offset + 2*i + 2].ts - + dht11->edges[offset + 2*i + 1].ts; + if (!dht11->edges[offset + 2*i + 1].value) + return -EIO; /* lost synchronisation */ + timing[i] = t / timeres; + } + + hum_int = dht11_decode_byte(timing, threshold); + hum_dec = dht11_decode_byte(&timing[8], threshold); + temp_int = dht11_decode_byte(&timing[16], threshold); + temp_dec = dht11_decode_byte(&timing[24], threshold); + checksum = dht11_decode_byte(&timing[32], threshold); + + if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) + return -EIO; + + dht11->timestamp = iio_get_time_ns(); + if (hum_int < 20) { /* DHT22 */ + dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) * + ((temp_int & 0x80) ? 
-100 : 100); + dht11->humidity = ((hum_int << 8) + hum_dec) * 100; + } else if (temp_dec == 0 && hum_dec == 0) { /* DHT11 */ + dht11->temperature = temp_int * 1000; + dht11->humidity = hum_int * 1000; + } else { + dev_err(dht11->dev, + "Don't know how to decode data: %d %d %d %d\n", + hum_int, hum_dec, temp_int, temp_dec); + return -EIO; + } + + return 0; +} + +static int dht11_read_raw(struct iio_dev *iio_dev, + const struct iio_chan_spec *chan, + int *val, int *val2, long m) +{ + struct dht11 *dht11 = iio_priv(iio_dev); + int ret; + + if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) { + reinit_completion(&dht11->completion); + + dht11->num_edges = 0; + ret = gpio_direction_output(dht11->gpio, 0); + if (ret) + goto err; + msleep(DHT11_START_TRANSMISSION); + ret = gpio_direction_input(dht11->gpio); + if (ret) + goto err; + + ret = wait_for_completion_killable_timeout(&dht11->completion, + HZ); + if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { + dev_err(&iio_dev->dev, + "Only %d signal edges detected\n", + dht11->num_edges); + ret = -ETIMEDOUT; + } + if (ret < 0) + goto err; + + ret = dht11_decode(dht11, + dht11->num_edges == DHT11_EDGES_PER_READ ? + DHT11_EDGES_PREAMBLE : + DHT11_EDGES_PREAMBLE - 2); + if (ret) + goto err; + } + + ret = IIO_VAL_INT; + if (chan->type == IIO_TEMP) + *val = dht11->temperature; + else if (chan->type == IIO_HUMIDITYRELATIVE) + *val = dht11->humidity; + else + ret = -EINVAL; +err: + dht11->num_edges = -1; + return ret; +} + +static const struct iio_info dht11_iio_info = { + .driver_module = THIS_MODULE, + .read_raw = dht11_read_raw, +}; + +/* + * IRQ handler called on GPIO edges +*/ +static irqreturn_t dht11_handle_irq(int irq, void *data) +{ + struct iio_dev *iio = data; + struct dht11 *dht11 = iio_priv(iio); + + /* TODO: Consider making the handler safe for IRQ sharing */ + if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) { + dht11->edges[dht11->num_edges].ts = iio_get_time_ns(); + dht11->edges[dht11->num_edges++].value = + gpio_get_value(dht11->gpio); + + if (dht11->num_edges >= DHT11_EDGES_PER_READ) + complete(&dht11->completion); + } + + return IRQ_HANDLED; +} + +static const struct iio_chan_spec dht11_chan_spec[] = { + { .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }, + { .type = IIO_HUMIDITYRELATIVE, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), } +}; + +static const struct of_device_id dht11_dt_ids[] = { + { .compatible = "dht11", }, + { } +}; +MODULE_DEVICE_TABLE(of, dht11_dt_ids); + +static int dht11_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct dht11 *dht11; + struct iio_dev *iio; + int ret; + + iio = devm_iio_device_alloc(dev, sizeof(*dht11)); + if (!iio) { + dev_err(dev, "Failed to allocate IIO device\n"); + return -ENOMEM; + } + + dht11 = iio_priv(iio); + dht11->dev = dev; + + dht11->gpio = ret = of_get_gpio(node, 0); + if (ret < 0) + return ret; + ret = devm_gpio_request_one(dev, dht11->gpio, GPIOF_IN, pdev->name); + if (ret) + return ret; + + dht11->irq = gpio_to_irq(dht11->gpio); + if (dht11->irq < 0) { + dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio); + return -EINVAL; + } + ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + pdev->name, iio); + if (ret) + return ret; + + dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1; + dht11->num_edges = -1; + + platform_set_drvdata(pdev, iio); + + 
init_completion(&dht11->completion); + iio->name = pdev->name; + iio->dev.parent = &pdev->dev; + iio->info = &dht11_iio_info; + iio->modes = INDIO_DIRECT_MODE; + iio->channels = dht11_chan_spec; + iio->num_channels = ARRAY_SIZE(dht11_chan_spec); + + return devm_iio_device_register(dev, iio); +} + +static struct platform_driver dht11_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = dht11_dt_ids, + }, + .probe = dht11_probe, +}; + +module_platform_driver(dht11_driver); + +MODULE_AUTHOR("Harald Geyer "); +MODULE_DESCRIPTION("DHT11 humidity/temperature sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 7f9152c3c4d3da..c67d83bdc8f091 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -37,6 +37,14 @@ static bool iio_buffer_is_active(struct iio_buffer *buf) return !list_empty(&buf->buffer_list); } +static bool iio_buffer_data_available(struct iio_buffer *buf) +{ + if (buf->access->data_available) + return buf->access->data_available(buf); + + return buf->stufftoread; +} + /** * iio_buffer_read_first_n_outer() - chrdev read for buffer access * @@ -48,13 +56,34 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, { struct iio_dev *indio_dev = filp->private_data; struct iio_buffer *rb = indio_dev->buffer; + int ret; if (!indio_dev->info) return -ENODEV; if (!rb || !rb->access->read_first_n) return -EINVAL; - return rb->access->read_first_n(rb, n, buf); + + do { + if (!iio_buffer_data_available(rb)) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_interruptible(rb->pollq, + iio_buffer_data_available(rb) || + indio_dev->info == NULL); + if (ret) + return ret; + if (indio_dev->info == NULL) + return -ENODEV; + } + + ret = rb->access->read_first_n(rb, n, buf); + if (ret == 0 && (filp->f_flags & O_NONBLOCK)) + ret = -EAGAIN; + } while (ret == 0); + + return ret; } /** @@ -70,7 +99,7 @@ unsigned int iio_buffer_poll(struct file *filp, return -ENODEV; poll_wait(filp, &rb->pollq, wait); - if (rb->stufftoread) + if (iio_buffer_data_available(rb)) return POLLIN | POLLRDNORM; /* need a way of knowing if there may be enough data... */ return 0; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 18f72e3d0ed6e6..acc911a836ca19 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -69,6 +69,7 @@ static const char * const iio_chan_type_name_spec[] = { [IIO_ALTVOLTAGE] = "altvoltage", [IIO_CCT] = "cct", [IIO_PRESSURE] = "pressure", + [IIO_HUMIDITYRELATIVE] = "humidityrelative", }; static const char * const iio_modifier_names[] = { @@ -107,6 +108,11 @@ static const char * const iio_chan_info_postfix[] = { [IIO_CHAN_INFO_INT_TIME] = "integration_time", }; +/** + * iio_find_channel_from_si() - get channel from its scan index + * @indio_dev: device + * @si: scan index to match + */ const struct iio_chan_spec *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) { @@ -922,6 +928,10 @@ struct device_type iio_device_type = { .release = iio_dev_release, }; +/** + * iio_device_alloc() - allocate an iio_dev from a driver + * @sizeof_priv: Space to allocate for private structure. 
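As a usage note for the kerneldoc above: sizeof_priv is the size of the driver's own state, which is allocated together with the struct iio_dev and reached through iio_priv(). The fragment below is only a sketch; foo_state and the surrounding code are invented and it is not buildable on its own.

struct foo_state {			/* hypothetical driver state */
	struct i2c_client *client;
	int last_raw;
};

struct iio_dev *indio_dev;
struct foo_state *st;

indio_dev = iio_device_alloc(sizeof(struct foo_state));
if (!indio_dev)
	return -ENOMEM;
st = iio_priv(indio_dev);	/* private area allocated alongside the iio_dev */
/* ... fill in st and indio_dev, then iio_device_register(indio_dev) ... */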
+ **/ struct iio_dev *iio_device_alloc(int sizeof_priv) { struct iio_dev *dev; @@ -962,6 +972,10 @@ struct iio_dev *iio_device_alloc(int sizeof_priv) } EXPORT_SYMBOL(iio_device_alloc); +/** + * iio_device_free() - free an iio_dev from a driver + * @dev: the iio_dev associated with the device + **/ void iio_device_free(struct iio_dev *dev) { if (dev) @@ -984,6 +998,20 @@ static int devm_iio_device_match(struct device *dev, void *res, void *data) return *r == data; } +/** + * devm_iio_device_alloc - Resource-managed iio_device_alloc() + * @dev: Device to allocate iio_dev for + * @sizeof_priv: Space to allocate for private structure. + * + * Managed iio_device_alloc. iio_dev allocated with this function is + * automatically freed on driver detach. + * + * If an iio_dev allocated with this function needs to be freed separately, + * devm_iio_device_free() must be used. + * + * RETURNS: + * Pointer to allocated iio_dev on success, NULL on failure. + */ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv) { struct iio_dev **ptr, *iio_dev; @@ -1006,6 +1034,13 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv) } EXPORT_SYMBOL_GPL(devm_iio_device_alloc); +/** + * devm_iio_device_free - Resource-managed iio_device_free() + * @dev: Device this iio_dev belongs to + * @iio_dev: the iio_dev associated with the device + * + * Free iio_dev allocated with devm_iio_device_alloc(). + */ void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev) { int rc; @@ -1080,6 +1115,10 @@ static const struct file_operations iio_buffer_fileops = { static const struct iio_buffer_setup_ops noop_ring_setup_ops; +/** + * iio_device_register() - register a device with the IIO subsystem + * @indio_dev: Device structure filled by the device driver + **/ int iio_device_register(struct iio_dev *indio_dev) { int ret; @@ -1141,6 +1180,10 @@ int iio_device_register(struct iio_dev *indio_dev) } EXPORT_SYMBOL(iio_device_register); +/** + * iio_device_unregister() - unregister a device from the IIO subsystem + * @indio_dev: Device structure representing the device. + **/ void iio_device_unregister(struct iio_dev *indio_dev) { mutex_lock(&indio_dev->info_exist_lock); @@ -1161,6 +1204,65 @@ void iio_device_unregister(struct iio_dev *indio_dev) mutex_unlock(&indio_dev->info_exist_lock); } EXPORT_SYMBOL(iio_device_unregister); + +static void devm_iio_device_unreg(struct device *dev, void *res) +{ + iio_device_unregister(*(struct iio_dev **)res); +} + +/** + * devm_iio_device_register - Resource-managed iio_device_register() + * @dev: Device to allocate iio_dev for + * @indio_dev: Device structure filled by the device driver + * + * Managed iio_device_register. The IIO device registered with this + * function is automatically unregistered on driver detach. This function + * calls iio_device_register() internally. Refer to that function for more + * information. + * + * If an iio_dev registered with this function needs to be unregistered + * separately, devm_iio_device_unregister() must be used. + * + * RETURNS: + * 0 on success, negative error number on failure. 
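This is the helper that lets several drivers earlier in this patch (mcp3422, ad5421, ad5755, adis16130, adxrs450, viperboard-adc) drop their remove() callbacks. A hedged sketch of the resulting probe() shape, with everything other than the IIO/SPI helpers invented:

static int foo_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct foo_state *st;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->spi = spi;

	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &foo_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = foo_channels;
	indio_dev->num_channels = ARRAY_SIZE(foo_channels);

	/* unregistered and freed automatically on driver detach */
	return devm_iio_device_register(&spi->dev, indio_dev);
}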
+ */ +int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev) +{ + struct iio_dev **ptr; + int ret; + + ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + *ptr = indio_dev; + ret = iio_device_register(indio_dev); + if (!ret) + devres_add(dev, ptr); + else + devres_free(ptr); + + return ret; +} +EXPORT_SYMBOL_GPL(devm_iio_device_register); + +/** + * devm_iio_device_unregister - Resource-managed iio_device_unregister() + * @dev: Device this iio_dev belongs to + * @indio_dev: the iio_dev associated with the device + * + * Unregister iio_dev registered with devm_iio_device_register(). + */ +void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev) +{ + int rc; + + rc = devres_release(dev, devm_iio_device_unreg, + devm_iio_device_match, indio_dev); + WARN_ON(rc); +} +EXPORT_SYMBOL_GPL(devm_iio_device_unregister); + subsys_initcall(iio_init); module_exit(iio_exit); diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index c10eab64bc0524..c9c1419fe6e0ff 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -42,6 +42,12 @@ struct iio_event_interface { struct attribute_group group; }; +/** + * iio_push_event() - try to add event to the list for userspace reading + * @indio_dev: IIO device structure + * @ev_code: What event + * @timestamp: When the event occurred + **/ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) { struct iio_event_interface *ev_int = indio_dev->event_interface; @@ -236,13 +242,9 @@ static ssize_t iio_ev_state_store(struct device *dev, if (ret < 0) return ret; - if (indio_dev->info->write_event_config) - ret = indio_dev->info->write_event_config(indio_dev, - this_attr->address, val); - else - ret = indio_dev->info->write_event_config_new(indio_dev, - this_attr->c, iio_ev_attr_type(this_attr), - iio_ev_attr_dir(this_attr), val); + ret = indio_dev->info->write_event_config(indio_dev, + this_attr->c, iio_ev_attr_type(this_attr), + iio_ev_attr_dir(this_attr), val); return (ret < 0) ? 
ret : len; } @@ -255,13 +257,9 @@ static ssize_t iio_ev_state_show(struct device *dev, struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val; - if (indio_dev->info->read_event_config) - val = indio_dev->info->read_event_config(indio_dev, - this_attr->address); - else - val = indio_dev->info->read_event_config_new(indio_dev, - this_attr->c, iio_ev_attr_type(this_attr), - iio_ev_attr_dir(this_attr)); + val = indio_dev->info->read_event_config(indio_dev, + this_attr->c, iio_ev_attr_type(this_attr), + iio_ev_attr_dir(this_attr)); if (val < 0) return val; else @@ -277,21 +275,13 @@ static ssize_t iio_ev_value_show(struct device *dev, int val, val2; int ret; - if (indio_dev->info->read_event_value) { - ret = indio_dev->info->read_event_value(indio_dev, - this_attr->address, &val); - if (ret < 0) - return ret; - return sprintf(buf, "%d\n", val); - } else { - ret = indio_dev->info->read_event_value_new(indio_dev, - this_attr->c, iio_ev_attr_type(this_attr), - iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), - &val, &val2); - if (ret < 0) - return ret; - return iio_format_value(buf, ret, val, val2); - } + ret = indio_dev->info->read_event_value(indio_dev, + this_attr->c, iio_ev_attr_type(this_attr), + iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), + &val, &val2); + if (ret < 0) + return ret; + return iio_format_value(buf, ret, val, val2); } static ssize_t iio_ev_value_store(struct device *dev, @@ -304,25 +294,16 @@ static ssize_t iio_ev_value_store(struct device *dev, int val, val2; int ret; - if (!indio_dev->info->write_event_value && - !indio_dev->info->write_event_value_new) + if (!indio_dev->info->write_event_value) return -EINVAL; - if (indio_dev->info->write_event_value) { - ret = kstrtoint(buf, 10, &val); - if (ret) - return ret; - ret = indio_dev->info->write_event_value(indio_dev, - this_attr->address, val); - } else { - ret = iio_str_to_fixpoint(buf, 100000, &val, &val2); - if (ret) - return ret; - ret = indio_dev->info->write_event_value_new(indio_dev, - this_attr->c, iio_ev_attr_type(this_attr), - iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), - val, val2); - } + ret = iio_str_to_fixpoint(buf, 100000, &val, &val2); + if (ret) + return ret; + ret = indio_dev->info->write_event_value(indio_dev, + this_attr->c, iio_ev_attr_type(this_attr), + iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), + val, val2); if (ret < 0) return ret; @@ -371,7 +352,7 @@ static int iio_device_add_event(struct iio_dev *indio_dev, return attrcount; } -static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev, +static int iio_device_add_event_sysfs(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { int ret = 0, i, attrcount = 0; @@ -414,89 +395,6 @@ static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev, return ret; } -static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan) -{ - int ret = 0, i, attrcount = 0; - u64 mask = 0; - char *postfix; - if (!chan->event_mask) - return 0; - - for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) { - postfix = kasprintf(GFP_KERNEL, "%s_%s_en", - iio_ev_type_text[i/IIO_EV_DIR_MAX], - iio_ev_dir_text[i%IIO_EV_DIR_MAX]); - if (postfix == NULL) { - ret = -ENOMEM; - goto error_ret; - } - if (chan->modified) - mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2, - i/IIO_EV_DIR_MAX, - i%IIO_EV_DIR_MAX); - else if (chan->differential) - mask = IIO_EVENT_CODE(chan->type, - 0, 0, - i%IIO_EV_DIR_MAX, - i/IIO_EV_DIR_MAX, - 0, - chan->channel, - 
chan->channel2); - else - mask = IIO_UNMOD_EVENT_CODE(chan->type, - chan->channel, - i/IIO_EV_DIR_MAX, - i%IIO_EV_DIR_MAX); - - ret = __iio_add_chan_devattr(postfix, - chan, - &iio_ev_state_show, - iio_ev_state_store, - mask, - 0, - &indio_dev->dev, - &indio_dev->event_interface-> - dev_attr_list); - kfree(postfix); - if (ret) - goto error_ret; - attrcount++; - postfix = kasprintf(GFP_KERNEL, "%s_%s_value", - iio_ev_type_text[i/IIO_EV_DIR_MAX], - iio_ev_dir_text[i%IIO_EV_DIR_MAX]); - if (postfix == NULL) { - ret = -ENOMEM; - goto error_ret; - } - ret = __iio_add_chan_devattr(postfix, chan, - iio_ev_value_show, - iio_ev_value_store, - mask, - 0, - &indio_dev->dev, - &indio_dev->event_interface-> - dev_attr_list); - kfree(postfix); - if (ret) - goto error_ret; - attrcount++; - } - ret = attrcount; -error_ret: - return ret; -} - - -static int iio_device_add_event_sysfs(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan) -{ - if (chan->event_mask) - return iio_device_add_event_sysfs_old(indio_dev, chan); - else - return iio_device_add_event_sysfs_new(indio_dev, chan); -} - static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev) { int j, ret, attrcount = 0; @@ -517,8 +415,6 @@ static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev) int j; for (j = 0; j < indio_dev->num_channels; j++) { - if (indio_dev->channels[j].event_mask != 0) - return true; if (indio_dev->channels[j].num_event_specs != 0) return true; } diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index bf5e70a32d3fb6..766fab24b7204b 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -55,15 +55,7 @@ static struct attribute *iio_trig_dev_attrs[] = { &dev_attr_name.attr, NULL, }; - -static struct attribute_group iio_trig_attr_group = { - .attrs = iio_trig_dev_attrs, -}; - -static const struct attribute_group *iio_trig_attr_groups[] = { - &iio_trig_attr_group, - NULL -}; +ATTRIBUTE_GROUPS(iio_trig_dev); int iio_trigger_register(struct iio_trigger *trig_info) { @@ -318,7 +310,7 @@ static ssize_t iio_trigger_read_current(struct device *dev, * iio_trigger_write_current() - trigger consumer sysfs set current trigger * * For trigger consumers the current_trigger interface allows the trigger - * used for this device to be specified at run time based on the triggers + * used for this device to be specified at run time based on the trigger's * name. **/ static ssize_t iio_trigger_write_current(struct device *dev, @@ -356,7 +348,7 @@ static ssize_t iio_trigger_write_current(struct device *dev, indio_dev->trig = trig; - if (oldtrig && indio_dev->trig != oldtrig) + if (oldtrig) iio_trigger_put(oldtrig); if (indio_dev->trig) iio_trigger_get(indio_dev->trig); @@ -403,7 +395,7 @@ static void iio_trig_release(struct device *device) static struct device_type iio_trig_type = { .release = iio_trig_release, - .groups = iio_trig_attr_groups, + .groups = iio_trig_dev_groups, }; static void iio_trig_subirqmask(struct irq_data *d) @@ -506,6 +498,23 @@ static int devm_iio_trigger_match(struct device *dev, void *res, void *data) return *r == data; } +/** + * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc() + * @dev: Device to allocate iio_trigger for + * @fmt: trigger name format. If it includes format + * specifiers, the additional arguments following + * format are formatted and inserted in the resulting + * string replacing their respective specifiers. + * + * Managed iio_trigger_alloc. 
iio_trigger allocated with this function is + * automatically freed on driver detach. + * + * If an iio_trigger allocated with this function needs to be freed separately, + * devm_iio_trigger_free() must be used. + * + * RETURNS: + * Pointer to allocated iio_trigger on success, NULL on failure. + */ struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...) { @@ -532,6 +541,13 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, } EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc); +/** + * devm_iio_trigger_free - Resource-managed iio_trigger_free() + * @dev: Device this iio_dev belongs to + * @iio_trig: the iio_trigger associated with the device + * + * Free iio_trigger allocated with devm_iio_trigger_alloc(). + */ void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig) { int rc; diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c index 95c6fc81c2c78f..7134e8ada09ac5 100644 --- a/drivers/iio/kfifo_buf.c +++ b/drivers/iio/kfifo_buf.c @@ -42,7 +42,6 @@ static int iio_request_update_kfifo(struct iio_buffer *r) } else { kfifo_reset_out(&buf->kf); } - r->stufftoread = false; mutex_unlock(&buf->user_lock); return ret; @@ -108,7 +107,7 @@ static int iio_store_to_kfifo(struct iio_buffer *r, ret = kfifo_in(&kf->kf, data, 1); if (ret != 1) return -EBUSY; - r->stufftoread = true; + wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM); return 0; @@ -127,13 +126,6 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r, ret = -EINVAL; else ret = kfifo_to_user(&kf->kf, buf, n, &copied); - - if (kfifo_is_empty(&kf->kf)) - r->stufftoread = false; - /* verify it is still empty to avoid race */ - if (!kfifo_is_empty(&kf->kf)) - r->stufftoread = true; - mutex_unlock(&kf->user_lock); if (ret < 0) return ret; @@ -141,6 +133,18 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r, return copied; } +static bool iio_kfifo_buf_data_available(struct iio_buffer *r) +{ + struct iio_kfifo *kf = iio_to_kfifo(r); + bool empty; + + mutex_lock(&kf->user_lock); + empty = kfifo_is_empty(&kf->kf); + mutex_unlock(&kf->user_lock); + + return !empty; +} + static void iio_kfifo_buffer_release(struct iio_buffer *buffer) { struct iio_kfifo *kf = iio_to_kfifo(buffer); @@ -153,6 +157,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer) static const struct iio_buffer_access_funcs kfifo_access_funcs = { .store_to = &iio_store_to_kfifo, .read_first_n = &iio_read_first_n_kfifo, + .data_available = iio_kfifo_buf_data_available, .request_update = &iio_request_update_kfifo, .get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo, .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo, diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index a022f27c6690d5..d12b2a0dbfbc51 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -27,6 +27,17 @@ config APDS9300 To compile this driver as a module, choose M here: the module will be called apds9300. +config CM32181 + depends on I2C + tristate "CM32181 driver" + help + Say Y here if you use cm32181. + This option enables ambient light sensor using + Capella cm32181 device driver. + + To compile this driver as a module, choose M here: + the module will be called cm32181. 
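A minimal usage sketch for the managed trigger allocator documented earlier in this patch, assuming the usual IIO probe context (the foo_probe() wrapper and the "%s-dev%d" name format are illustrative only; devm_iio_trigger_alloc(), iio_trigger_register() and the iio_dev fields come from the IIO core, <linux/iio/trigger.h>):

/* Illustrative fragment, not part of the patch */
static int foo_probe(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;

	/* Managed allocation: freed automatically on driver detach */
	trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
				      indio_dev->name, indio_dev->id);
	if (!trig)
		return -ENOMEM;

	/* A real driver would also set trig->ops and trig->dev.parent here */
	return iio_trigger_register(trig);
}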
+ config CM36651 depends on I2C tristate "CM36651 driver" diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile index daa327f39e04e7..60e35ac07ff0ee 100644 --- a/drivers/iio/light/Makefile +++ b/drivers/iio/light/Makefile @@ -5,6 +5,7 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_ADJD_S311) += adjd_s311.o obj-$(CONFIG_APDS9300) += apds9300.o +obj-$(CONFIG_CM32181) += cm32181.o obj-$(CONFIG_CM36651) += cm36651.o obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 83d15c5baf646c..f3068477b4668e 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c @@ -155,7 +155,12 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p) BIT(IIO_CHAN_INFO_INT_TIME), \ .channel2 = (IIO_MOD_LIGHT_##_color), \ .scan_index = (_scan_idx), \ - .scan_type = IIO_ST('u', 10, 16, 0), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 10, \ + .storagebits = 16, \ + .endianness = IIO_CPU, \ + }, \ } static const struct iio_chan_spec adjd_s311_channels[] = { diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c index 51097bbd59c94a..9ddde0ca9c3415 100644 --- a/drivers/iio/light/apds9300.c +++ b/drivers/iio/light/apds9300.c @@ -344,10 +344,10 @@ static const struct iio_info apds9300_info_no_irq = { static const struct iio_info apds9300_info = { .driver_module = THIS_MODULE, .read_raw = apds9300_read_raw, - .read_event_value_new = apds9300_read_thresh, - .write_event_value_new = apds9300_write_thresh, - .read_event_config_new = apds9300_read_interrupt_config, - .write_event_config_new = apds9300_write_interrupt_config, + .read_event_value = apds9300_read_thresh, + .write_event_value = apds9300_write_thresh, + .read_event_config = apds9300_read_interrupt_config, + .write_event_config = apds9300_write_interrupt_config, }; static const struct iio_event_spec apds9300_event_spec[] = { diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c new file mode 100644 index 00000000000000..f17b4e6183c6ba --- /dev/null +++ b/drivers/iio/light/cm32181.c @@ -0,0 +1,379 @@ +/* + * Copyright (C) 2013 Capella Microsystems Inc. + * Author: Kevin Tsai + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2, as published + * by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Registers Address */ +#define CM32181_REG_ADDR_CMD 0x00 +#define CM32181_REG_ADDR_ALS 0x04 +#define CM32181_REG_ADDR_STATUS 0x06 +#define CM32181_REG_ADDR_ID 0x07 + +/* Number of Configurable Registers */ +#define CM32181_CONF_REG_NUM 0x01 + +/* CMD register */ +#define CM32181_CMD_ALS_ENABLE 0x00 +#define CM32181_CMD_ALS_DISABLE 0x01 +#define CM32181_CMD_ALS_INT_EN 0x02 + +#define CM32181_CMD_ALS_IT_SHIFT 6 +#define CM32181_CMD_ALS_IT_MASK (0x0F << CM32181_CMD_ALS_IT_SHIFT) +#define CM32181_CMD_ALS_IT_DEFAULT (0x00 << CM32181_CMD_ALS_IT_SHIFT) + +#define CM32181_CMD_ALS_SM_SHIFT 11 +#define CM32181_CMD_ALS_SM_MASK (0x03 << CM32181_CMD_ALS_SM_SHIFT) +#define CM32181_CMD_ALS_SM_DEFAULT (0x01 << CM32181_CMD_ALS_SM_SHIFT) + +#define CM32181_MLUX_PER_BIT 5 /* ALS_SM=01 IT=800ms */ +#define CM32181_MLUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */ +#define CM32181_CALIBSCALE_DEFAULT 1000 +#define CM32181_CALIBSCALE_RESOLUTION 1000 +#define MLUX_PER_LUX 1000 + +static const u8 cm32181_reg[CM32181_CONF_REG_NUM] = { + CM32181_REG_ADDR_CMD, +}; + +static const int als_it_bits[] = {12, 8, 0, 1, 2, 3}; +static const int als_it_value[] = {25000, 50000, 100000, 200000, 400000, + 800000}; + +struct cm32181_chip { + struct i2c_client *client; + struct mutex lock; + u16 conf_regs[CM32181_CONF_REG_NUM]; + int calibscale; +}; + +/** + * cm32181_reg_init() - Initialize CM32181 registers + * @cm32181: pointer of struct cm32181. + * + * Initialize CM32181 ambient light sensor register to default values. + * + * Return: 0 for success; otherwise for error code. + */ +static int cm32181_reg_init(struct cm32181_chip *cm32181) +{ + struct i2c_client *client = cm32181->client; + int i; + s32 ret; + + ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ID); + if (ret < 0) + return ret; + + /* check device ID */ + if ((ret & 0xFF) != 0x81) + return -ENODEV; + + /* Default Values */ + cm32181->conf_regs[CM32181_REG_ADDR_CMD] = CM32181_CMD_ALS_ENABLE | + CM32181_CMD_ALS_IT_DEFAULT | CM32181_CMD_ALS_SM_DEFAULT; + cm32181->calibscale = CM32181_CALIBSCALE_DEFAULT; + + /* Initialize registers*/ + for (i = 0; i < CM32181_CONF_REG_NUM; i++) { + ret = i2c_smbus_write_word_data(client, cm32181_reg[i], + cm32181->conf_regs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * cm32181_read_als_it() - Get sensor integration time (ms) + * @cm32181: pointer of struct cm32181 + * @val: pointer of int to load the als_it value. + * + * Report the current integartion time by millisecond. + * + * Return: IIO_VAL_INT for success, otherwise -EINVAL. + */ +static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val) +{ + u16 als_it; + int i; + + als_it = cm32181->conf_regs[CM32181_REG_ADDR_CMD]; + als_it &= CM32181_CMD_ALS_IT_MASK; + als_it >>= CM32181_CMD_ALS_IT_SHIFT; + for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) { + if (als_it == als_it_bits[i]) { + *val = als_it_value[i]; + return IIO_VAL_INT; + } + } + + return -EINVAL; +} + +/** + * cm32181_write_als_it() - Write sensor integration time + * @cm32181: pointer of struct cm32181. + * @val: integration time by millisecond. + * + * Convert integration time (ms) to sensor value. + * + * Return: i2c_smbus_write_word_data command return value. 
+ */ +static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val) +{ + struct i2c_client *client = cm32181->client; + u16 als_it; + int ret, i, n; + + n = ARRAY_SIZE(als_it_value); + for (i = 0; i < n; i++) + if (val <= als_it_value[i]) + break; + if (i >= n) + i = n - 1; + + als_it = als_it_bits[i]; + als_it <<= CM32181_CMD_ALS_IT_SHIFT; + + mutex_lock(&cm32181->lock); + cm32181->conf_regs[CM32181_REG_ADDR_CMD] &= + ~CM32181_CMD_ALS_IT_MASK; + cm32181->conf_regs[CM32181_REG_ADDR_CMD] |= + als_it; + ret = i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD, + cm32181->conf_regs[CM32181_REG_ADDR_CMD]); + mutex_unlock(&cm32181->lock); + + return ret; +} + +/** + * cm32181_get_lux() - report current lux value + * @cm32181: pointer of struct cm32181. + * + * Convert sensor raw data to lux. It depends on integration + * time and claibscale variable. + * + * Return: Positive value is lux, otherwise is error code. + */ +static int cm32181_get_lux(struct cm32181_chip *cm32181) +{ + struct i2c_client *client = cm32181->client; + int ret; + int als_it; + unsigned long lux; + + ret = cm32181_read_als_it(cm32181, &als_it); + if (ret < 0) + return -EINVAL; + + lux = CM32181_MLUX_PER_BIT; + lux *= CM32181_MLUX_PER_BIT_BASE_IT; + lux /= als_it; + + ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ALS); + if (ret < 0) + return ret; + + lux *= ret; + lux *= cm32181->calibscale; + lux /= CM32181_CALIBSCALE_RESOLUTION; + lux /= MLUX_PER_LUX; + + if (lux > 0xFFFF) + lux = 0xFFFF; + + return lux; +} + +static int cm32181_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct cm32181_chip *cm32181 = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_PROCESSED: + ret = cm32181_get_lux(cm32181); + if (ret < 0) + return ret; + *val = ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_CALIBSCALE: + *val = cm32181->calibscale; + return IIO_VAL_INT; + case IIO_CHAN_INFO_INT_TIME: + ret = cm32181_read_als_it(cm32181, val); + return ret; + } + + return -EINVAL; +} + +static int cm32181_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct cm32181_chip *cm32181 = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_CALIBSCALE: + cm32181->calibscale = val; + return val; + case IIO_CHAN_INFO_INT_TIME: + ret = cm32181_write_als_it(cm32181, val); + return ret; + } + + return -EINVAL; +} + +/** + * cm32181_get_it_available() - Get available ALS IT value + * @dev: pointer of struct device. + * @attr: pointer of struct device_attribute. + * @buf: pointer of return string buffer. + * + * Display the available integration time values by millisecond. + * + * Return: string length. 
+ */ +static ssize_t cm32181_get_it_available(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i, n, len; + + n = ARRAY_SIZE(als_it_value); + for (i = 0, len = 0; i < n; i++) + len += sprintf(buf + len, "%d ", als_it_value[i]); + return len + sprintf(buf + len, "\n"); +} + +static const struct iio_chan_spec cm32181_channels[] = { + { + .type = IIO_LIGHT, + .info_mask_separate = + BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_CALIBSCALE) | + BIT(IIO_CHAN_INFO_INT_TIME), + } +}; + +static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, + S_IRUGO, cm32181_get_it_available, NULL, 0); + +static struct attribute *cm32181_attributes[] = { + &iio_dev_attr_in_illuminance_integration_time_available.dev_attr.attr, + NULL, +}; + +static const struct attribute_group cm32181_attribute_group = { + .attrs = cm32181_attributes +}; + +static const struct iio_info cm32181_info = { + .driver_module = THIS_MODULE, + .read_raw = &cm32181_read_raw, + .write_raw = &cm32181_write_raw, + .attrs = &cm32181_attribute_group, +}; + +static int cm32181_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct cm32181_chip *cm32181; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm32181)); + if (!indio_dev) { + dev_err(&client->dev, "devm_iio_device_alloc failed\n"); + return -ENOMEM; + } + + cm32181 = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); + cm32181->client = client; + + mutex_init(&cm32181->lock); + indio_dev->dev.parent = &client->dev; + indio_dev->channels = cm32181_channels; + indio_dev->num_channels = ARRAY_SIZE(cm32181_channels); + indio_dev->info = &cm32181_info; + indio_dev->name = id->name; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = cm32181_reg_init(cm32181); + if (ret) { + dev_err(&client->dev, + "%s: register init failed\n", + __func__); + return ret; + } + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(&client->dev, + "%s: regist device failed\n", + __func__); + return ret; + } + + return 0; +} + +static int cm32181_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + + iio_device_unregister(indio_dev); + return 0; +} + +static const struct i2c_device_id cm32181_id[] = { + { "cm32181", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, cm32181_id); + +static const struct of_device_id cm32181_of_match[] = { + { .compatible = "capella,cm32181" }, + { } +}; + +static struct i2c_driver cm32181_driver = { + .driver = { + .name = "cm32181", + .of_match_table = of_match_ptr(cm32181_of_match), + .owner = THIS_MODULE, + }, + .id_table = cm32181_id, + .probe = cm32181_probe, + .remove = cm32181_remove, +}; + +module_i2c_driver(cm32181_driver); + +MODULE_AUTHOR("Kevin Tsai "); +MODULE_DESCRIPTION("CM32181 ambient light sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c index 0922e39b0ea979..0a142af83e25db 100644 --- a/drivers/iio/light/cm36651.c +++ b/drivers/iio/light/cm36651.c @@ -488,7 +488,11 @@ static int cm36651_write_raw(struct iio_dev *indio_dev, } static int cm36651_read_prox_thresh(struct iio_dev *indio_dev, - u64 event_code, int *val) + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int *val, int *val2) { struct cm36651_data *cm36651 = iio_priv(indio_dev); @@ -498,7 +502,11 @@ static int cm36651_read_prox_thresh(struct iio_dev *indio_dev, } static int 
cm36651_write_prox_thresh(struct iio_dev *indio_dev, - u64 event_code, int val) + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int val, int val2) { struct cm36651_data *cm36651 = iio_priv(indio_dev); struct i2c_client *client = cm36651->client; @@ -520,7 +528,10 @@ static int cm36651_write_prox_thresh(struct iio_dev *indio_dev, } static int cm36651_write_prox_event_config(struct iio_dev *indio_dev, - u64 event_code, int state) + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state) { struct cm36651_data *cm36651 = iio_priv(indio_dev); int cmd, ret = -EINVAL; @@ -536,7 +547,9 @@ static int cm36651_write_prox_event_config(struct iio_dev *indio_dev, } static int cm36651_read_prox_event_config(struct iio_dev *indio_dev, - u64 event_code) + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) { struct cm36651_data *cm36651 = iio_priv(indio_dev); int event_en; @@ -559,12 +572,22 @@ static int cm36651_read_prox_event_config(struct iio_dev *indio_dev, .channel2 = IIO_MOD_LIGHT_##_color, \ } \ +static const struct iio_event_spec cm36651_event_spec[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_ENABLE), + } +}; + static const struct iio_chan_spec cm36651_channels[] = { { .type = IIO_PROXIMITY, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_INT_TIME), - .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER) + .event_spec = cm36651_event_spec, + .num_event_specs = ARRAY_SIZE(cm36651_event_spec), }, CM36651_LIGHT_CHANNEL(RED, CM36651_LIGHT_CHANNEL_IDX_RED), CM36651_LIGHT_CHANNEL(GREEN, CM36651_LIGHT_CHANNEL_IDX_GREEN), @@ -693,7 +716,7 @@ static const struct of_device_id cm36651_of_match[] = { static struct i2c_driver cm36651_driver = { .driver = { .name = "cm36651", - .of_match_table = of_match_ptr(cm36651_of_match), + .of_match_table = cm36651_of_match, .owner = THIS_MODULE, }, .probe = cm36651_probe, diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c index dc79835be30836..5ea4a03c7e71b4 100644 --- a/drivers/iio/light/gp2ap020a00f.c +++ b/drivers/iio/light/gp2ap020a00f.c @@ -1388,10 +1388,10 @@ static const struct iio_chan_spec gp2ap020a00f_channels[] = { static const struct iio_info gp2ap020a00f_info = { .read_raw = &gp2ap020a00f_read_raw, - .read_event_value_new = &gp2ap020a00f_read_event_val, - .read_event_config_new = &gp2ap020a00f_read_event_config, - .write_event_value_new = &gp2ap020a00f_write_event_val, - .write_event_config_new = &gp2ap020a00f_write_event_config, + .read_event_value = &gp2ap020a00f_read_event_val, + .read_event_config = &gp2ap020a00f_read_event_config, + .write_event_value = &gp2ap020a00f_write_event_val, + .write_event_config = &gp2ap020a00f_write_event_config, .driver_module = THIS_MODULE, }; diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index 8e8b9d72285373..621541fb10a986 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -229,6 +229,17 @@ static int als_parse_report(struct platform_device *pdev, dev_dbg(&pdev->dev, "als %x:%x\n", st->als_illum.index, st->als_illum.report_id); + /* Set Sensitivity field ids, when there is no individual modifier */ + if (st->common_attributes.sensitivity.index < 0) { + sensor_hub_input_get_attribute_info(hsdev, + HID_FEATURE_REPORT, usage_id, + 
HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | + HID_USAGE_SENSOR_DATA_LIGHT, + &st->common_attributes.sensitivity); + dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n", + st->common_attributes.sensitivity.index, + st->common_attributes.sensitivity.report_id); + } return ret; } diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c index 45df2204614a86..887fecf1f9bb8e 100644 --- a/drivers/iio/light/tcs3472.c +++ b/drivers/iio/light/tcs3472.c @@ -67,7 +67,12 @@ struct tcs3472_data { .channel2 = IIO_MOD_LIGHT_##_color, \ .address = _addr, \ .scan_index = _si, \ - .scan_type = IIO_ST('u', 16, 16, 0), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_CPU, \ + }, \ } static const int tcs3472_agains[] = { 1, 4, 16, 60 }; diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index 5e5d9dea22c598..3d8110157f2d4b 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c @@ -702,10 +702,10 @@ static const struct iio_info tsl2563_info = { .driver_module = THIS_MODULE, .read_raw = &tsl2563_read_raw, .write_raw = &tsl2563_write_raw, - .read_event_value_new = &tsl2563_read_thresh, - .write_event_value_new = &tsl2563_write_thresh, - .read_event_config_new = &tsl2563_read_interrupt_config, - .write_event_config_new = &tsl2563_write_interrupt_config, + .read_event_value = &tsl2563_read_thresh, + .write_event_value = &tsl2563_write_thresh, + .read_event_config = &tsl2563_read_interrupt_config, + .write_event_config = &tsl2563_write_interrupt_config, }; static int tsl2563_probe(struct i2c_client *client, @@ -714,6 +714,7 @@ static int tsl2563_probe(struct i2c_client *client, struct iio_dev *indio_dev; struct tsl2563_chip *chip; struct tsl2563_platform_data *pdata = client->dev.platform_data; + struct device_node *np = client->dev.of_node; int err = 0; u8 id = 0; @@ -750,6 +751,9 @@ static int tsl2563_probe(struct i2c_client *client, if (pdata) chip->cover_comp_gain = pdata->cover_comp_gain; + else if (np) + of_property_read_u32(np, "amstaos,cover-comp-gain", + &chip->cover_comp_gain); else chip->cover_comp_gain = 1; diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index ecb3341ef9c07a..d948c4778ba6cc 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -56,7 +56,7 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, u8 rdy_mask, u8 data_reg, int *val) { int tries = 20; - u16 buf; + __be16 buf; int ret; ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, @@ -179,13 +179,7 @@ static int vcnl4000_probe(struct i2c_client *client, indio_dev->name = VCNL4000_DRV_NAME; indio_dev->modes = INDIO_DIRECT_MODE; - return iio_device_register(indio_dev); -} - -static int vcnl4000_remove(struct i2c_client *client) -{ - iio_device_unregister(i2c_get_clientdata(client)); - return 0; + return devm_iio_device_register(&client->dev, indio_dev); } static struct i2c_driver vcnl4000_driver = { @@ -194,7 +188,6 @@ static struct i2c_driver vcnl4000_driver = { .owner = THIS_MODULE, }, .probe = vcnl4000_probe, - .remove = vcnl4000_remove, .id_table = vcnl4000_id, }; diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c index b26e1028a0a0b1..6d162b7e7af583 100644 --- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c +++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c @@ -263,6 +263,18 @@ static int magn_3d_parse_report(struct platform_device *pdev, st->magn[1].index, st->magn[1].report_id, 
st->magn[2].index, st->magn[2].report_id); + /* Set Sensitivity field ids, when there is no individual modifier */ + if (st->common_attributes.sensitivity.index < 0) { + sensor_hub_input_get_attribute_info(hsdev, + HID_FEATURE_REPORT, usage_id, + HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | + HID_USAGE_SENSOR_DATA_ORIENTATION, + &st->common_attributes.sensitivity); + dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n", + st->common_attributes.sensitivity.index, + st->common_attributes.sensitivity.report_id); + } + return ret; } diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index becf54496967ae..4b65b6d3bdb176 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c @@ -266,7 +266,11 @@ static const struct iio_chan_spec mag3110_channels[] = { .type = IIO_TEMP, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .scan_index = 3, - .scan_type = IIO_ST('s', 8, 8, 0), + .scan_type = { + .sign = 's', + .realbits = 8, + .storagebits = 8, + }, }, IIO_CHAN_SOFT_TIMESTAMP(4), }; diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig new file mode 100644 index 00000000000000..58c62c837e12f3 --- /dev/null +++ b/drivers/iio/orientation/Kconfig @@ -0,0 +1,19 @@ +# +# Inclinometer sensors +# +# When adding new entries keep the list in alphabetical order + +menu "Inclinometer sensors" + +config HID_SENSOR_INCLINOMETER_3D + depends on HID_SENSOR_HUB + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID Inclinometer 3D" + help + Say yes here to build support for the HID SENSOR + Inclinometer 3D. + +endmenu diff --git a/drivers/iio/orientation/Makefile b/drivers/iio/orientation/Makefile new file mode 100644 index 00000000000000..2c97572ee919b4 --- /dev/null +++ b/drivers/iio/orientation/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for industrial I/O Inclinometer sensor drivers +# + +# When adding new entries keep the list in alphabetical order +obj-$(CONFIG_HID_SENSOR_INCLINOMETER_3D) += hid-sensor-incl-3d.o diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c new file mode 100644 index 00000000000000..070feab08faa8e --- /dev/null +++ b/drivers/iio/orientation/hid-sensor-incl-3d.c @@ -0,0 +1,428 @@ +/* + * HID Sensors Driver + * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../common/hid-sensors/hid-sensor-trigger.h" + +enum incl_3d_channel { + CHANNEL_SCAN_INDEX_X, + CHANNEL_SCAN_INDEX_Y, + CHANNEL_SCAN_INDEX_Z, + INCLI_3D_CHANNEL_MAX, +}; + +struct incl_3d_state { + struct hid_sensor_hub_callbacks callbacks; + struct hid_sensor_common common_attributes; + struct hid_sensor_hub_attribute_info incl[INCLI_3D_CHANNEL_MAX]; + u32 incl_val[INCLI_3D_CHANNEL_MAX]; +}; + +static const u32 incl_3d_addresses[INCLI_3D_CHANNEL_MAX] = { + HID_USAGE_SENSOR_ORIENT_TILT_X, + HID_USAGE_SENSOR_ORIENT_TILT_Y, + HID_USAGE_SENSOR_ORIENT_TILT_Z +}; + +/* Channel definitions */ +static const struct iio_chan_spec incl_3d_channels[] = { + { + .type = IIO_INCLI, + .modified = 1, + .channel2 = IIO_MOD_X, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_SAMP_FREQ) | + BIT(IIO_CHAN_INFO_HYSTERESIS), + .scan_index = CHANNEL_SCAN_INDEX_X, + }, { + .type = IIO_INCLI, + .modified = 1, + .channel2 = IIO_MOD_Y, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_SAMP_FREQ) | + BIT(IIO_CHAN_INFO_HYSTERESIS), + .scan_index = CHANNEL_SCAN_INDEX_Y, + }, { + .type = IIO_INCLI, + .modified = 1, + .channel2 = IIO_MOD_Z, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_SAMP_FREQ) | + BIT(IIO_CHAN_INFO_HYSTERESIS), + .scan_index = CHANNEL_SCAN_INDEX_Z, + } +}; + +/* Adjust channel real bits based on report descriptor */ +static void incl_3d_adjust_channel_bit_mask(struct iio_chan_spec *chan, + int size) +{ + chan->scan_type.sign = 's'; + /* Real storage bits will change based on the report desc. 
*/ + chan->scan_type.realbits = size * 8; + /* Maximum size of a sample to capture is u32 */ + chan->scan_type.storagebits = sizeof(u32) * 8; +} + +/* Channel read_raw handler */ +static int incl_3d_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, + long mask) +{ + struct incl_3d_state *incl_state = iio_priv(indio_dev); + int report_id = -1; + u32 address; + int ret_type; + + *val = 0; + *val2 = 0; + switch (mask) { + case IIO_CHAN_INFO_RAW: + report_id = + incl_state->incl[chan->scan_index].report_id; + address = incl_3d_addresses[chan->scan_index]; + if (report_id >= 0) + *val = sensor_hub_input_attr_get_raw_value( + incl_state->common_attributes.hsdev, + HID_USAGE_SENSOR_INCLINOMETER_3D, address, + report_id); + else { + return -EINVAL; + } + ret_type = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_SCALE: + *val = incl_state->incl[CHANNEL_SCAN_INDEX_X].units; + ret_type = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_OFFSET: + *val = hid_sensor_convert_exponent( + incl_state->incl[CHANNEL_SCAN_INDEX_X].unit_expo); + ret_type = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_SAMP_FREQ: + ret_type = hid_sensor_read_samp_freq_value( + &incl_state->common_attributes, val, val2); + break; + case IIO_CHAN_INFO_HYSTERESIS: + ret_type = hid_sensor_read_raw_hyst_value( + &incl_state->common_attributes, val, val2); + break; + default: + ret_type = -EINVAL; + break; + } + + return ret_type; +} + +/* Channel write_raw handler */ +static int incl_3d_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, + int val2, + long mask) +{ + struct incl_3d_state *incl_state = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + ret = hid_sensor_write_samp_freq_value( + &incl_state->common_attributes, val, val2); + break; + case IIO_CHAN_INFO_HYSTERESIS: + ret = hid_sensor_write_raw_hyst_value( + &incl_state->common_attributes, val, val2); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static const struct iio_info incl_3d_info = { + .driver_module = THIS_MODULE, + .read_raw = &incl_3d_read_raw, + .write_raw = &incl_3d_write_raw, +}; + +/* Function to push data to buffer */ +static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len) +{ + dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n"); + iio_push_to_buffers(indio_dev, (u8 *)data); +} + +/* Callback handler to send event after all samples are received and captured */ +static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev, + unsigned usage_id, + void *priv) +{ + struct iio_dev *indio_dev = platform_get_drvdata(priv); + struct incl_3d_state *incl_state = iio_priv(indio_dev); + + dev_dbg(&indio_dev->dev, "incl_3d_proc_event [%d]\n", + incl_state->common_attributes.data_ready); + if (incl_state->common_attributes.data_ready) + hid_sensor_push_data(indio_dev, + (u8 *)incl_state->incl_val, + sizeof(incl_state->incl_val)); + + return 0; +} + +/* Capture samples in local storage */ +static int incl_3d_capture_sample(struct hid_sensor_hub_device *hsdev, + unsigned usage_id, + size_t raw_len, char *raw_data, + void *priv) +{ + struct iio_dev *indio_dev = platform_get_drvdata(priv); + struct incl_3d_state *incl_state = iio_priv(indio_dev); + int ret = 0; + + switch (usage_id) { + case HID_USAGE_SENSOR_ORIENT_TILT_X: + incl_state->incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data; + break; + case HID_USAGE_SENSOR_ORIENT_TILT_Y: + incl_state->incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data; + break; + case 
HID_USAGE_SENSOR_ORIENT_TILT_Z: + incl_state->incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* Parse report which is specific to an usage id*/ +static int incl_3d_parse_report(struct platform_device *pdev, + struct hid_sensor_hub_device *hsdev, + struct iio_chan_spec *channels, + unsigned usage_id, + struct incl_3d_state *st) +{ + int ret; + + ret = sensor_hub_input_get_attribute_info(hsdev, + HID_INPUT_REPORT, + usage_id, + HID_USAGE_SENSOR_ORIENT_TILT_X, + &st->incl[CHANNEL_SCAN_INDEX_X]); + if (ret) + return ret; + incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_X], + st->incl[CHANNEL_SCAN_INDEX_X].size); + + ret = sensor_hub_input_get_attribute_info(hsdev, + HID_INPUT_REPORT, + usage_id, + HID_USAGE_SENSOR_ORIENT_TILT_Y, + &st->incl[CHANNEL_SCAN_INDEX_Y]); + if (ret) + return ret; + incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Y], + st->incl[CHANNEL_SCAN_INDEX_Y].size); + + ret = sensor_hub_input_get_attribute_info(hsdev, + HID_INPUT_REPORT, + usage_id, + HID_USAGE_SENSOR_ORIENT_TILT_Z, + &st->incl[CHANNEL_SCAN_INDEX_Z]); + if (ret) + return ret; + incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Z], + st->incl[CHANNEL_SCAN_INDEX_Z].size); + + dev_dbg(&pdev->dev, "incl_3d %x:%x, %x:%x, %x:%x\n", + st->incl[0].index, + st->incl[0].report_id, + st->incl[1].index, st->incl[1].report_id, + st->incl[2].index, st->incl[2].report_id); + + /* Set Sensitivity field ids, when there is no individual modifier */ + if (st->common_attributes.sensitivity.index < 0) { + sensor_hub_input_get_attribute_info(hsdev, + HID_FEATURE_REPORT, usage_id, + HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | + HID_USAGE_SENSOR_DATA_ORIENTATION, + &st->common_attributes.sensitivity); + dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n", + st->common_attributes.sensitivity.index, + st->common_attributes.sensitivity.report_id); + } + return ret; +} + +/* Function to initialize the processing for usage id */ +static int hid_incl_3d_probe(struct platform_device *pdev) +{ + int ret; + static char *name = "incli_3d"; + struct iio_dev *indio_dev; + struct incl_3d_state *incl_state; + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_chan_spec *channels; + + indio_dev = devm_iio_device_alloc(&pdev->dev, + sizeof(struct incl_3d_state)); + if (indio_dev == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, indio_dev); + + incl_state = iio_priv(indio_dev); + incl_state->common_attributes.hsdev = hsdev; + incl_state->common_attributes.pdev = pdev; + + ret = hid_sensor_parse_common_attributes(hsdev, + HID_USAGE_SENSOR_INCLINOMETER_3D, + &incl_state->common_attributes); + if (ret) { + dev_err(&pdev->dev, "failed to setup common attributes\n"); + return ret; + } + + channels = kmemdup(incl_3d_channels, sizeof(incl_3d_channels), + GFP_KERNEL); + if (!channels) { + dev_err(&pdev->dev, "failed to duplicate channels\n"); + return -ENOMEM; + } + + ret = incl_3d_parse_report(pdev, hsdev, channels, + HID_USAGE_SENSOR_INCLINOMETER_3D, incl_state); + if (ret) { + dev_err(&pdev->dev, "failed to setup attributes\n"); + goto error_free_dev_mem; + } + + indio_dev->channels = channels; + indio_dev->num_channels = ARRAY_SIZE(incl_3d_channels); + indio_dev->dev.parent = &pdev->dev; + indio_dev->info = &incl_3d_info; + indio_dev->name = name; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + NULL, NULL); + if (ret) { + 
dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); + goto error_free_dev_mem; + } + incl_state->common_attributes.data_ready = false; + ret = hid_sensor_setup_trigger(indio_dev, name, + &incl_state->common_attributes); + if (ret) { + dev_err(&pdev->dev, "trigger setup failed\n"); + goto error_unreg_buffer_funcs; + } + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(&pdev->dev, "device register failed\n"); + goto error_remove_trigger; + } + + incl_state->callbacks.send_event = incl_3d_proc_event; + incl_state->callbacks.capture_sample = incl_3d_capture_sample; + incl_state->callbacks.pdev = pdev; + ret = sensor_hub_register_callback(hsdev, + HID_USAGE_SENSOR_INCLINOMETER_3D, + &incl_state->callbacks); + if (ret) { + dev_err(&pdev->dev, "callback reg failed\n"); + goto error_iio_unreg; + } + + return 0; + +error_iio_unreg: + iio_device_unregister(indio_dev); +error_remove_trigger: + hid_sensor_remove_trigger(&incl_state->common_attributes); +error_unreg_buffer_funcs: + iio_triggered_buffer_cleanup(indio_dev); +error_free_dev_mem: + kfree(indio_dev->channels); + return ret; +} + +/* Function to deinitialize the processing for usage id */ +static int hid_incl_3d_remove(struct platform_device *pdev) +{ + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct incl_3d_state *incl_state = iio_priv(indio_dev); + + sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D); + iio_device_unregister(indio_dev); + hid_sensor_remove_trigger(&incl_state->common_attributes); + iio_triggered_buffer_cleanup(indio_dev); + kfree(indio_dev->channels); + + return 0; +} + +static struct platform_device_id hid_incl_3d_ids[] = { + { + /* Format: HID-SENSOR-usage_id_in_hex_lowercase */ + .name = "HID-SENSOR-200086", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, hid_incl_3d_ids); + +static struct platform_driver hid_incl_3d_platform_driver = { + .id_table = hid_incl_3d_ids, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = hid_incl_3d_probe, + .remove = hid_incl_3d_remove, +}; +module_platform_driver(hid_incl_3d_platform_driver); + +MODULE_DESCRIPTION("HID Sensor Inclinometer 3D"); +MODULE_AUTHOR("Srinivas Pandruvada "); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig index 4f2e0f9bad8c10..a8b9cae5c17355 100644 --- a/drivers/iio/pressure/Kconfig +++ b/drivers/iio/pressure/Kconfig @@ -5,6 +5,18 @@ menu "Pressure sensors" +config MPL3115 + tristate "Freescale MPL3115A2 pressure sensor driver" + depends on I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for the Freescale MPL3115A2 + pressure sensor / altimeter. + + To compile this driver as a module, choose M here: the module + will be called mpl3115. 
+ config IIO_ST_PRESS tristate "STMicroelectronics pressure sensor Driver" depends on (I2C || SPI_MASTER) && SYSFS diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile index be71464c27525b..42bb9fcf54362d 100644 --- a/drivers/iio/pressure/Makefile +++ b/drivers/iio/pressure/Makefile @@ -3,6 +3,7 @@ # # When adding new entries keep the list in alphabetical order +obj-$(CONFIG_MPL3115) += mpl3115.o obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o st_pressure-y := st_pressure_core.o st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c new file mode 100644 index 00000000000000..ac8c8ab723e567 --- /dev/null +++ b/drivers/iio/pressure/mpl3115.c @@ -0,0 +1,329 @@ +/* + * mpl3115.c - Support for Freescale MPL3115A2 pressure/temperature sensor + * + * Copyright (c) 2013 Peter Meerwald + * + * This file is subject to the terms and conditions of version 2 of + * the GNU General Public License. See the file COPYING in the main + * directory of this archive for more details. + * + * (7-bit I2C slave address 0x60) + * + * TODO: FIFO buffer, altimeter mode, oversampling, continuous mode, + * interrupts, user offset correction, raw mode + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MPL3115_STATUS 0x00 +#define MPL3115_OUT_PRESS 0x01 /* MSB first, 20 bit */ +#define MPL3115_OUT_TEMP 0x04 /* MSB first, 12 bit */ +#define MPL3115_WHO_AM_I 0x0c +#define MPL3115_CTRL_REG1 0x26 + +#define MPL3115_DEVICE_ID 0xc4 + +#define MPL3115_STATUS_PRESS_RDY BIT(2) +#define MPL3115_STATUS_TEMP_RDY BIT(1) + +#define MPL3115_CTRL_RESET BIT(2) /* software reset */ +#define MPL3115_CTRL_OST BIT(1) /* initiate measurement */ +#define MPL3115_CTRL_ACTIVE BIT(0) /* continuous measurement */ +#define MPL3115_CTRL_OS_258MS (BIT(5) | BIT(4)) /* 64x oversampling */ + +struct mpl3115_data { + struct i2c_client *client; + struct mutex lock; + u8 ctrl_reg1; +}; + +static int mpl3115_request(struct mpl3115_data *data) +{ + int ret, tries = 15; + + /* trigger measurement */ + ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1, + data->ctrl_reg1 | MPL3115_CTRL_OST); + if (ret < 0) + return ret; + + while (tries-- > 0) { + ret = i2c_smbus_read_byte_data(data->client, MPL3115_CTRL_REG1); + if (ret < 0) + return ret; + /* wait for data ready, i.e. 
OST cleared */ + if (!(ret & MPL3115_CTRL_OST)) + break; + msleep(20); + } + + if (tries < 0) { + dev_err(&data->client->dev, "data not ready\n"); + return -EIO; + } + + return 0; +} + +static int mpl3115_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct mpl3115_data *data = iio_priv(indio_dev); + s32 tmp = 0; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + if (iio_buffer_enabled(indio_dev)) + return -EBUSY; + + switch (chan->type) { + case IIO_PRESSURE: /* in 0.25 pascal / LSB */ + mutex_lock(&data->lock); + ret = mpl3115_request(data); + if (ret < 0) { + mutex_unlock(&data->lock); + return ret; + } + ret = i2c_smbus_read_i2c_block_data(data->client, + MPL3115_OUT_PRESS, 3, (u8 *) &tmp); + mutex_unlock(&data->lock); + if (ret < 0) + return ret; + *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23); + return IIO_VAL_INT; + case IIO_TEMP: /* in 0.0625 celsius / LSB */ + mutex_lock(&data->lock); + ret = mpl3115_request(data); + if (ret < 0) { + mutex_unlock(&data->lock); + return ret; + } + ret = i2c_smbus_read_i2c_block_data(data->client, + MPL3115_OUT_TEMP, 2, (u8 *) &tmp); + mutex_unlock(&data->lock); + if (ret < 0) + return ret; + *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15); + return IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_PRESSURE: + *val = 0; + *val2 = 250; /* want kilopascal */ + return IIO_VAL_INT_PLUS_MICRO; + case IIO_TEMP: + *val = 0; + *val2 = 62500; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + } + return -EINVAL; +} + +static irqreturn_t mpl3115_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mpl3115_data *data = iio_priv(indio_dev); + u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */ + int ret, pos = 0; + + mutex_lock(&data->lock); + ret = mpl3115_request(data); + if (ret < 0) { + mutex_unlock(&data->lock); + goto done; + } + + memset(buffer, 0, sizeof(buffer)); + if (test_bit(0, indio_dev->active_scan_mask)) { + ret = i2c_smbus_read_i2c_block_data(data->client, + MPL3115_OUT_PRESS, 3, &buffer[pos]); + if (ret < 0) { + mutex_unlock(&data->lock); + goto done; + } + pos += 4; + } + + if (test_bit(1, indio_dev->active_scan_mask)) { + ret = i2c_smbus_read_i2c_block_data(data->client, + MPL3115_OUT_TEMP, 2, &buffer[pos]); + if (ret < 0) { + mutex_unlock(&data->lock); + goto done; + } + } + mutex_unlock(&data->lock); + + iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_get_time_ns()); + +done: + iio_trigger_notify_done(indio_dev->trig); + return IRQ_HANDLED; +} + +static const struct iio_chan_spec mpl3115_channels[] = { + { + .type = IIO_PRESSURE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 0, + .scan_type = { + .sign = 's', + .realbits = 20, + .storagebits = 32, + .shift = 12, + .endianness = IIO_BE, + } + }, + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 1, + .scan_type = { + .sign = 's', + .realbits = 12, + .storagebits = 16, + .shift = 4, + .endianness = IIO_BE, + } + }, + IIO_CHAN_SOFT_TIMESTAMP(2), +}; + +static const struct iio_info mpl3115_info = { + .read_raw = &mpl3115_read_raw, + .driver_module = THIS_MODULE, +}; + +static int mpl3115_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct mpl3115_data *data; + struct iio_dev *indio_dev; + int ret; + + ret = 
i2c_smbus_read_byte_data(client, MPL3115_WHO_AM_I); + if (ret < 0) + return ret; + if (ret != MPL3115_DEVICE_ID) + return -ENODEV; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + data->client = client; + mutex_init(&data->lock); + + i2c_set_clientdata(client, indio_dev); + indio_dev->info = &mpl3115_info; + indio_dev->name = id->name; + indio_dev->dev.parent = &client->dev; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = mpl3115_channels; + indio_dev->num_channels = ARRAY_SIZE(mpl3115_channels); + + /* software reset, I2C transfer is aborted (fails) */ + i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1, + MPL3115_CTRL_RESET); + msleep(50); + + data->ctrl_reg1 = MPL3115_CTRL_OS_258MS; + ret = i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1, + data->ctrl_reg1); + if (ret < 0) + return ret; + + ret = iio_triggered_buffer_setup(indio_dev, NULL, + mpl3115_trigger_handler, NULL); + if (ret < 0) + return ret; + + ret = iio_device_register(indio_dev); + if (ret < 0) + goto buffer_cleanup; + return 0; + +buffer_cleanup: + iio_triggered_buffer_cleanup(indio_dev); + return ret; +} + +static int mpl3115_standby(struct mpl3115_data *data) +{ + return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1, + data->ctrl_reg1 & ~MPL3115_CTRL_ACTIVE); +} + +static int mpl3115_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + + iio_device_unregister(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); + mpl3115_standby(iio_priv(indio_dev)); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mpl3115_suspend(struct device *dev) +{ + return mpl3115_standby(iio_priv(i2c_get_clientdata( + to_i2c_client(dev)))); +} + +static int mpl3115_resume(struct device *dev) +{ + struct mpl3115_data *data = iio_priv(i2c_get_clientdata( + to_i2c_client(dev))); + + return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1, + data->ctrl_reg1); +} + +static SIMPLE_DEV_PM_OPS(mpl3115_pm_ops, mpl3115_suspend, mpl3115_resume); +#define MPL3115_PM_OPS (&mpl3115_pm_ops) +#else +#define MPL3115_PM_OPS NULL +#endif + +static const struct i2c_device_id mpl3115_id[] = { + { "mpl3115", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, mpl3115_id); + +static struct i2c_driver mpl3115_driver = { + .driver = { + .name = "mpl3115", + .pm = MPL3115_PM_OPS, + }, + .probe = mpl3115_probe, + .remove = mpl3115_remove, + .id_table = mpl3115_id, +}; +module_i2c_driver(mpl3115_driver); + +MODULE_AUTHOR("Peter Meerwald "); +MODULE_DESCRIPTION("Freescale MPL3115 pressure/temperature driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 12fef76c791c52..45126879ad28a2 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } -#define VLAN_NONE 0xfff -#define FILTER_SEL_VLAN_NONE 0xffff -#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ -#define FILTER_SEL_WIDTH_VIN_P_FC \ - (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ -#define FILTER_SEL_WIDTH_TAG_P_FC \ - (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ -#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) - -static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, - struct l2t_entry *l2t) -{ - unsigned int 
ntuple = 0; - u32 viid; - - switch (dev->rdev.lldi.filt_mode) { - - /* default filter mode */ - case HW_TPL_FR_MT_PR_IV_P_FC: - if (l2t->vlan == VLAN_NONE) - ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; - else { - ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; - ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC; - } - ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << - FILTER_SEL_WIDTH_VLD_TAG_P_FC; - break; - case HW_TPL_FR_MT_PR_OV_P_FC: { - viid = cxgb4_port_viid(l2t->neigh->dev); - - ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; - ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; - ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; - ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << - FILTER_SEL_WIDTH_VLD_TAG_P_FC; - break; - } - default: - break; - } - return ntuple; -} - static int send_connect(struct c4iw_ep *ep) { struct cpl_act_open_req *req; @@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep) req->local_ip = la->sin_addr.s_addr; req->peer_ip = ra->sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); - req->params = cpu_to_be32(select_ntuple(ep->com.dev, - ep->dst, ep->l2t)); + req->params = cpu_to_be32(cxgb4_select_ntuple( + ep->com.dev->rdev.lldi.ports[0], + ep->l2t)); req->opt2 = cpu_to_be32(opt2); } else { req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); @@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep) req6->peer_ip_lo = *((__be64 *) (ra6->sin6_addr.s6_addr + 8)); req6->opt0 = cpu_to_be64(opt0); - req6->params = cpu_to_be32( - select_ntuple(ep->com.dev, ep->dst, - ep->l2t)); + req6->params = cpu_to_be32(cxgb4_select_ntuple( + ep->com.dev->rdev.lldi.ports[0], + ep->l2t)); req6->opt2 = cpu_to_be32(opt2); } } else { @@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep) t5_req->peer_ip = ra->sin_addr.s_addr; t5_req->opt0 = cpu_to_be64(opt0); t5_req->params = cpu_to_be64(V_FILTER_TUPLE( - select_ntuple(ep->com.dev, - ep->dst, ep->l2t))); + cxgb4_select_ntuple( + ep->com.dev->rdev.lldi.ports[0], + ep->l2t))); t5_req->opt2 = cpu_to_be32(opt2); } else { t5_req6 = (struct cpl_t5_act_open_req6 *) @@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep) (ra6->sin6_addr.s6_addr + 8)); t5_req6->opt0 = cpu_to_be64(opt0); t5_req6->params = (__force __be64)cpu_to_be32( - select_ntuple(ep->com.dev, ep->dst, ep->l2t)); + cxgb4_select_ntuple( + ep->com.dev->rdev.lldi.ports[0], + ep->l2t)); t5_req6->opt2 = cpu_to_be32(opt2); } } @@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) memset(req, 0, sizeof(*req)); req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); - req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, + req->le.filter = cpu_to_be32(cxgb4_select_ntuple( + ep->com.dev->rdev.lldi.ports[0], ep->l2t)); sin = (struct sockaddr_in *)&ep->com.local_addr; req->le.lport = sin->sin_port; @@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) /* * Allocate a server TID. */ - if (dev->rdev.lldi.enable_fw_ofld_conn) + if (dev->rdev.lldi.enable_fw_ofld_conn && + ep->com.local_addr.ss_family == AF_INET) ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, cm_id->local_addr.ss_family, ep); else @@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) /* * Calculate the server tid from filter hit index from cpl_rx_pkt. 
*/ - stid = (__force int) cpu_to_be32((__force u32) rss->hash_val) - - dev->rdev.lldi.tids->sftid_base - + dev->rdev.lldi.tids->nstids; + stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); if (!lep) { @@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) window = (__force u16) htons((__force u16)tcph->window); /* Calcuate filter portion for LE region. */ - filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e)); + filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( + dev->rdev.lldi.ports[0], + e)); /* * Synthesize the cpl_pass_accept_req. We have everything except the diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index c29b5c83883314..cdc7df4fdb8aed 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -31,6 +31,7 @@ */ #include +#include /* For ARPHRD_xxx */ #include #include #include "ipoib.h" @@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, return -EINVAL; pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); - if (!pdev) + if (!pdev || pdev->type != ARPHRD_INFINIBAND) return -ENODEV; ppriv = netdev_priv(pdev); diff --git a/drivers/input/input.c b/drivers/input/input.c index 846ccdd905b19b..d2965e4b32243a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int break; case EV_ABS: + input_alloc_absinfo(dev); + if (!dev->absinfo) + return; + __set_bit(code, dev->absbit); break; diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c index 8755f5f3ad37c2..0cb7ef59071b79 100644 --- a/drivers/input/serio/serport.c +++ b/drivers/input/serio/serport.c @@ -124,7 +124,7 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c { struct serport *serport = (struct serport*) tty->disc_data; unsigned long flags; - unsigned int ch_flags; + unsigned int ch_flags = 0; int i; spin_lock_irqsave(&serport->lock, flags); @@ -133,18 +133,20 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c goto out; for (i = 0; i < count; i++) { - switch (fp[i]) { - case TTY_FRAME: - ch_flags = SERIO_FRAME; - break; - - case TTY_PARITY: - ch_flags = SERIO_PARITY; - break; - - default: - ch_flags = 0; - break; + if (fp) { + switch (fp[i]) { + case TTY_FRAME: + ch_flags = SERIO_FRAME; + break; + + case TTY_PARITY: + ch_flags = SERIO_PARITY; + break; + + default: + ch_flags = 0; + break; + } } serio_interrupt(serport->serio, cp[i], ch_flags); diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index 75762d6ff3ba70..aa127ba392a45c 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result) } } -static irqreturn_t zforce_interrupt(int irq, void *dev_id) +static irqreturn_t zforce_irq(int irq, void *dev_id) +{ + struct zforce_ts *ts = dev_id; + struct i2c_client *client = ts->client; + + if (ts->suspended && device_may_wakeup(&client->dev)) + pm_wakeup_event(&client->dev, 500); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t zforce_irq_thread(int irq, void *dev_id) { struct zforce_ts *ts = dev_id; struct i2c_client *client = ts->client; @@ -465,12 +476,10 @@ static 
irqreturn_t zforce_interrupt(int irq, void *dev_id) u8 *payload; /* - * When suspended, emit a wakeup signal if necessary and return. + * When still suspended, return. * Due to the level-interrupt we will get re-triggered later. */ if (ts->suspended) { - if (device_may_wakeup(&client->dev)) - pm_wakeup_event(&client->dev, 500); msleep(20); return IRQ_HANDLED; } @@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client, * Therefore we can trigger the interrupt anytime it is low and do * not need to limit it to the interrupt edge. */ - ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, - zforce_interrupt, + ret = devm_request_threaded_irq(&client->dev, client->irq, + zforce_irq, zforce_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, input_dev->name, ts); if (ret) { diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 497bd026c2378e..4a48255281887e 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card) int i; struct pci_dev *tmp_hfcpci = NULL; -#ifdef __BIG_ENDIAN -#error "not running on big endian machines now" -#endif - strcpy(tmp, hfcpci_revision); printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c index f6ab63aa699590..33eeb4602c7e71 100644 --- a/drivers/isdn/hisax/telespci.c +++ b/drivers/isdn/hisax/telespci.c @@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card) struct IsdnCardState *cs = card->cs; char tmp[64]; -#ifdef __BIG_ENDIAN -#error "not running on big endian machines now" -#endif - strcpy(tmp, telespci_revision); printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_TELESPCI) diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 05188351711d2d..a97263e902ffc6 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip, if (i % 2) goto err; - mutex_lock(&chip->lock); - for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); - if (ret) { - mutex_unlock(&chip->lock); + if (ret) return -EINVAL; - } } - mutex_unlock(&chip->lock); - return size; err: @@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev, { struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); struct lp55xx_chip *chip = led->chip; + int ret; mutex_lock(&chip->lock); chip->engine_idx = nr; lp5521_load_engine(chip); + ret = lp5521_update_program_memory(chip, buf, len); mutex_unlock(&chip->lock); - return lp5521_update_program_memory(chip, buf, len); + return ret; } store_load(1) store_load(2) diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 6b553d9f4266d5..fd9ab5f61441c5 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip, if (i % 2) goto err; - mutex_lock(&chip->lock); - for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); - if (ret) { - mutex_unlock(&chip->lock); + if (ret) return -EINVAL; - } } - mutex_unlock(&chip->lock); - return size; err: @@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev, { struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); struct lp55xx_chip *chip = led->chip; + int ret; mutex_lock(&chip->lock); 
chip->engine_idx = nr; lp5523_load_engine_and_select_page(chip); + ret = lp5523_update_program_memory(chip, buf, len); mutex_unlock(&chip->lock); - return lp5523_update_program_memory(chip, buf, len); + return ret; } store_load(1) store_load(2) diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index d26a312f117aef..3067d56b11a6b6 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -32,7 +32,7 @@ config ADB_MACII config ADB_MACIISI bool "Include Mac IIsi ADB driver" - depends on ADB && MAC + depends on ADB && MAC && BROKEN help Say Y here if want your kernel to support Macintosh systems that use the Mac IIsi style ADB. This includes the IIsi, IIvi, IIvx, Classic diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 2b46bf1d7e40ca..4c9852d92b0a90 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -421,9 +421,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait) if (watermark <= WATERMARK_METADATA) { SET_GC_MARK(b, GC_MARK_METADATA); + SET_GC_MOVE(b, 0); b->prio = BTREE_PRIO; } else { SET_GC_MARK(b, GC_MARK_RECLAIMABLE); + SET_GC_MOVE(b, 0); b->prio = INITIAL_PRIO; } diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 4beb55a0ff30dc..754f4317748322 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -197,7 +197,7 @@ struct bucket { uint8_t disk_gen; uint8_t last_gc; /* Most out of date gen in the btree */ uint8_t gc_gen; - uint16_t gc_mark; + uint16_t gc_mark; /* Bitfield used by GC. See below for field */ }; /* @@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); #define GC_MARK_RECLAIMABLE 0 #define GC_MARK_DIRTY 1 #define GC_MARK_METADATA 2 -BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); +BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); +BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); #include "journal.h" #include "stats.h" @@ -372,14 +373,14 @@ struct cached_dev { unsigned char writeback_percent; unsigned writeback_delay; - int writeback_rate_change; - int64_t writeback_rate_derivative; uint64_t writeback_rate_target; + int64_t writeback_rate_proportional; + int64_t writeback_rate_derivative; + int64_t writeback_rate_change; unsigned writeback_rate_update_seconds; unsigned writeback_rate_d_term; unsigned writeback_rate_p_term_inverse; - unsigned writeback_rate_d_smooth; }; enum alloc_watermarks { @@ -445,7 +446,6 @@ struct cache { * call prio_write() to keep gens from wrapping. 
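 *
 * (Aside on the struct bucket change above: gc_mark is only 16 bits
 * wide, so making room for the new GC_MOVE flag costs GC_SECTORS_USED
 * one bit.  Assuming BITMASK(name, type, field, offset, size) packs
 * 'size' bits starting at bit 'offset', the resulting layout is
 *   bits  0..1   GC_MARK          (reclaimable/dirty/metadata)
 *   bits  2..14  GC_SECTORS_USED  (13 bits, previously 14)
 *   bit   15     GC_MOVE          (bucket picked by moving gc)
 * so, roughly, GC_MOVE(b) reads (b->gc_mark >> 15) & 1.)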
*/ uint8_t need_save_prio; - unsigned gc_move_threshold; /* * If nonzero, we know we aren't going to find any buckets to invalidate diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5e2765aadce174..31bb53fcc67a40 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c) SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), GC_MARK_METADATA); + /* don't reclaim buckets to which writeback keys point */ + rcu_read_lock(); + for (i = 0; i < c->nr_uuids; i++) { + struct bcache_device *d = c->devices[i]; + struct cached_dev *dc; + struct keybuf_key *w, *n; + unsigned j; + + if (!d || UUID_FLASH_ONLY(&c->uuids[i])) + continue; + dc = container_of(d, struct cached_dev, disk); + + spin_lock(&dc->writeback_keys.lock); + rbtree_postorder_for_each_entry_safe(w, n, + &dc->writeback_keys.keys, node) + for (j = 0; j < KEY_PTRS(&w->key); j++) + SET_GC_MARK(PTR_BUCKET(c, &w->key, j), + GC_MARK_DIRTY); + spin_unlock(&dc->writeback_keys.lock); + } + rcu_read_unlock(); + for_each_cache(ca, c, i) { uint64_t *i; @@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert, if (KEY_START(k) > KEY_START(insert) + sectors_found) goto check_failed; - if (KEY_PTRS(replace_key) != KEY_PTRS(k)) + if (KEY_PTRS(k) != KEY_PTRS(replace_key) || + KEY_DIRTY(k) != KEY_DIRTY(replace_key)) goto check_failed; /* skip past gen */ @@ -2217,7 +2240,7 @@ struct btree_insert_op { struct bkey *replace_key; }; -int btree_insert_fn(struct btree_op *b_op, struct btree *b) +static int btree_insert_fn(struct btree_op *b_op, struct btree *b) { struct btree_insert_op *op = container_of(b_op, struct btree_insert_op, op); diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7c1275e66025b6..f2f0998c4a9187 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) unsigned i; for (i = 0; i < KEY_PTRS(k); i++) { - struct cache *ca = PTR_CACHE(c, k, i); struct bucket *g = PTR_BUCKET(c, k, i); - if (GC_SECTORS_USED(g) < ca->gc_move_threshold) + if (GC_MOVE(g)) return true; } @@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl) static void read_moving_endio(struct bio *bio, int error) { + struct bbio *b = container_of(bio, struct bbio, bio); struct moving_io *io = container_of(bio->bi_private, struct moving_io, cl); if (error) io->op.error = error; + else if (!KEY_DIRTY(&b->key) && + ptr_stale(io->op.c, &b->key, 0)) { + io->op.error = -EINTR; + } bch_bbio_endio(io->op.c, bio, error, "reading data to move"); } @@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c) if (!w) break; + if (ptr_stale(c, &w->key, 0)) { + bch_keybuf_del(&c->moving_gc_keys, w); + continue; + } + io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), GFP_KERNEL); @@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) static unsigned bucket_heap_top(struct cache *ca) { - return GC_SECTORS_USED(heap_peek(&ca->heap)); + struct bucket *b; + return (b = heap_peek(&ca->heap)) ? 
GC_SECTORS_USED(b) : 0; } void bch_moving_gc(struct cache_set *c) @@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c) sectors_to_move -= GC_SECTORS_USED(b); } - ca->gc_move_threshold = bucket_heap_top(ca); - - pr_debug("threshold %u", ca->gc_move_threshold); + while (heap_pop(&ca->heap, b, bucket_cmp)) + SET_GC_MOVE(b, 1); } mutex_unlock(&c->bucket_lock); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index dec15cd2d797ea..c57bfa071a57c5 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1676,7 +1676,7 @@ static void run_cache_set(struct cache_set *c) static bool can_attach_cache(struct cache *ca, struct cache_set *c) { return ca->sb.block_size == c->sb.block_size && - ca->sb.bucket_size == c->sb.block_size && + ca->sb.bucket_size == c->sb.bucket_size && ca->sb.nr_in_set == c->sb.nr_in_set; } diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 80d4c2bee18aa3..a1f85612f0b3df 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -83,7 +83,6 @@ rw_attribute(writeback_rate); rw_attribute(writeback_rate_update_seconds); rw_attribute(writeback_rate_d_term); rw_attribute(writeback_rate_p_term_inverse); -rw_attribute(writeback_rate_d_smooth); read_attribute(writeback_rate_debug); read_attribute(stripe_size); @@ -129,31 +128,41 @@ SHOW(__bch_cached_dev) var_printf(writeback_running, "%i"); var_print(writeback_delay); var_print(writeback_percent); - sysfs_print(writeback_rate, dc->writeback_rate.rate); + sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); var_print(writeback_rate_update_seconds); var_print(writeback_rate_d_term); var_print(writeback_rate_p_term_inverse); - var_print(writeback_rate_d_smooth); if (attr == &sysfs_writeback_rate_debug) { + char rate[20]; char dirty[20]; - char derivative[20]; char target[20]; - bch_hprint(dirty, - bcache_dev_sectors_dirty(&dc->disk) << 9); - bch_hprint(derivative, dc->writeback_rate_derivative << 9); + char proportional[20]; + char derivative[20]; + char change[20]; + s64 next_io; + + bch_hprint(rate, dc->writeback_rate.rate << 9); + bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); bch_hprint(target, dc->writeback_rate_target << 9); + bch_hprint(proportional,dc->writeback_rate_proportional << 9); + bch_hprint(derivative, dc->writeback_rate_derivative << 9); + bch_hprint(change, dc->writeback_rate_change << 9); + + next_io = div64_s64(dc->writeback_rate.next - local_clock(), + NSEC_PER_MSEC); return sprintf(buf, - "rate:\t\t%u\n" - "change:\t\t%i\n" + "rate:\t\t%s/sec\n" "dirty:\t\t%s\n" + "target:\t\t%s\n" + "proportional:\t%s\n" "derivative:\t%s\n" - "target:\t\t%s\n", - dc->writeback_rate.rate, - dc->writeback_rate_change, - dirty, derivative, target); + "change:\t\t%s/sec\n" + "next io:\t%llims\n", + rate, dirty, target, proportional, + derivative, change, next_io); } sysfs_hprint(dirty_data, @@ -189,6 +198,7 @@ STORE(__cached_dev) struct kobj_uevent_env *env; #define d_strtoul(var) sysfs_strtoul(var, dc->var) +#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) sysfs_strtoul(data_csum, dc->disk.data_csum); @@ -197,16 +207,15 @@ STORE(__cached_dev) d_strtoul(writeback_metadata); d_strtoul(writeback_running); d_strtoul(writeback_delay); - sysfs_strtoul_clamp(writeback_rate, - dc->writeback_rate.rate, 1, 1000000); + sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); - d_strtoul(writeback_rate_update_seconds); + sysfs_strtoul_clamp(writeback_rate, + 
dc->writeback_rate.rate, 1, INT_MAX); + + d_strtoul_nonzero(writeback_rate_update_seconds); d_strtoul(writeback_rate_d_term); - d_strtoul(writeback_rate_p_term_inverse); - sysfs_strtoul_clamp(writeback_rate_p_term_inverse, - dc->writeback_rate_p_term_inverse, 1, INT_MAX); - d_strtoul(writeback_rate_d_smooth); + d_strtoul_nonzero(writeback_rate_p_term_inverse); d_strtoi_h(sequential_cutoff); d_strtoi_h(readahead); @@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_writeback_rate_update_seconds, &sysfs_writeback_rate_d_term, &sysfs_writeback_rate_p_term_inverse, - &sysfs_writeback_rate_d_smooth, &sysfs_writeback_rate_debug, &sysfs_dirty_data, &sysfs_stripe_size, diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 462214eeacbedb..bb37618e76648b 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) { uint64_t now = local_clock(); - d->next += div_u64(done, d->rate); + d->next += div_u64(done * NSEC_PER_SEC, d->rate); + + if (time_before64(now + NSEC_PER_SEC, d->next)) + d->next = now + NSEC_PER_SEC; + + if (time_after64(now - NSEC_PER_SEC * 2, d->next)) + d->next = now - NSEC_PER_SEC * 2; return time_after64(d->next, now) ? div_u64(d->next - now, NSEC_PER_SEC / HZ) diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 362c4b3f8b4a00..1030c6020e9869 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -110,7 +110,7 @@ do { \ _r; \ }) -#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) +#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL) #define heap_full(h) ((h)->used == (h)->size) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 99053b1251bea1..6c44fe059c2769 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc) /* PD controller */ - int change = 0; - int64_t error; int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); int64_t derivative = dirty - dc->disk.sectors_dirty_last; + int64_t proportional = dirty - target; + int64_t change; dc->disk.sectors_dirty_last = dirty; - derivative *= dc->writeback_rate_d_term; - derivative = clamp(derivative, -dirty, dirty); + /* Scale to sectors per second */ - derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, - dc->writeback_rate_d_smooth, 0); + proportional *= dc->writeback_rate_update_seconds; + proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse); - /* Avoid divide by zero */ - if (!target) - goto out; + derivative = div_s64(derivative, dc->writeback_rate_update_seconds); - error = div64_s64((dirty + derivative - target) << 8, target); + derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, + (dc->writeback_rate_d_term / + dc->writeback_rate_update_seconds) ?: 1, 0); + + derivative *= dc->writeback_rate_d_term; + derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse); - change = div_s64((dc->writeback_rate.rate * error) >> 8, - dc->writeback_rate_p_term_inverse); + change = proportional + derivative; /* Don't increase writeback rate if the device isn't keeping up */ if (change > 0 && time_after64(local_clock(), - dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) + dc->writeback_rate.next + NSEC_PER_MSEC)) change = 0; dc->writeback_rate.rate = - clamp_t(int64_t, dc->writeback_rate.rate + change, + clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change, 1, 
NSEC_PER_MSEC); -out: + + dc->writeback_rate_proportional = proportional; dc->writeback_rate_derivative = derivative; dc->writeback_rate_change = change; dc->writeback_rate_target = target; @@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work) static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) { - uint64_t ret; - if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || !dc->writeback_percent) return 0; - ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); - - return min_t(uint64_t, ret, HZ); + return bch_next_delay(&dc->writeback_rate, sectors); } struct dirty_io { @@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc) if (KEY_START(&w->key) != dc->last_read || jiffies_to_msecs(delay) > 50) while (!kthread_should_stop() && delay) - delay = schedule_timeout_interruptible(delay); + delay = schedule_timeout_uninterruptible(delay); dc->last_read = KEY_OFFSET(&w->key); @@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg) while (delay && !kthread_should_stop() && !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) - delay = schedule_timeout_interruptible(delay); + delay = schedule_timeout_uninterruptible(delay); } } @@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc) bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), sectors_dirty_init_fn, 0); + + dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); } int bch_cached_dev_writeback_init(struct cached_dev *dc) @@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_delay = 30; dc->writeback_rate.rate = 1024; - dc->writeback_rate_update_seconds = 30; - dc->writeback_rate_d_term = 16; - dc->writeback_rate_p_term_inverse = 64; - dc->writeback_rate_d_smooth = 8; + dc->writeback_rate_update_seconds = 5; + dc->writeback_rate_d_term = 30; + dc->writeback_rate_p_term_inverse = 6000; dc->writeback_thread = kthread_create(bch_writeback_thread, dc, "bcache_writeback"); if (IS_ERR(dc->writeback_thread)) return PTR_ERR(dc->writeback_thread); - set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE); - INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 12dc29ba7399c8..4195a01b15359b 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1635,7 +1635,7 @@ int bitmap_create(struct mddev *mddev) sector_t blocks = mddev->resync_max_sectors; struct file *file = mddev->bitmap_info.file; int err; - struct sysfs_dirent *bm = NULL; + struct kernfs_node *bm = NULL; BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h index df4aeb6ac6f04e..30210b9c4ef972 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -225,7 +225,7 @@ struct bitmap { wait_queue_head_t overflow_wait; wait_queue_head_t behind_wait; - struct sysfs_dirent *sysfs_can_clear; + struct kernfs_node *sysfs_can_clear; }; /* the bitmap API */ diff --git a/drivers/md/md.c b/drivers/md/md.c index 21f4d7ff0da22e..369d919bdafef4 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1077,6 +1077,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) rdev->raid_disk = -1; clear_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); + clear_bit(Bitmap_sync, &rdev->flags); clear_bit(WriteMostly, &rdev->flags); if (mddev->raid_disks == 0) { @@ -1155,6 +1156,8 @@ static int super_90_validate(struct mddev 
*mddev, struct md_rdev *rdev) */ if (ev1 < mddev->bitmap->events_cleared) return 0; + if (ev1 < mddev->events) + set_bit(Bitmap_sync, &rdev->flags); } else { if (ev1 < mddev->events) /* just a hot-add of a new device, leave raid_disk at -1 */ @@ -1563,6 +1566,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) rdev->raid_disk = -1; clear_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); + clear_bit(Bitmap_sync, &rdev->flags); clear_bit(WriteMostly, &rdev->flags); if (mddev->raid_disks == 0) { @@ -1645,6 +1649,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) */ if (ev1 < mddev->bitmap->events_cleared) return 0; + if (ev1 < mddev->events) + set_bit(Bitmap_sync, &rdev->flags); } else { if (ev1 < mddev->events) /* just a hot-add of a new device, leave raid_disk at -1 */ @@ -2788,6 +2794,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) else rdev->saved_raid_disk = -1; clear_bit(In_sync, &rdev->flags); + clear_bit(Bitmap_sync, &rdev->flags); err = rdev->mddev->pers-> hot_add_disk(rdev->mddev, rdev); if (err) { @@ -5760,6 +5767,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) info->raid_disk < mddev->raid_disks) { rdev->raid_disk = info->raid_disk; set_bit(In_sync, &rdev->flags); + clear_bit(Bitmap_sync, &rdev->flags); } else rdev->raid_disk = -1; } else @@ -7706,7 +7714,8 @@ static int remove_and_add_spares(struct mddev *mddev, if (test_bit(Faulty, &rdev->flags)) continue; if (mddev->ro && - rdev->saved_raid_disk < 0) + ! (rdev->saved_raid_disk >= 0 && + !test_bit(Bitmap_sync, &rdev->flags))) continue; rdev->recovery_offset = 0; @@ -7787,9 +7796,12 @@ void md_check_recovery(struct mddev *mddev) * As we only add devices that are already in-sync, * we can activate the spares immediately. */ - clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); remove_and_add_spares(mddev, NULL); - mddev->pers->spare_active(mddev); + /* There is no thread, but we need to call + * ->spare_active and clear saved_raid_disk + */ + md_reap_sync_thread(mddev); + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); goto unlock; } diff --git a/drivers/md/md.h b/drivers/md/md.h index 2f5cc8a7ef3ed8..07bba96de26047 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -106,7 +106,7 @@ struct md_rdev { */ struct work_struct del_work; /* used for delayed sysfs removal */ - struct sysfs_dirent *sysfs_state; /* handle for 'state' + struct kernfs_node *sysfs_state; /* handle for 'state' * sysfs entry */ struct badblocks { @@ -129,6 +129,9 @@ struct md_rdev { enum flag_bits { Faulty, /* device is known to have a fault */ In_sync, /* device is in_sync with rest of array */ + Bitmap_sync, /* ..actually, not quite In_sync. Need a + * bitmap-based recovery to get fully in sync + */ Unmerged, /* device is being added to array and should * be considerred for bvec_merge_fn but not * yet for actual IO @@ -376,10 +379,10 @@ struct mddev { sector_t resync_max; /* resync should pause * when it gets here */ - struct sysfs_dirent *sysfs_state; /* handle for 'array_state' + struct kernfs_node *sysfs_state; /* handle for 'array_state' * file in sysfs. 
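 *
 * (Aside on the Bitmap_sync flag introduced above: the new condition in
 * remove_and_add_spares() is a double negation; written as a positive
 * predicate it allows a re-add on a read-only array only when
 *   rdev->saved_raid_disk >= 0 && !test_bit(Bitmap_sync, &rdev->flags)
 * i.e. the device still knows its old slot and does not need a
 * bitmap-based recovery to become fully in sync.)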
*/ - struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ + struct kernfs_node *sysfs_action; /* handle for 'sync_action' */ struct work_struct del_work; /* used for delayed sysfs removal */ @@ -498,13 +501,13 @@ struct md_sysfs_entry { }; extern struct attribute_group md_bitmap_group; -static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name) +static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name) { if (sd) return sysfs_get_dirent(sd, name); return sd; } -static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd) +static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd) { if (sd) sysfs_notify_dirent(sd); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1e5a540995e932..a49cfcc7a34318 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -924,9 +924,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) conf->next_window_requests++; else conf->current_window_requests++; - } - if (bio->bi_sector >= conf->start_next_window) sector = conf->start_next_window; + } } conf->nr_pending++; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c504e8389e69e3..06eeb99ea6fc55 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1319,7 +1319,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) /* Could not read all from this device, so we will * need another r10_bio. */ - sectors_handled = (r10_bio->sectors + max_sectors + sectors_handled = (r10_bio->sector + max_sectors - bio->bi_sector); r10_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); @@ -1327,7 +1327,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) bio->bi_phys_segments = 2; else bio->bi_phys_segments++; - spin_unlock(&conf->device_lock); + spin_unlock_irq(&conf->device_lock); /* Cannot call generic_make_request directly * as that will be queued in __generic_make_request * and subsequent mempool_alloc might block @@ -3218,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, if (j == conf->copies) { /* Cannot recover, so abort the recovery or * record a bad block */ - put_buf(r10_bio); - if (rb2) - atomic_dec(&rb2->remaining); - r10_bio = rb2; if (any_working) { /* problem is that there are bad blocks * on other device(s) @@ -3253,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, mirror->recovery_disabled = mddev->recovery_disabled; } + put_buf(r10_bio); + if (rb2) + atomic_dec(&rb2->remaining); + r10_bio = rb2; break; } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index cc055da02e2a30..cbb15716a5db31 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -687,7 +687,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector, } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); - BUG_ON(list_empty(&sh->lru)); + BUG_ON(list_empty(&sh->lru) && + !test_bit(STRIPE_EXPANDING, &sh->state)); list_del_init(&sh->lru); if (sh->group) { sh->group->stripes_cnt--; @@ -3608,7 +3609,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) */ set_bit(R5_Insync, &dev->flags); - if (rdev && test_bit(R5_WriteError, &dev->flags)) { + if (test_bit(R5_WriteError, &dev->flags)) { /* This flag does not apply to '.replacement' * only to .rdev, so make sure to check that*/ struct md_rdev *rdev2 = rcu_dereference( @@ -3621,7 +3622,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } else 
clear_bit(R5_WriteError, &dev->flags); } - if (rdev && test_bit(R5_MadeGood, &dev->flags)) { + if (test_bit(R5_MadeGood, &dev->flags)) { /* This flag does not apply to '.replacement' * only to .rdev, so make sure to check that*/ struct md_rdev *rdev2 = rcu_dereference( diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index 11e20afbdcacb7..705698fd2c7ed0 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) pcr->remove_pci = true; - cancel_delayed_work(&pcr->carddet_work); - cancel_delayed_work(&pcr->idle_work); + /* Disable interrupts at the pcr level */ + spin_lock_irq(&pcr->lock); + rtsx_pci_writel(pcr, RTSX_BIER, 0); + pcr->bier = 0; + spin_unlock_irq(&pcr->lock); + + cancel_delayed_work_sync(&pcr->carddet_work); + cancel_delayed_work_sync(&pcr->idle_work); mfd_remove_devices(&pcidev->dev); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index a3e291d0df9a19..6cb388e8fb7d5a 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -525,4 +525,5 @@ source "drivers/misc/altera-stapl/Kconfig" source "drivers/misc/mei/Kconfig" source "drivers/misc/vmw_vmci/Kconfig" source "drivers/misc/mic/Kconfig" +source "drivers/misc/genwqe/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f45473e68bf71d..99b9424ce31d80 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -53,3 +53,4 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o obj-$(CONFIG_SRAM) += sram.o obj-y += mic/ +obj-$(CONFIG_GENWQE) += genwqe/ diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index 0daadcf1ed7ae3..d3eee113baeba5 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c @@ -641,7 +641,7 @@ static const struct attribute_group ad525x_group_commands = { .attrs = ad525x_attributes_commands, }; -int ad_dpot_add_files(struct device *dev, +static int ad_dpot_add_files(struct device *dev, unsigned features, unsigned rdac) { int err = sysfs_create_file(&dev->kobj, @@ -666,7 +666,7 @@ int ad_dpot_add_files(struct device *dev, return err; } -inline void ad_dpot_remove_files(struct device *dev, +static inline void ad_dpot_remove_files(struct device *dev, unsigned features, unsigned rdac) { sysfs_remove_file(&dev->kobj, diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c index 3abfcecf84240b..a7c16295b8161f 100644 --- a/drivers/misc/bmp085-i2c.c +++ b/drivers/misc/bmp085-i2c.c @@ -49,7 +49,7 @@ static int bmp085_i2c_probe(struct i2c_client *client, return err; } - return bmp085_probe(&client->dev, regmap); + return bmp085_probe(&client->dev, regmap, client->irq); } static int bmp085_i2c_remove(struct i2c_client *client) diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c index d6a52659cf24a7..864ecac323732a 100644 --- a/drivers/misc/bmp085-spi.c +++ b/drivers/misc/bmp085-spi.c @@ -41,7 +41,7 @@ static int bmp085_spi_probe(struct spi_device *client) return err; } - return bmp085_probe(&client->dev, regmap); + return bmp085_probe(&client->dev, regmap, client->irq); } static int bmp085_spi_remove(struct spi_device *client) diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c index 2704d885a9b305..820e53d0048fa1 100644 --- a/drivers/misc/bmp085.c +++ b/drivers/misc/bmp085.c @@ -49,9 +49,11 @@ #include #include #include -#include #include #include "bmp085.h" +#include +#include +#include #define BMP085_CHIP_ID 0x55 #define BMP085_CALIBRATION_DATA_START 0xAA @@ -84,8 
+86,19 @@ struct bmp085_data { unsigned long last_temp_measurement; u8 chip_id; s32 b6; /* calculated temperature correction coefficient */ + int irq; + struct completion done; }; +static irqreturn_t bmp085_eoc_isr(int irq, void *devid) +{ + struct bmp085_data *data = devid; + + complete(&data->done); + + return IRQ_HANDLED; +} + static s32 bmp085_read_calibration_data(struct bmp085_data *data) { u16 tmp[BMP085_CALIBRATION_DATA_LENGTH]; @@ -116,6 +129,9 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data) s32 status; mutex_lock(&data->lock); + + init_completion(&data->done); + status = regmap_write(data->regmap, BMP085_CTRL_REG, BMP085_TEMP_MEASUREMENT); if (status < 0) { @@ -123,7 +139,8 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data) "Error while requesting temperature measurement.\n"); goto exit; } - msleep(BMP085_TEMP_CONVERSION_TIME); + wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( + BMP085_TEMP_CONVERSION_TIME)); status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, &tmp, sizeof(tmp)); @@ -147,6 +164,9 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data) s32 status; mutex_lock(&data->lock); + + init_completion(&data->done); + status = regmap_write(data->regmap, BMP085_CTRL_REG, BMP085_PRESSURE_MEASUREMENT + (data->oversampling_setting << 6)); @@ -157,8 +177,8 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data) } /* wait for the end of conversion */ - msleep(2+(3 << data->oversampling_setting)); - + wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( + 2+(3 << data->oversampling_setting))); /* copy data into a u32 (4 bytes), but skip the first byte. */ status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, ((u8 *)&tmp)+1, 3); @@ -420,7 +440,7 @@ struct regmap_config bmp085_regmap_config = { }; EXPORT_SYMBOL_GPL(bmp085_regmap_config); -int bmp085_probe(struct device *dev, struct regmap *regmap) +int bmp085_probe(struct device *dev, struct regmap *regmap, int irq) { struct bmp085_data *data; int err = 0; @@ -434,6 +454,15 @@ int bmp085_probe(struct device *dev, struct regmap *regmap) dev_set_drvdata(dev, data); data->dev = dev; data->regmap = regmap; + data->irq = irq; + + if (data->irq > 0) { + err = devm_request_irq(dev, data->irq, bmp085_eoc_isr, + IRQF_TRIGGER_RISING, "bmp085", + data); + if (err < 0) + goto exit_free; + } /* Initialize the BMP085 chip */ err = bmp085_init_client(data); diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h index 2b8f615bca92e5..8b8e3b1f5ca5b0 100644 --- a/drivers/misc/bmp085.h +++ b/drivers/misc/bmp085.h @@ -26,7 +26,7 @@ extern struct regmap_config bmp085_regmap_config; -int bmp085_probe(struct device *dev, struct regmap *regmap); +int bmp085_probe(struct device *dev, struct regmap *regmap, int irq); int bmp085_remove(struct device *dev); int bmp085_detect(struct device *dev); diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 3a015abb444a82..78e55b501c9481 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -378,7 +378,6 @@ static int eeprom_93xx46_remove(struct spi_device *spi) device_remove_file(&spi->dev, &dev_attr_erase); sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin); - spi_set_drvdata(spi, NULL); kfree(edev); return 0; } diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig new file mode 100644 index 00000000000000..6069d8cd79d767 --- /dev/null +++ b/drivers/misc/genwqe/Kconfig @@ -0,0 +1,13 @@ +# +# IBM 
Accelerator Family 'GenWQE' +# + +menuconfig GENWQE + tristate "GenWQE PCIe Accelerator" + depends on PCI && 64BIT + select CRC_ITU_T + default n + help + Enables PCIe card driver for IBM GenWQE accelerators. + The user-space interface is described in + include/linux/genwqe/genwqe_card.h. diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile new file mode 100644 index 00000000000000..98a2b4f0b18b6c --- /dev/null +++ b/drivers/misc/genwqe/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for GenWQE driver +# + +obj-$(CONFIG_GENWQE) := genwqe_card.o +genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \ + card_debugfs.o card_utils.o diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c new file mode 100644 index 00000000000000..74d51c9bb85870 --- /dev/null +++ b/drivers/misc/genwqe/card_base.c @@ -0,0 +1,1205 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Module initialization and PCIe setup. Card health monitoring and + * recovery functionality. Character device creation and deletion are + * controlled from here. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "card_base.h" +#include "card_ddcb.h" + +MODULE_AUTHOR("Frank Haverkamp "); +MODULE_AUTHOR("Michael Ruettger "); +MODULE_AUTHOR("Joerg-Stephan Vogt "); +MODULE_AUTHOR("Michal Jung "); + +MODULE_DESCRIPTION("GenWQE Card"); +MODULE_VERSION(DRV_VERS_STRING); +MODULE_LICENSE("GPL"); + +static char genwqe_driver_name[] = GENWQE_DEVNAME; +static struct class *class_genwqe; +static struct dentry *debugfs_genwqe; +static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX]; + +/* PCI structure for identifying device by PCI vendor and device ID */ +static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = { + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5 << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Initial SR-IOV bring-up image */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */ + .device = 0x0000, /* VF Device ID */ + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Fixed up image */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { .vendor = PCI_VENDOR_ID_IBM, /* VF 
Vendor ID */ + .device = 0x0000, /* VF Device ID */ + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Even one more ... */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW, + .class = (PCI_CLASSCODE_GENWQE5 << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { 0, } /* 0 terminated list. */ +}; + +MODULE_DEVICE_TABLE(pci, genwqe_device_table); + +/** + * genwqe_dev_alloc() - Create and prepare a new card descriptor + * + * Return: Pointer to card descriptor, or ERR_PTR(err) on error + */ +static struct genwqe_dev *genwqe_dev_alloc(void) +{ + unsigned int i = 0, j; + struct genwqe_dev *cd; + + for (i = 0; i < GENWQE_CARD_NO_MAX; i++) { + if (genwqe_devices[i] == NULL) + break; + } + if (i >= GENWQE_CARD_NO_MAX) + return ERR_PTR(-ENODEV); + + cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL); + if (!cd) + return ERR_PTR(-ENOMEM); + + cd->card_idx = i; + cd->class_genwqe = class_genwqe; + cd->debugfs_genwqe = debugfs_genwqe; + + init_waitqueue_head(&cd->queue_waitq); + + spin_lock_init(&cd->file_lock); + INIT_LIST_HEAD(&cd->file_list); + + cd->card_state = GENWQE_CARD_UNUSED; + spin_lock_init(&cd->print_lock); + + cd->ddcb_software_timeout = genwqe_ddcb_software_timeout; + cd->kill_timeout = genwqe_kill_timeout; + + for (j = 0; j < GENWQE_MAX_VFS; j++) + cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec; + + genwqe_devices[i] = cd; + return cd; +} + +static void genwqe_dev_free(struct genwqe_dev *cd) +{ + if (!cd) + return; + + genwqe_devices[cd->card_idx] = NULL; + kfree(cd); +} + +/** + * genwqe_bus_reset() - Card recovery + * + * pci_reset_function() will recover the device and ensure that the + * registers are accessible again when it completes with success. If + * not, the card will stay dead and registers will be unaccessible + * still. + */ +static int genwqe_bus_reset(struct genwqe_dev *cd) +{ + int bars, rc = 0; + struct pci_dev *pci_dev = cd->pci_dev; + void __iomem *mmio; + + if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE) + return -EIO; + + mmio = cd->mmio; + cd->mmio = NULL; + pci_iounmap(pci_dev, mmio); + + bars = pci_select_bars(pci_dev, IORESOURCE_MEM); + pci_release_selected_regions(pci_dev, bars); + + /* + * Firmware/BIOS might change memory mapping during bus reset. + * Settings like enable bus-mastering, ... are backuped and + * restored by the pci_reset_function(). + */ + dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__); + rc = pci_reset_function(pci_dev); + if (rc) { + dev_err(&pci_dev->dev, + "[%s] err: failed reset func (rc %d)\n", __func__, rc); + return rc; + } + dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc); + + /* + * Here is the right spot to clear the register read + * failure. pci_bus_reset() does this job in real systems. + */ + cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE | + GENWQE_INJECT_GFIR_FATAL | + GENWQE_INJECT_GFIR_INFO); + + rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); + if (rc) { + dev_err(&pci_dev->dev, + "[%s] err: request bars failed (%d)\n", __func__, rc); + return -EIO; + } + + cd->mmio = pci_iomap(pci_dev, 0, 0); + if (cd->mmio == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: mapping BAR0 failed\n", __func__); + return -ENOMEM; + } + return 0; +} + +/* + * Hardware circumvention section. 
Certain bitstreams in our test-lab + * had different kinds of problems. Here is where we adjust those + * bitstreams to function will with this version of our device driver. + * + * Thise circumventions are applied to the physical function only. + * The magical numbers below are identifying development/manufacturing + * versions of the bitstream used on the card. + * + * Turn off error reporting for old/manufacturing images. + */ + +bool genwqe_need_err_masking(struct genwqe_dev *cd) +{ + return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull; +} + +static void genwqe_tweak_hardware(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + /* Mask FIRs for development images */ + if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) && + ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) { + dev_warn(&pci_dev->dev, + "FIRs masked due to bitstream %016llx.%016llx\n", + cd->slu_unitcfg, cd->app_unitcfg); + + __genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR, + 0xFFFFFFFFFFFFFFFFull); + + __genwqe_writeq(cd, IO_APP_ERR_ACT_MASK, + 0x0000000000000000ull); + } +} + +/** + * genwqe_recovery_on_fatal_gfir_required() - Version depended actions + * + * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must + * be ignored. This is e.g. true for the bitstream we gave to the card + * manufacturer, but also for some old bitstreams we released to our + * test-lab. + */ +int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd) +{ + return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull; +} + +int genwqe_flash_readback_fails(struct genwqe_dev *cd) +{ + return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull; +} + +/** + * genwqe_T_psec() - Calculate PF/VF timeout register content + * + * Note: From a design perspective it turned out to be a bad idea to + * use codes here to specifiy the frequency/speed values. An old + * driver cannot understand new codes and is therefore always a + * problem. Better is to measure out the value or put the + * speed/frequency directly into a register which is always a valid + * value for old as well as for new software. + */ +/* T = 1/f */ +static int genwqe_T_psec(struct genwqe_dev *cd) +{ + u16 speed; /* 1/f -> 250, 200, 166, 175 */ + static const int T[] = { 4000, 5000, 6000, 5714 }; + + speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full); + if (speed >= ARRAY_SIZE(T)) + return -1; /* illegal value */ + + return T[speed]; +} + +/** + * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution + * + * Do this _after_ card_reset() is called. Otherwise the values will + * vanish. The settings need to be done when the queues are inactive. + * + * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16. + * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16. 
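+ *
+ * Worked example with made-up numbers (not from this patch): for a
+ * hypothetical 250 ms job timeout on a 166 MHz bitstream (T = 6000 ps)
+ * the code below computes
+ *
+ *   x = ilog2(250 * 16000000000 / (6000 * 15)) - 10
+ *     = ilog2(44444444) - 10 = 25 - 10 = 15
+ *
+ * and writes 0xff00 | 15 into IO_SLC_VF_APPJOB_TIMEOUT.  Because
+ * ilog2() rounds down, the realised window 2^(10+x) * T * 15/16 comes
+ * out near 189 ms here, slightly below the requested value.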
+ */ +static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd) +{ + u32 T = genwqe_T_psec(cd); + u64 x; + + if (genwqe_pf_jobtimeout_msec == 0) + return false; + + /* PF: large value needed, flash update 2sec per block */ + x = ilog2(genwqe_pf_jobtimeout_msec * + 16000000000uL/(T * 15)) - 10; + + genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + 0xff00 | (x & 0xff), 0); + return true; +} + +/** + * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution + */ +static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + unsigned int vf; + u32 T = genwqe_T_psec(cd); + u64 x; + + for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) { + + if (cd->vf_jobtimeout_msec[vf] == 0) + continue; + + x = ilog2(cd->vf_jobtimeout_msec[vf] * + 16000000000uL/(T * 15)) - 10; + + genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + 0xff00 | (x & 0xff), vf + 1); + } + return true; +} + +static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd) +{ + unsigned int type, e = 0; + + for (type = 0; type < GENWQE_DBG_UNITS; type++) { + switch (type) { + case GENWQE_DBG_UNIT0: + e = genwqe_ffdc_buff_size(cd, 0); + break; + case GENWQE_DBG_UNIT1: + e = genwqe_ffdc_buff_size(cd, 1); + break; + case GENWQE_DBG_UNIT2: + e = genwqe_ffdc_buff_size(cd, 2); + break; + case GENWQE_DBG_REGS: + e = GENWQE_FFDC_REGS; + break; + } + + /* currently support only the debug units mentioned here */ + cd->ffdc[type].entries = e; + cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg), + GFP_KERNEL); + /* + * regs == NULL is ok, the using code treats this as no regs, + * Printing warning is ok in this case. + */ + } + return 0; +} + +static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd) +{ + unsigned int type; + + for (type = 0; type < GENWQE_DBG_UNITS; type++) { + kfree(cd->ffdc[type].regs); + cd->ffdc[type].regs = NULL; + } +} + +static int genwqe_read_ids(struct genwqe_dev *cd) +{ + int err = 0; + int slu_id; + struct pci_dev *pci_dev = cd->pci_dev; + + cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); + if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "err: SLUID=%016llx\n", cd->slu_unitcfg); + err = -EIO; + goto out_err; + } + + slu_id = genwqe_get_slu_id(cd); + if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) { + dev_err(&pci_dev->dev, + "err: incompatible SLU Architecture %u\n", slu_id); + err = -ENOENT; + goto out_err; + } + + cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); + if (cd->app_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "err: APPID=%016llx\n", cd->app_unitcfg); + err = -EIO; + goto out_err; + } + genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name)); + + /* + * Is access to all registers possible? If we are a VF the + * answer is obvious. If we run fully virtualized, we need to + * check if we can access all registers. If we do not have + * full access we will cause an UR and some informational FIRs + * in the PF, but that should not harm. + */ + if (pci_dev->is_virtfn) + cd->is_privileged = 0; + else + cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) + != IO_ILLEGAL_VALUE); + + out_err: + return err; +} + +static int genwqe_start(struct genwqe_dev *cd) +{ + int err; + struct pci_dev *pci_dev = cd->pci_dev; + + err = genwqe_read_ids(cd); + if (err) + return err; + + if (genwqe_is_privileged(cd)) { + /* do this after the tweaks. alloc fail is acceptable */ + genwqe_ffdc_buffs_alloc(cd); + genwqe_stop_traps(cd); + + /* Collect registers e.g. FIRs, UNITIDs, traces ... 
*/ + genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs, + cd->ffdc[GENWQE_DBG_REGS].entries, 0); + + genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0, + cd->ffdc[GENWQE_DBG_UNIT0].regs, + cd->ffdc[GENWQE_DBG_UNIT0].entries); + + genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1, + cd->ffdc[GENWQE_DBG_UNIT1].regs, + cd->ffdc[GENWQE_DBG_UNIT1].entries); + + genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2, + cd->ffdc[GENWQE_DBG_UNIT2].regs, + cd->ffdc[GENWQE_DBG_UNIT2].entries); + + genwqe_start_traps(cd); + + if (cd->card_state == GENWQE_CARD_FATAL_ERROR) { + dev_warn(&pci_dev->dev, + "[%s] chip reload/recovery!\n", __func__); + + /* + * Stealth Mode: Reload chip on either hot + * reset or PERST. + */ + cd->softreset = 0x7Cull; + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, + cd->softreset); + + err = genwqe_bus_reset(cd); + if (err != 0) { + dev_err(&pci_dev->dev, + "[%s] err: bus reset failed!\n", + __func__); + goto out; + } + + /* + * Re-read the IDs because + * it could happen that the bitstream load + * failed! + */ + err = genwqe_read_ids(cd); + if (err) + goto out; + } + } + + err = genwqe_setup_service_layer(cd); /* does a reset to the card */ + if (err != 0) { + dev_err(&pci_dev->dev, + "[%s] err: could not setup servicelayer!\n", __func__); + err = -ENODEV; + goto out; + } + + if (genwqe_is_privileged(cd)) { /* code is running _after_ reset */ + genwqe_tweak_hardware(cd); + + genwqe_setup_pf_jtimer(cd); + genwqe_setup_vf_jtimer(cd); + } + + err = genwqe_device_create(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: chdev init failed! (err=%d)\n", err); + goto out_release_service_layer; + } + return 0; + + out_release_service_layer: + genwqe_release_service_layer(cd); + out: + if (genwqe_is_privileged(cd)) + genwqe_ffdc_buffs_free(cd); + return -EIO; +} + +/** + * genwqe_stop() - Stop card operation + * + * Recovery notes: + * As long as genwqe_thread runs we might access registers during + * error data capture. Same is with the genwqe_health_thread. + * When genwqe_bus_reset() fails this function might called two times: + * first by the genwqe_health_thread() and later by genwqe_remove() to + * unbind the device. We must be able to survive that. + * + * This function must be robust enough to be called twice. + */ +static int genwqe_stop(struct genwqe_dev *cd) +{ + genwqe_finish_queue(cd); /* no register access */ + genwqe_device_remove(cd); /* device removed, procs killed */ + genwqe_release_service_layer(cd); /* here genwqe_thread is stopped */ + + if (genwqe_is_privileged(cd)) { + pci_disable_sriov(cd->pci_dev); /* access pci config space */ + genwqe_ffdc_buffs_free(cd); + } + + return 0; +} + +/** + * genwqe_recover_card() - Try to recover the card if it is possible + * + * If fatal_err is set no register access is possible anymore. It is + * likely that genwqe_start fails in that situation. Proper error + * handling is required in this case. + * + * genwqe_bus_reset() will cause the pci code to call genwqe_remove() + * and later genwqe_probe() for all virtual functions. + */ +static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + genwqe_stop(cd); + + /* + * Make sure chip is not reloaded to maintain FFDC. Write SLU + * Reset Register, CPLDReset field to 0. 
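+ *
+ * In short, the recovery sequence implemented here and in
+ * genwqe_start() is: genwqe_stop(), then (unless the error was fatal)
+ * a soft reset with 0x70ull so FFDC data survives, genwqe_bus_reset(),
+ * and finally genwqe_start(), which may itself request a full chip
+ * reload (0x7Cull) if the card is in GENWQE_CARD_FATAL_ERROR state.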
+ */ + if (!fatal_err) { + cd->softreset = 0x70ull; + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset); + } + + rc = genwqe_bus_reset(cd); + if (rc != 0) { + dev_err(&pci_dev->dev, + "[%s] err: card recovery impossible!\n", __func__); + return rc; + } + + rc = genwqe_start(cd); + if (rc < 0) { + dev_err(&pci_dev->dev, + "[%s] err: failed to launch device!\n", __func__); + return rc; + } + return 0; +} + +static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir) +{ + *gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + return (*gfir & GFIR_ERR_TRIGGER) && + genwqe_recovery_on_fatal_gfir_required(cd); +} + +/** + * genwqe_fir_checking() - Check the fault isolation registers of the card + * + * If this code works ok, can be tried out with help of the genwqe_poke tool: + * sudo ./tools/genwqe_poke 0x8 0xfefefefefef + * + * Now the relevant FIRs/sFIRs should be printed out and the driver should + * invoke recovery (devices are removed and readded). + */ +static u64 genwqe_fir_checking(struct genwqe_dev *cd) +{ + int j, iterations = 0; + u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec; + u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr; + struct pci_dev *pci_dev = cd->pci_dev; + + healthMonitor: + iterations++; + if (iterations > 16) { + dev_err(&pci_dev->dev, "* exit looping after %d times\n", + iterations); + goto fatal_error; + } + + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if (gfir != 0x0) + dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", + IO_SLC_CFGREG_GFIR, gfir); + if (gfir == IO_ILLEGAL_VALUE) + goto fatal_error; + + /* + * Avoid printing when to GFIR bit is on prevents contignous + * printout e.g. for the following bug: + * FIR set without a 2ndary FIR/FIR cannot be cleared + * Comment out the following if to get the prints: + */ + if (gfir == 0) + return 0; + + gfir_masked = gfir & GFIR_ERR_TRIGGER; /* fatal errors */ + + for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */ + + /* read the primary FIR (pfir) */ + fir_addr = (uid << 24) + 0x08; + fir = __genwqe_readq(cd, fir_addr); + if (fir == 0x0) + continue; /* no error in this unit */ + + dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir); + if (fir == IO_ILLEGAL_VALUE) + goto fatal_error; + + /* read primary FEC */ + fec_addr = (uid << 24) + 0x18; + fec = __genwqe_readq(cd, fec_addr); + + dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec); + if (fec == IO_ILLEGAL_VALUE) + goto fatal_error; + + for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) { + + /* secondary fir empty, skip it */ + if ((fir & mask) == 0x0) + continue; + + sfir_addr = (uid << 24) + 0x100 + 0x08 * j; + sfir = __genwqe_readq(cd, sfir_addr); + + if (sfir == IO_ILLEGAL_VALUE) + goto fatal_error; + dev_err(&pci_dev->dev, + "* 0x%08x 0x%016llx\n", sfir_addr, sfir); + + sfec_addr = (uid << 24) + 0x300 + 0x08 * j; + sfec = __genwqe_readq(cd, sfec_addr); + + if (sfec == IO_ILLEGAL_VALUE) + goto fatal_error; + dev_err(&pci_dev->dev, + "* 0x%08x 0x%016llx\n", sfec_addr, sfec); + + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if (gfir == IO_ILLEGAL_VALUE) + goto fatal_error; + + /* gfir turned on during routine! get out and + start over. 
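+ * For reference, the register addressing used throughout this
+ * routine (per unit uid, 0..2 in zEDC):
+ *   FIR        = (uid << 24) + 0x08
+ *   FIR clear  = (uid << 24) + 0x10
+ *   FEC        = (uid << 24) + 0x18
+ *   sFIR[j]    = (uid << 24) + 0x100 + 0x08 * j
+ *   sFEC[j]    = (uid << 24) + 0x300 + 0x08 * j
+ * with j = 0..63 walking the bits of the FIR.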
*/ + if ((gfir_masked == 0x0) && + (gfir & GFIR_ERR_TRIGGER)) { + goto healthMonitor; + } + + /* do not clear if we entered with a fatal gfir */ + if (gfir_masked == 0x0) { + + /* NEW clear by mask the logged bits */ + sfir_addr = (uid << 24) + 0x100 + 0x08 * j; + __genwqe_writeq(cd, sfir_addr, sfir); + + dev_dbg(&pci_dev->dev, + "[HM] Clearing 2ndary FIR 0x%08x " + "with 0x%016llx\n", sfir_addr, sfir); + + /* + * note, these cannot be error-Firs + * since gfir_masked is 0 after sfir + * was read. Also, it is safe to do + * this write if sfir=0. Still need to + * clear the primary. This just means + * there is no secondary FIR. + */ + + /* clear by mask the logged bit. */ + fir_clr_addr = (uid << 24) + 0x10; + __genwqe_writeq(cd, fir_clr_addr, mask); + + dev_dbg(&pci_dev->dev, + "[HM] Clearing primary FIR 0x%08x " + "with 0x%016llx\n", fir_clr_addr, + mask); + } + } + } + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if (gfir == IO_ILLEGAL_VALUE) + goto fatal_error; + + if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) { + /* + * Check once more that it didn't go on after all the + * FIRS were cleared. + */ + dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n", + iterations); + goto healthMonitor; + } + return gfir_masked; + + fatal_error: + return IO_ILLEGAL_VALUE; +} + +/** + * genwqe_health_thread() - Health checking thread + * + * This thread is only started for the PF of the card. + * + * This thread monitors the health of the card. A critical situation + * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In + * this case we need to be recovered from outside. Writing to + * registers will very likely not work either. + * + * This thread must only exit if kthread_should_stop() becomes true. + * + * Condition for the health-thread to trigger: + * a) when a kthread_stop() request comes in or + * b) a critical GFIR occured + * + * Informational GFIRs are checked and potentially printed in + * health_check_interval seconds. + */ +static int genwqe_health_thread(void *data) +{ + int rc, should_stop = 0; + struct genwqe_dev *cd = data; + struct pci_dev *pci_dev = cd->pci_dev; + u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg; + + while (!kthread_should_stop()) { + rc = wait_event_interruptible_timeout(cd->health_waitq, + (genwqe_health_check_cond(cd, &gfir) || + (should_stop = kthread_should_stop())), + genwqe_health_check_interval * HZ); + + if (should_stop) + break; + + if (gfir == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] GFIR=%016llx\n", __func__, gfir); + goto fatal_error; + } + + slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); + if (slu_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] SLU_UNITCFG=%016llx\n", + __func__, slu_unitcfg); + goto fatal_error; + } + + app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); + if (app_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] APP_UNITCFG=%016llx\n", + __func__, app_unitcfg); + goto fatal_error; + } + + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if (gfir == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] %s: GFIR=%016llx\n", __func__, + (gfir & GFIR_ERR_TRIGGER) ? "err" : "info", + gfir); + goto fatal_error; + } + + gfir_masked = genwqe_fir_checking(cd); + if (gfir_masked == IO_ILLEGAL_VALUE) + goto fatal_error; + + /* + * GFIR ErrorTrigger bits set => reset the card! + * Never do this for old/manufacturing images! 
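+ *
+ * (So the reset below is gated three ways: a fatal, masked GFIR must
+ * be pending, recovery must not be disabled via cd->skip_recovery, and
+ * genwqe_recovery_on_fatal_gfir_required() must report a bitstream new
+ * enough to survive it.)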
+ */ + if ((gfir_masked) && !cd->skip_recovery && + genwqe_recovery_on_fatal_gfir_required(cd)) { + + cd->card_state = GENWQE_CARD_FATAL_ERROR; + + rc = genwqe_recover_card(cd, 0); + if (rc < 0) { + /* FIXME Card is unusable and needs unbind! */ + goto fatal_error; + } + } + + cd->last_gfir = gfir; + cond_resched(); + } + + return 0; + + fatal_error: + dev_err(&pci_dev->dev, + "[%s] card unusable. Please trigger unbind!\n", __func__); + + /* Bring down logical devices to inform user space via udev remove. */ + cd->card_state = GENWQE_CARD_FATAL_ERROR; + genwqe_stop(cd); + + /* genwqe_bus_reset failed(). Now wait for genwqe_remove(). */ + while (!kthread_should_stop()) + cond_resched(); + + return -EIO; +} + +static int genwqe_health_check_start(struct genwqe_dev *cd) +{ + int rc; + + if (genwqe_health_check_interval <= 0) + return 0; /* valid for disabling the service */ + + /* moved before request_irq() */ + /* init_waitqueue_head(&cd->health_waitq); */ + + cd->health_thread = kthread_run(genwqe_health_thread, cd, + GENWQE_DEVNAME "%d_health", + cd->card_idx); + if (IS_ERR(cd->health_thread)) { + rc = PTR_ERR(cd->health_thread); + cd->health_thread = NULL; + return rc; + } + return 0; +} + +static int genwqe_health_thread_running(struct genwqe_dev *cd) +{ + return cd->health_thread != NULL; +} + +static int genwqe_health_check_stop(struct genwqe_dev *cd) +{ + int rc; + + if (!genwqe_health_thread_running(cd)) + return -EIO; + + rc = kthread_stop(cd->health_thread); + cd->health_thread = NULL; + return 0; +} + +/** + * genwqe_pci_setup() - Allocate PCIe related resources for our card + */ +static int genwqe_pci_setup(struct genwqe_dev *cd) +{ + int err, bars; + struct pci_dev *pci_dev = cd->pci_dev; + + bars = pci_select_bars(pci_dev, IORESOURCE_MEM); + err = pci_enable_device_mem(pci_dev); + if (err) { + dev_err(&pci_dev->dev, + "err: failed to enable pci memory (err=%d)\n", err); + goto err_out; + } + + /* Reserve PCI I/O and memory resources */ + err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); + if (err) { + dev_err(&pci_dev->dev, + "[%s] err: request bars failed (%d)\n", __func__, err); + err = -EIO; + goto err_disable_device; + } + + /* check for 64-bit DMA address supported (DAC) */ + if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { + err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pci_dev->dev, + "err: DMA64 consistent mask error\n"); + err = -EIO; + goto out_release_resources; + } + /* check for 32-bit DMA address supported (SAC) */ + } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pci_dev->dev, + "err: DMA32 consistent mask error\n"); + err = -EIO; + goto out_release_resources; + } + } else { + dev_err(&pci_dev->dev, + "err: neither DMA32 nor DMA64 supported\n"); + err = -EIO; + goto out_release_resources; + } + + pci_set_master(pci_dev); + pci_enable_pcie_error_reporting(pci_dev); + + /* request complete BAR-0 space (length = 0) */ + cd->mmio_len = pci_resource_len(pci_dev, 0); + cd->mmio = pci_iomap(pci_dev, 0, 0); + if (cd->mmio == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: mapping BAR0 failed\n", __func__); + err = -ENOMEM; + goto out_release_resources; + } + + cd->num_vfs = pci_sriov_get_totalvfs(pci_dev); + + err = genwqe_read_ids(cd); + if (err) + goto out_iounmap; + + return 0; + + out_iounmap: + pci_iounmap(pci_dev, cd->mmio); + out_release_resources: + pci_release_selected_regions(pci_dev, bars); + 
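+	/* Undo pci_enable_device_mem() from the setup above. */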
err_disable_device: + pci_disable_device(pci_dev); + err_out: + return err; +} + +/** + * genwqe_pci_remove() - Free PCIe related resources for our card + */ +static void genwqe_pci_remove(struct genwqe_dev *cd) +{ + int bars; + struct pci_dev *pci_dev = cd->pci_dev; + + if (cd->mmio) + pci_iounmap(pci_dev, cd->mmio); + + bars = pci_select_bars(pci_dev, IORESOURCE_MEM); + pci_release_selected_regions(pci_dev, bars); + pci_disable_device(pci_dev); +} + +/** + * genwqe_probe() - Device initialization + * @pdev: PCI device information struct + * + * Callable for multiple cards. This function is called on bind. + * + * Return: 0 if succeeded, < 0 when failed + */ +static int genwqe_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + int err; + struct genwqe_dev *cd; + + genwqe_init_crc32(); + + cd = genwqe_dev_alloc(); + if (IS_ERR(cd)) { + dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n", + (int)PTR_ERR(cd)); + return PTR_ERR(cd); + } + + dev_set_drvdata(&pci_dev->dev, cd); + cd->pci_dev = pci_dev; + + err = genwqe_pci_setup(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: problems with PCI setup (err=%d)\n", err); + goto out_free_dev; + } + + err = genwqe_start(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: cannot start card services! (err=%d)\n", err); + goto out_pci_remove; + } + + if (genwqe_is_privileged(cd)) { + err = genwqe_health_check_start(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: cannot start health checking! " + "(err=%d)\n", err); + goto out_stop_services; + } + } + return 0; + + out_stop_services: + genwqe_stop(cd); + out_pci_remove: + genwqe_pci_remove(cd); + out_free_dev: + genwqe_dev_free(cd); + return err; +} + +/** + * genwqe_remove() - Called when device is removed (hot-plugable) + * + * Or when driver is unloaded respecitively when unbind is done. + */ +static void genwqe_remove(struct pci_dev *pci_dev) +{ + struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev); + + genwqe_health_check_stop(cd); + + /* + * genwqe_stop() must survive if it is called twice + * sequentially. This happens when the health thread calls it + * and fails on genwqe_bus_reset(). + */ + genwqe_stop(cd); + genwqe_pci_remove(cd); + genwqe_dev_free(cd); +} + +/* + * genwqe_err_error_detected() - Error detection callback + * + * This callback is called by the PCI subsystem whenever a PCI bus + * error is detected. 
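+ *
+ * pci_channel_io_normal is answered with CAN_RECOVER,
+ * pci_channel_io_frozen with NEED_RESET and
+ * pci_channel_io_perm_failure with DISCONNECT; anything else
+ * falls back to NEED_RESET (see the switch below).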
+ */ +static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev, + enum pci_channel_state state) +{ + struct genwqe_dev *cd; + + dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state); + + if (pci_dev == NULL) + return PCI_ERS_RESULT_NEED_RESET; + + cd = dev_get_drvdata(&pci_dev->dev); + if (cd == NULL) + return PCI_ERS_RESULT_NEED_RESET; + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev) +{ + return PCI_ERS_RESULT_NONE; +} + +static void genwqe_err_resume(struct pci_dev *dev) +{ +} + +static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) +{ + struct genwqe_dev *cd = dev_get_drvdata(&dev->dev); + + if (numvfs > 0) { + genwqe_setup_vf_jtimer(cd); + pci_enable_sriov(dev, numvfs); + return numvfs; + } + if (numvfs == 0) { + pci_disable_sriov(dev); + return 0; + } + return 0; +} + +static struct pci_error_handlers genwqe_err_handler = { + .error_detected = genwqe_err_error_detected, + .mmio_enabled = genwqe_err_result_none, + .link_reset = genwqe_err_result_none, + .slot_reset = genwqe_err_result_none, + .resume = genwqe_err_resume, +}; + +static struct pci_driver genwqe_driver = { + .name = genwqe_driver_name, + .id_table = genwqe_device_table, + .probe = genwqe_probe, + .remove = genwqe_remove, + .sriov_configure = genwqe_sriov_configure, + .err_handler = &genwqe_err_handler, +}; + +/** + * genwqe_init_module() - Driver registration and initialization + */ +static int __init genwqe_init_module(void) +{ + int rc; + + class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME); + if (IS_ERR(class_genwqe)) { + pr_err("[%s] create class failed\n", __func__); + return -ENOMEM; + } + + debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL); + if (!debugfs_genwqe) { + rc = -ENOMEM; + goto err_out; + } + + rc = pci_register_driver(&genwqe_driver); + if (rc != 0) { + pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc); + goto err_out0; + } + + return rc; + + err_out0: + debugfs_remove(debugfs_genwqe); + err_out: + class_destroy(class_genwqe); + return rc; +} + +/** + * genwqe_exit_module() - Driver exit + */ +static void __exit genwqe_exit_module(void) +{ + pci_unregister_driver(&genwqe_driver); + debugfs_remove(debugfs_genwqe); + class_destroy(class_genwqe); +} + +module_init(genwqe_init_module); +module_exit(genwqe_exit_module); diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h new file mode 100644 index 00000000000000..5e4dbd21f89ad1 --- /dev/null +++ b/drivers/misc/genwqe/card_base.h @@ -0,0 +1,557 @@ +#ifndef __CARD_BASE_H__ +#define __CARD_BASE_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Interfaces within the GenWQE module. 
Defines genwqe_card and + * ddcb_queue as well as ddcb_requ. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "genwqe_driver.h" + +#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */ +#define GENWQE_FLAG_MSI_ENABLED (1 << 0) + +#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */ +#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */ +#define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS) + +/* Compile parameters, some of them appear in debugfs for later adjustment */ +#define genwqe_ddcb_max 32 /* DDCBs on the work-queue */ +#define genwqe_polling_enabled 0 /* in case of irqs not working */ +#define genwqe_ddcb_software_timeout 10 /* timeout per DDCB in seconds */ +#define genwqe_kill_timeout 8 /* time until process gets killed */ +#define genwqe_vf_jobtimeout_msec 250 /* 250 msec */ +#define genwqe_pf_jobtimeout_msec 8000 /* 8 sec should be ok */ +#define genwqe_health_check_interval 4 /* <= 0: disabled */ + +/* Sysfs attribute groups used when we create the genwqe device */ +extern const struct attribute_group *genwqe_attribute_groups[]; + +/* + * Config space for Genwqe5 A7: + * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00 + * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00 + * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04] + * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00 + */ +#define PCI_DEVICE_GENWQE 0x044b /* Genwqe DeviceID */ + +#define PCI_SUBSYSTEM_ID_GENWQE5 0x035f /* Genwqe A5 Subsystem-ID */ +#define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b /* Genwqe A5 Subsystem-ID */ +#define PCI_CLASSCODE_GENWQE5 0x1200 /* UNKNOWN */ + +#define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000 +#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */ +#define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200 /* UNKNOWN */ + +#define GENWQE_SLU_ARCH_REQ 2 /* Required SLU architecture level */ + +/** + * struct genwqe_reg - Genwqe data dump functionality + */ +struct genwqe_reg { + u32 addr; + u32 idx; + u64 val; +}; + +/* + * enum genwqe_dbg_type - Specify chip unit to dump/debug + */ +enum genwqe_dbg_type { + GENWQE_DBG_UNIT0 = 0, /* captured before prev errs cleared */ + GENWQE_DBG_UNIT1 = 1, + GENWQE_DBG_UNIT2 = 2, + GENWQE_DBG_UNIT3 = 3, + GENWQE_DBG_UNIT4 = 4, + GENWQE_DBG_UNIT5 = 5, + GENWQE_DBG_UNIT6 = 6, + GENWQE_DBG_UNIT7 = 7, + GENWQE_DBG_REGS = 8, + GENWQE_DBG_DMA = 9, + GENWQE_DBG_UNITS = 10, /* max number of possible debug units */ +}; + +/* Software error injection to simulate card failures */ +#define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001 /* injects -1 reg reads */ +#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */ +#define GENWQE_INJECT_GFIR_FATAL 0x00000004 /* GFIR = 0x0000ffff */ +#define GENWQE_INJECT_GFIR_INFO 0x00000008 /* GFIR = 0xffff0000 */ + +/* + * Genwqe card description and management data. + * + * Error-handling in case of card malfunction + * ------------------------------------------ + * + * If the card is detected to be defective the outside environment + * will cause the PCI layer to call deinit (the cleanup function for + * probe). This is the same effect like doing a unbind/bind operation + * on the card. + * + * The genwqe card driver implements a health checking thread which + * verifies the card function. If this detects a problem the cards + * device is being shutdown and restarted again, along with a reset of + * the card and queue. 
+ * + * All functions accessing the card device return either -EIO or -ENODEV + * code to indicate the malfunction to the user. The user has to close + * the file descriptor and open a new one, once the card becomes + * available again. + * + * If the open file descriptor is setup to receive SIGIO, the signal is + * genereated for the application which has to provide a handler to + * react on it. If the application does not close the open + * file descriptor a SIGKILL is send to enforce freeing the cards + * resources. + * + * I did not find a different way to prevent kernel problems due to + * reference counters for the cards character devices getting out of + * sync. The character device deallocation does not block, even if + * there is still an open file descriptor pending. If this pending + * descriptor is closed, the data structures used by the character + * device is reinstantiated, which will lead to the reference counter + * dropping below the allowed values. + * + * Card recovery + * ------------- + * + * To test the internal driver recovery the following command can be used: + * sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject' + */ + + +/** + * struct dma_mapping_type - Mapping type definition + * + * To avoid memcpying data arround we use user memory directly. To do + * this we need to pin/swap-in the memory and request a DMA address + * for it. + */ +enum dma_mapping_type { + GENWQE_MAPPING_RAW = 0, /* contignous memory buffer */ + GENWQE_MAPPING_SGL_TEMP, /* sglist dynamically used */ + GENWQE_MAPPING_SGL_PINNED, /* sglist used with pinning */ +}; + +/** + * struct dma_mapping - Information about memory mappings done by the driver + */ +struct dma_mapping { + enum dma_mapping_type type; + + void *u_vaddr; /* user-space vaddr/non-aligned */ + void *k_vaddr; /* kernel-space vaddr/non-aligned */ + dma_addr_t dma_addr; /* physical DMA address */ + + struct page **page_list; /* list of pages used by user buff */ + dma_addr_t *dma_list; /* list of dma addresses per page */ + unsigned int nr_pages; /* number of pages */ + unsigned int size; /* size in bytes */ + + struct list_head card_list; /* list of usr_maps for card */ + struct list_head pin_list; /* list of pinned memory for dev */ +}; + +static inline void genwqe_mapping_init(struct dma_mapping *m, + enum dma_mapping_type type) +{ + memset(m, 0, sizeof(*m)); + m->type = type; +} + +/** + * struct ddcb_queue - DDCB queue data + * @ddcb_max: Number of DDCBs on the queue + * @ddcb_next: Next free DDCB + * @ddcb_act: Next DDCB supposed to finish + * @ddcb_seq: Sequence number of last DDCB + * @ddcbs_in_flight: Currently enqueued DDCBs + * @ddcbs_completed: Number of already completed DDCBs + * @busy: Number of -EBUSY returns + * @ddcb_daddr: DMA address of first DDCB in the queue + * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue + * @ddcb_req: Associated requests (one per DDCB) + * @ddcb_waitqs: Associated wait queues (one per DDCB) + * @ddcb_lock: Lock to protect queuing operations + * @ddcb_waitq: Wait on next DDCB finishing + */ + +struct ddcb_queue { + int ddcb_max; /* amount of DDCBs */ + int ddcb_next; /* next available DDCB num */ + int ddcb_act; /* DDCB to be processed */ + u16 ddcb_seq; /* slc seq num */ + unsigned int ddcbs_in_flight; /* number of ddcbs in processing */ + unsigned int ddcbs_completed; + unsigned int ddcbs_max_in_flight; + unsigned int busy; /* how many times -EBUSY? 
*/ + + dma_addr_t ddcb_daddr; /* DMA address */ + struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */ + struct ddcb_requ **ddcb_req; /* ddcb processing parameter */ + wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */ + + spinlock_t ddcb_lock; /* exclusive access to queue */ + wait_queue_head_t ddcb_waitq; /* wait for ddcb processing */ + + /* registers or the respective queue to be used */ + u32 IO_QUEUE_CONFIG; + u32 IO_QUEUE_STATUS; + u32 IO_QUEUE_SEGMENT; + u32 IO_QUEUE_INITSQN; + u32 IO_QUEUE_WRAP; + u32 IO_QUEUE_OFFSET; + u32 IO_QUEUE_WTIME; + u32 IO_QUEUE_ERRCNTS; + u32 IO_QUEUE_LRW; +}; + +/* + * GFIR, SLU_UNITCFG, APP_UNITCFG + * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC. + */ +#define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64))) + +struct genwqe_ffdc { + unsigned int entries; + struct genwqe_reg *regs; +}; + +/** + * struct genwqe_dev - GenWQE device information + * @card_state: Card operation state, see above + * @ffdc: First Failure Data Capture buffers for each unit + * @card_thread: Working thread to operate the DDCB queue + * @card_waitq: Wait queue used in card_thread + * @queue: DDCB queue + * @health_thread: Card monitoring thread (only for PFs) + * @health_waitq: Wait queue used in health_thread + * @pci_dev: Associated PCI device (function) + * @mmio: Base address of 64-bit register space + * @mmio_len: Length of register area + * @file_lock: Lock to protect access to file_list + * @file_list: List of all processes with open GenWQE file descriptors + * + * This struct contains all information needed to communicate with a + * GenWQE card. It is initialized when a GenWQE device is found and + * destroyed when it goes away. It holds data to maintain the queue as + * well as data needed to feed the user interfaces. + */ +struct genwqe_dev { + enum genwqe_card_state card_state; + spinlock_t print_lock; + + int card_idx; /* card index 0..CARD_NO_MAX-1 */ + u64 flags; /* general flags */ + + /* FFDC data gathering */ + struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS]; + + /* DDCB workqueue */ + struct task_struct *card_thread; + wait_queue_head_t queue_waitq; + struct ddcb_queue queue; /* genwqe DDCB queue */ + unsigned int irqs_processed; + + /* Card health checking thread */ + struct task_struct *health_thread; + wait_queue_head_t health_waitq; + + /* char device */ + dev_t devnum_genwqe; /* major/minor num card */ + struct class *class_genwqe; /* reference to class object */ + struct device *dev; /* for device creation */ + struct cdev cdev_genwqe; /* char device for card */ + + struct dentry *debugfs_root; /* debugfs card root directory */ + struct dentry *debugfs_genwqe; /* debugfs driver root directory */ + + /* pci resources */ + struct pci_dev *pci_dev; /* PCI device */ + void __iomem *mmio; /* BAR-0 MMIO start */ + unsigned long mmio_len; + u16 num_vfs; + u32 vf_jobtimeout_msec[GENWQE_MAX_VFS]; + int is_privileged; /* access to all regs possible */ + + /* config regs which we need often */ + u64 slu_unitcfg; + u64 app_unitcfg; + u64 softreset; + u64 err_inject; + u64 last_gfir; + char app_name[5]; + + spinlock_t file_lock; /* lock for open files */ + struct list_head file_list; /* list of open files */ + + /* debugfs parameters */ + int ddcb_software_timeout; /* wait until DDCB times out */ + int skip_recovery; /* circumvention if recovery fails */ + int kill_timeout; /* wait after sending SIGKILL */ +}; + +/** + * enum genwqe_requ_state - State of a DDCB execution request + */ +enum genwqe_requ_state { + GENWQE_REQU_NEW = 0, + GENWQE_REQU_ENQUEUED = 1, + 
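+	/* TAPPED: handed to the hardware, see __genwqe_enqueue_ddcb() */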
GENWQE_REQU_TAPPED = 2, + GENWQE_REQU_FINISHED = 3, + GENWQE_REQU_STATE_MAX, +}; + +/** + * struct ddcb_requ - Kernel internal representation of the DDCB request + * @cmd: User space representation of the DDCB execution request + */ +struct ddcb_requ { + /* kernel specific content */ + enum genwqe_requ_state req_state; /* request status */ + int num; /* ddcb_no for this request */ + struct ddcb_queue *queue; /* associated queue */ + + struct dma_mapping dma_mappings[DDCB_FIXUPS]; + struct sg_entry *sgl[DDCB_FIXUPS]; + dma_addr_t sgl_dma_addr[DDCB_FIXUPS]; + size_t sgl_size[DDCB_FIXUPS]; + + /* kernel/user shared content */ + struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */ + struct genwqe_debug_data debug_data; +}; + +/** + * struct genwqe_file - Information for open GenWQE devices + */ +struct genwqe_file { + struct genwqe_dev *cd; + struct genwqe_driver *client; + struct file *filp; + + struct fasync_struct *async_queue; + struct task_struct *owner; + struct list_head list; /* entry in list of open files */ + + spinlock_t map_lock; /* lock for dma_mappings */ + struct list_head map_list; /* list of dma_mappings */ + + spinlock_t pin_lock; /* lock for pinned memory */ + struct list_head pin_list; /* list of pinned memory */ +}; + +int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */ +int genwqe_finish_queue(struct genwqe_dev *cd); +int genwqe_release_service_layer(struct genwqe_dev *cd); + +/** + * genwqe_get_slu_id() - Read Service Layer Unit Id + * Return: 0x00: Development code + * 0x01: SLC1 (old) + * 0x02: SLC2 (sept2012) + * 0x03: SLC2 (feb2013, generic driver) + */ +static inline int genwqe_get_slu_id(struct genwqe_dev *cd) +{ + return (int)((cd->slu_unitcfg >> 32) & 0xff); +} + +int genwqe_ddcbs_in_flight(struct genwqe_dev *cd); + +u8 genwqe_card_type(struct genwqe_dev *cd); +int genwqe_card_reset(struct genwqe_dev *cd); +int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count); +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd); + +int genwqe_device_create(struct genwqe_dev *cd); +int genwqe_device_remove(struct genwqe_dev *cd); + +/* debugfs */ +int genwqe_init_debugfs(struct genwqe_dev *cd); +void genqwe_exit_debugfs(struct genwqe_dev *cd); + +int genwqe_read_softreset(struct genwqe_dev *cd); + +/* Hardware Circumventions */ +int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd); +int genwqe_flash_readback_fails(struct genwqe_dev *cd); + +/** + * genwqe_write_vreg() - Write register in VF window + * @cd: genwqe device + * @reg: register address + * @val: value to write + * @func: 0: PF, 1: VF0, ..., 15: VF14 + */ +int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func); + +/** + * genwqe_read_vreg() - Read register in VF window + * @cd: genwqe device + * @reg: register address + * @func: 0: PF, 1: VF0, ..., 15: VF14 + * + * Return: content of the register + */ +u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func); + +/* FFDC Buffer Management */ +int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id); +int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id, + struct genwqe_reg *regs, unsigned int max_regs); +int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, + unsigned int max_regs, int all); +int genwqe_ffdc_dump_dma(struct genwqe_dev *cd, + struct genwqe_reg *regs, unsigned int max_regs); + +int genwqe_init_debug_data(struct genwqe_dev *cd, + struct genwqe_debug_data *d); + +void genwqe_init_crc32(void); +int genwqe_read_app_id(struct genwqe_dev 
*cd, char *app_name, int len); + +/* Memory allocation/deallocation; dma address handling */ +int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, + void *uaddr, unsigned long size, + struct ddcb_requ *req); + +int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, + struct ddcb_requ *req); + +struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages, + dma_addr_t *dma_addr, size_t *sgl_size); + +void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, + dma_addr_t dma_addr, size_t size); + +int genwqe_setup_sgl(struct genwqe_dev *cd, + unsigned long offs, + unsigned long size, + struct sg_entry *sgl, /* genwqe sgl */ + dma_addr_t dma_addr, size_t sgl_size, + dma_addr_t *dma_list, int page_offs, int num_pages); + +int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, + int size); + +static inline bool dma_mapping_used(struct dma_mapping *m) +{ + if (!m) + return 0; + return m->size != 0; +} + +/** + * __genwqe_execute_ddcb() - Execute DDCB request with addr translation + * + * This function will do the address translation changes to the DDCBs + * according to the definitions required by the ATS field. It looks up + * the memory allocation buffer or does vmap/vunmap for the respective + * user-space buffers, inclusive page pinning and scatter gather list + * buildup and teardown. + */ +int __genwqe_execute_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd); + +/** + * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation + * + * This version will not do address translation or any modifcation of + * the DDCB data. It is used e.g. for the MoveFlash DDCB which is + * entirely prepared by the driver itself. That means the appropriate + * DMA addresses are already in the DDCB and do not need any + * modification. + */ +int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd); + +int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); +int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); +int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); + +/* register access */ +int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val); +u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs); +int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val); +u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs); + +void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, + dma_addr_t *dma_handle); +void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, + void *vaddr, dma_addr_t dma_handle); + +/* Base clock frequency in MHz */ +int genwqe_base_clock_frequency(struct genwqe_dev *cd); + +/* Before FFDC is captured the traps should be stopped. */ +void genwqe_stop_traps(struct genwqe_dev *cd); +void genwqe_start_traps(struct genwqe_dev *cd); + +/* Hardware circumvention */ +bool genwqe_need_err_masking(struct genwqe_dev *cd); + +/** + * genwqe_is_privileged() - Determine operation mode for PCI function + * + * On Intel with SRIOV support we see: + * PF: is_physfn = 1 is_virtfn = 0 + * VF: is_physfn = 0 is_virtfn = 1 + * + * On Systems with no SRIOV support _and_ virtualized systems we get: + * is_physfn = 0 is_virtfn = 0 + * + * Other vendors have individual pci device ids to distinguish between + * virtual function drivers and physical function drivers. GenWQE + * unfortunately has just on pci device id for both, VFs and PF. 
+ * + * The following code is used to distinguish if the card is running in + * privileged mode, either as true PF or in a virtualized system with + * full register access e.g. currently on PowerPC. + * + * if (pci_dev->is_virtfn) + * cd->is_privileged = 0; + * else + * cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) + * != IO_ILLEGAL_VALUE); + */ +static inline int genwqe_is_privileged(struct genwqe_dev *cd) +{ + return cd->is_privileged; +} + +#endif /* __CARD_BASE_H__ */ diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c new file mode 100644 index 00000000000000..6f1acc0ccf887f --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.c @@ -0,0 +1,1376 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Device Driver Control Block (DDCB) queue support. Definition of + * interrupt handlers for queue support as well as triggering the + * health monitor code in case of problems. The current hardware uses + * an MSI interrupt which is shared between error handling and + * functional code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "card_base.h" +#include "card_ddcb.h" + +/* + * N: next DDCB, this is where the next DDCB will be put. + * A: active DDCB, this is where the code will look for the next completion. + * x: DDCB is enqueued, we are waiting for its completion. 
+ + * Situation (1): Empty queue + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | | | | | | | + * +---+---+---+---+---+---+---+---+ + * A/N + * enqueued_ddcbs = A - N = 2 - 2 = 0 + * + * Situation (2): Wrapped, N > A + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | x | x | | | | | + * +---+---+---+---+---+---+---+---+ + * A N + * enqueued_ddcbs = N - A = 4 - 2 = 2 + * + * Situation (3): Queue wrapped, A > N + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | | | x | x | x | x | + * +---+---+---+---+---+---+---+---+ + * N A + * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6 + * + * Situation (4a): Queue full N > A + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | x | x | x | x | x | | + * +---+---+---+---+---+---+---+---+ + * A N + * + * enqueued_ddcbs = N - A = 7 - 0 = 7 + * + * Situation (4a): Queue full A > N + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | x | | x | x | x | x | + * +---+---+---+---+---+---+---+---+ + * N A + * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7 + */ + +static int queue_empty(struct ddcb_queue *queue) +{ + return queue->ddcb_next == queue->ddcb_act; +} + +static int queue_enqueued_ddcbs(struct ddcb_queue *queue) +{ + if (queue->ddcb_next >= queue->ddcb_act) + return queue->ddcb_next - queue->ddcb_act; + + return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); +} + +static int queue_free_ddcbs(struct ddcb_queue *queue) +{ + int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; + + if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */ + return 0; + } + return free_ddcbs; +} + +/* + * Use of the PRIV field in the DDCB for queue debugging: + * + * (1) Trying to get rid of a DDCB which saw a timeout: + * pddcb->priv[6] = 0xcc; # cleared + * + * (2) Append a DDCB via NEXT bit: + * pddcb->priv[7] = 0xaa; # appended + * + * (3) DDCB needed tapping: + * pddcb->priv[7] = 0xbb; # tapped + * + * (4) DDCB marked as correctly finished: + * pddcb->priv[6] = 0xff; # finished + */ + +static inline void ddcb_mark_tapped(struct ddcb *pddcb) +{ + pddcb->priv[7] = 0xbb; /* tapped */ +} + +static inline void ddcb_mark_appended(struct ddcb *pddcb) +{ + pddcb->priv[7] = 0xaa; /* appended */ +} + +static inline void ddcb_mark_cleared(struct ddcb *pddcb) +{ + pddcb->priv[6] = 0xcc; /* cleared */ +} + +static inline void ddcb_mark_finished(struct ddcb *pddcb) +{ + pddcb->priv[6] = 0xff; /* finished */ +} + +static inline void ddcb_mark_unused(struct ddcb *pddcb) +{ + pddcb->priv_64 = cpu_to_be64(0); /* not tapped */ +} + +/** + * genwqe_crc16() - Generate 16-bit crc as required for DDCBs + * @buff: pointer to data buffer + * @len: length of data for calculation + * @init: initial crc (0xffff at start) + * + * Polynomial = x^16 + x^12 + x^5 + 1 (0x1021) + * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff + * should result in a crc16 of 0x89c3 + * + * Return: crc16 checksum in big endian format ! 
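+ *
+ * The driver uses this helper both to compute the ICRC before a
+ * DDCB is enqueued (init 0xffff) and to verify the VCRC the card
+ * returns over the ASV, see __genwqe_enqueue_ddcb() and
+ * genwqe_check_ddcb_queue().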
+ */ +static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init) +{ + return crc_itu_t(init, buff, len); +} + +static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + int i; + struct ddcb *pddcb; + unsigned long flags; + struct pci_dev *pci_dev = cd->pci_dev; + + spin_lock_irqsave(&cd->print_lock, flags); + + dev_info(&pci_dev->dev, + "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n", + cd->card_idx, queue->ddcb_act, queue->ddcb_next); + + pddcb = queue->ddcb_vaddr; + for (i = 0; i < queue->ddcb_max; i++) { + dev_err(&pci_dev->dev, + " %c %-3d: RETC=%03x SEQ=%04x " + "HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n", + i == queue->ddcb_act ? '>' : ' ', + i, + be16_to_cpu(pddcb->retc_16), + be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, + pddcb->shi, + be64_to_cpu(pddcb->priv_64), + pddcb->cmd); + pddcb++; + } + spin_unlock_irqrestore(&cd->print_lock, flags); +} + +struct genwqe_ddcb_cmd *ddcb_requ_alloc(void) +{ + struct ddcb_requ *req; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return NULL; + + return &req->cmd; +} + +void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd) +{ + struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + kfree(req); +} + +static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req) +{ + return req->req_state; +} + +static inline void ddcb_requ_set_state(struct ddcb_requ *req, + enum genwqe_requ_state new_state) +{ + req->req_state = new_state; +} + +static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req) +{ + return req->cmd.ddata_addr != 0x0; +} + +/** + * ddcb_requ_finished() - Returns the hardware state of the associated DDCB + * @cd: pointer to genwqe device descriptor + * @req: DDCB work request + * + * Status of ddcb_requ mirrors this hardware state, but is copied in + * the ddcb_requ on interrupt/polling function. The lowlevel code + * should check the hardware state directly, the higher level code + * should check the copy. + * + * This function will also return true if the state of the queue is + * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the + * shutdown case. + */ +static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) || + (cd->card_state != GENWQE_CARD_USED); +} + +/** + * enqueue_ddcb() - Enqueue a DDCB + * @cd: pointer to genwqe device descriptor + * @queue: queue this operation should be done on + * @ddcb_no: pointer to ddcb number being tapped + * + * Start execution of DDCB by tapping or append to queue via NEXT + * bit. This is done by an atomic 'compare and swap' instruction and + * checking SHI and HSI of the previous DDCB. + * + * This function must only be called with ddcb_lock held. + * + * Return: 1 if new DDCB is appended to previous + * 2 if DDCB queue is tapped via register/simulation + */ +#define RET_DDCB_APPENDED 1 +#define RET_DDCB_TAPPED 2 + +static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, + struct ddcb *pddcb, int ddcb_no) +{ + unsigned int try; + int prev_no; + struct ddcb *prev_ddcb; + __be32 old, new, icrc_hsi_shi; + u64 num; + + /* + * For performance checks a Dispatch Timestamp can be put into + * DDCB It is supposed to use the SLU's free running counter, + * but this requires PCIe cycles. + */ + ddcb_mark_unused(pddcb); + + /* check previous DDCB if already fetched */ + prev_no = (ddcb_no == 0) ? 
queue->ddcb_max - 1 : ddcb_no - 1; + prev_ddcb = &queue->ddcb_vaddr[prev_no]; + + /* + * It might have happened that the HSI.FETCHED bit is + * set. Retry in this case. Therefore I expect maximum 2 times + * trying. + */ + ddcb_mark_appended(pddcb); + for (try = 0; try < 2; try++) { + old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */ + + /* try to append via NEXT bit if prev DDCB is not completed */ + if ((old & DDCB_COMPLETED_BE32) != 0x00000000) + break; + + new = (old | DDCB_NEXT_BE32); + icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); + + if (icrc_hsi_shi == old) + return RET_DDCB_APPENDED; /* appended to queue */ + } + + /* Queue must be re-started by updating QUEUE_OFFSET */ + ddcb_mark_tapped(pddcb); + num = (u64)ddcb_no << 8; + __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ + + return RET_DDCB_TAPPED; +} + +/** + * copy_ddcb_results() - Copy output state from real DDCB to request + * + * Copy DDCB ASV to request struct. There is no endian + * conversion made, since data structure in ASV is still + * unknown here. + * + * This is needed by: + * - genwqe_purge_ddcb() + * - genwqe_check_ddcb_queue() + */ +static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no) +{ + struct ddcb_queue *queue = req->queue; + struct ddcb *pddcb = &queue->ddcb_vaddr[req->num]; + + memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH); + + /* copy status flags of the variant part */ + req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16); + req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64); + req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64); + + req->cmd.attn = be16_to_cpu(pddcb->attn_16); + req->cmd.progress = be32_to_cpu(pddcb->progress_32); + req->cmd.retc = be16_to_cpu(pddcb->retc_16); + + if (ddcb_requ_collect_debug_data(req)) { + int prev_no = (ddcb_no == 0) ? + queue->ddcb_max - 1 : ddcb_no - 1; + struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no]; + + memcpy(&req->debug_data.ddcb_finished, pddcb, + sizeof(req->debug_data.ddcb_finished)); + memcpy(&req->debug_data.ddcb_prev, prev_pddcb, + sizeof(req->debug_data.ddcb_prev)); + } +} + +/** + * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work equests. + * @cd: pointer to genwqe device descriptor + * + * Return: Number of DDCBs which were finished + */ +static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, + struct ddcb_queue *queue) +{ + unsigned long flags; + int ddcbs_finished = 0; + struct pci_dev *pci_dev = cd->pci_dev; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + /* FIXME avoid soft locking CPU */ + while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) { + + struct ddcb *pddcb; + struct ddcb_requ *req; + u16 vcrc, vcrc_16, retc_16; + + pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; + + if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == + 0x00000000) + goto go_home; /* not completed, continue waiting */ + + /* Note: DDCB could be purged */ + + req = queue->ddcb_req[queue->ddcb_act]; + if (req == NULL) { + /* this occurs if DDCB is purged, not an error */ + /* Move active DDCB further; Nothing to do anymore. */ + goto pick_next_one; + } + + /* + * HSI=0x44 (fetched and completed), but RETC is + * 0x101, or even worse 0x000. + * + * In case of seeing the queue in inconsistent state + * we read the errcnts and the queue status to provide + * a trigger for our PCIe analyzer stop capturing. 
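+ *
+ * The dump below is informational only; the DDCB is still
+ * copied back and completed like any other one.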
+ */ + retc_16 = be16_to_cpu(pddcb->retc_16); + if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) { + u64 errcnts, status; + u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr; + + errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS); + status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); + + dev_err(&pci_dev->dev, + "[%s] SEQN=%04x HSI=%02x RETC=%03x " + " Q_ERRCNTS=%016llx Q_STATUS=%016llx\n" + " DDCB_DMA_ADDR=%016llx\n", + __func__, be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, retc_16, errcnts, status, + queue->ddcb_daddr + ddcb_offs); + } + + copy_ddcb_results(req, queue->ddcb_act); + queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */ + + dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + ddcb_mark_finished(pddcb); + + /* calculate CRC_16 to see if VCRC is correct */ + vcrc = genwqe_crc16(pddcb->asv, + VCRC_LENGTH(req->cmd.asv_length), + 0xffff); + vcrc_16 = be16_to_cpu(pddcb->vcrc_16); + if (vcrc != vcrc_16) { + printk_ratelimited(KERN_ERR + "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d " + "bytes vcrc_data=%04x is not vcrc_card=%04x\n", + GENWQE_DEVNAME, dev_name(&pci_dev->dev), + pddcb->pre, VCRC_LENGTH(req->cmd.asv_length), + vcrc, vcrc_16); + } + + ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); + queue->ddcbs_completed++; + queue->ddcbs_in_flight--; + + /* wake up process waiting for this DDCB */ + wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); + +pick_next_one: + queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max; + ddcbs_finished++; + } + + go_home: + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return ddcbs_finished; +} + +/** + * __genwqe_wait_ddcb(): Waits until DDCB is completed + * @cd: pointer to genwqe device descriptor + * @req: pointer to requsted DDCB parameters + * + * The Service Layer will update the RETC in DDCB when processing is + * pending or done. + * + * Return: > 0 remaining jiffies, DDCB completed + * -ETIMEDOUT when timeout + * -ERESTARTSYS when ^C + * -EINVAL when unknown error condition + * + * When an error is returned the called needs to ensure that + * purge_ddcb() is being called to get the &req removed from the + * queue. + */ +int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + int rc; + unsigned int ddcb_no; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + + if (req == NULL) + return -EINVAL; + + queue = req->queue; + if (queue == NULL) + return -EINVAL; + + ddcb_no = req->num; + if (ddcb_no >= queue->ddcb_max) + return -EINVAL; + + rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no], + ddcb_requ_finished(cd, req), + genwqe_ddcb_software_timeout * HZ); + + /* + * We need to distinguish 3 cases here: + * 1. rc == 0 timeout occured + * 2. rc == -ERESTARTSYS signal received + * 3. rc > 0 remaining jiffies condition is true + */ + if (rc == 0) { + struct ddcb_queue *queue = req->queue; + struct ddcb *pddcb; + + /* + * Timeout may be caused by long task switching time. + * When timeout happens, check if the request has + * meanwhile completed. 
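+ * Only if the request is still unfinished after this re-check
+ * do we dump the DDCB state and report -ETIMEDOUT.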
+ */ + genwqe_check_ddcb_queue(cd, req->queue); + if (ddcb_requ_finished(cd, req)) + return rc; + + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n", + __func__, req->num, rc, ddcb_requ_get_state(req), + req); + dev_err(&pci_dev->dev, + "[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__, + __genwqe_readq(cd, queue->IO_QUEUE_STATUS)); + + pddcb = &queue->ddcb_vaddr[req->num]; + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + print_ddcb_info(cd, req->queue); + return -ETIMEDOUT; + + } else if (rc == -ERESTARTSYS) { + return rc; + /* + * EINTR: Stops the application + * ERESTARTSYS: Restartable systemcall; called again + */ + + } else if (rc < 0) { + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d unknown result (rc=%d) %d!\n", + __func__, req->num, rc, ddcb_requ_get_state(req)); + return -EINVAL; + } + + /* Severe error occured. Driver is forced to stop operation */ + if (cd->card_state != GENWQE_CARD_USED) { + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d forced to stop (rc=%d)\n", + __func__, req->num, rc); + return -EIO; + } + return rc; +} + +/** + * get_next_ddcb() - Get next available DDCB + * @cd: pointer to genwqe device descriptor + * + * DDCB's content is completely cleared but presets for PRE and + * SEQNUM. This function must only be called when ddcb_lock is held. + * + * Return: NULL if no empty DDCB available otherwise ptr to next DDCB. + */ +static struct ddcb *get_next_ddcb(struct genwqe_dev *cd, + struct ddcb_queue *queue, + int *num) +{ + u64 *pu64; + struct ddcb *pddcb; + + if (queue_free_ddcbs(queue) == 0) /* queue is full */ + return NULL; + + /* find new ddcb */ + pddcb = &queue->ddcb_vaddr[queue->ddcb_next]; + + /* if it is not completed, we are not allowed to use it */ + /* barrier(); */ + if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000) + return NULL; + + *num = queue->ddcb_next; /* internal DDCB number */ + queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max; + + /* clear important DDCB fields */ + pu64 = (u64 *)pddcb; + pu64[0] = 0ULL; /* offs 0x00 (ICRC,HSI,SHI,...) */ + pu64[1] = 0ULL; /* offs 0x01 (ACFUNC,CMD...) */ + + /* destroy previous results in ASV */ + pu64[0x80/8] = 0ULL; /* offs 0x80 (ASV + 0) */ + pu64[0x88/8] = 0ULL; /* offs 0x88 (ASV + 0x08) */ + pu64[0x90/8] = 0ULL; /* offs 0x90 (ASV + 0x10) */ + pu64[0x98/8] = 0ULL; /* offs 0x98 (ASV + 0x18) */ + pu64[0xd0/8] = 0ULL; /* offs 0xd0 (RETC,ATTN...) */ + + pddcb->pre = DDCB_PRESET_PRE; /* 128 */ + pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++); + return pddcb; +} + +/** + * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue + * @cd: genwqe device descriptor + * @req: DDCB request + * + * This will fail when the request was already FETCHED. In this case + * we need to wait until it is finished. Else the DDCB can be + * reused. This function also ensures that the request data structure + * is removed from ddcb_req[]. + * + * Do not forget to call this function when genwqe_wait_ddcb() fails, + * such that the request gets really removed from ddcb_req[]. 
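+ *
+ * A typical caller sequence (sketch, see __genwqe_execute_raw_ddcb()):
+ *
+ *   rc = __genwqe_enqueue_ddcb(cd, req);
+ *   if (rc == 0) {
+ *           rc = __genwqe_wait_ddcb(cd, req);
+ *           if (rc < 0)
+ *                   __genwqe_purge_ddcb(cd, req);
+ *   }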
+ * + * Return: 0 success + */ +int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + struct ddcb *pddcb = NULL; + unsigned int t; + unsigned long flags; + struct ddcb_queue *queue = req->queue; + struct pci_dev *pci_dev = cd->pci_dev; + u64 queue_status; + __be32 icrc_hsi_shi = 0x0000; + __be32 old, new; + + /* unsigned long flags; */ + if (genwqe_ddcb_software_timeout <= 0) { + dev_err(&pci_dev->dev, + "[%s] err: software timeout is not set!\n", __func__); + return -EFAULT; + } + + pddcb = &queue->ddcb_vaddr[req->num]; + + for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) { + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + /* Check if req was meanwhile finished */ + if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) + goto go_home; + + /* try to set PURGE bit if FETCHED/COMPLETED are not set */ + old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */ + if ((old & DDCB_FETCHED_BE32) == 0x00000000) { + + new = (old | DDCB_PURGE_BE32); + icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32, + old, new); + if (icrc_hsi_shi == old) + goto finish_ddcb; + } + + /* normal finish with HSI bit */ + barrier(); + icrc_hsi_shi = pddcb->icrc_hsi_shi_32; + if (icrc_hsi_shi & DDCB_COMPLETED_BE32) + goto finish_ddcb; + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + /* + * Here the check_ddcb() function will most likely + * discover this DDCB to be finished some point in + * time. It will mark the req finished and free it up + * in the list. + */ + + copy_ddcb_results(req, req->num); /* for the failing case */ + msleep(100); /* sleep for 1/10 second and try again */ + continue; + +finish_ddcb: + copy_ddcb_results(req, req->num); + ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); + queue->ddcbs_in_flight--; + queue->ddcb_req[req->num] = NULL; /* delete from array */ + ddcb_mark_cleared(pddcb); + + /* Move active DDCB further; Nothing to do here anymore. */ + + /* + * We need to ensure that there is at least one free + * DDCB in the queue. To do that, we must update + * ddcb_act only if the COMPLETED bit is set for the + * DDCB we are working on else we treat that DDCB even + * if we PURGED it as occupied (hardware is supposed + * to set the COMPLETED bit yet!). + */ + icrc_hsi_shi = pddcb->icrc_hsi_shi_32; + if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) && + (queue->ddcb_act == req->num)) { + queue->ddcb_act = ((queue->ddcb_act + 1) % + queue->ddcb_max); + } +go_home: + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 0; + } + + /* + * If the card is dead and the queue is forced to stop, we + * might see this in the queue status register. 
+ */ + queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); + + dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d not purged and not completed " + "after %d seconds QSTAT=%016llx!!\n", + __func__, req->num, genwqe_ddcb_software_timeout, + queue_status); + + print_ddcb_info(cd, req->queue); + + return -EFAULT; +} + +int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d) +{ + int len; + struct pci_dev *pci_dev = cd->pci_dev; + + if (d == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: invalid memory for debug data!\n", + __func__); + return -EFAULT; + } + + len = sizeof(d->driver_version); + snprintf(d->driver_version, len, "%s", DRV_VERS_STRING); + d->slu_unitcfg = cd->slu_unitcfg; + d->app_unitcfg = cd->app_unitcfg; + return 0; +} + +/** + * __genwqe_enqueue_ddcb() - Enqueue a DDCB + * @cd: pointer to genwqe device descriptor + * @req: pointer to DDCB execution request + * + * Return: 0 if enqueuing succeeded + * -EIO if card is unusable/PCIe problems + * -EBUSY if enqueuing failed + */ +int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + struct ddcb *pddcb; + unsigned long flags; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + u16 icrc; + + if (cd->card_state != GENWQE_CARD_USED) { + printk_ratelimited(KERN_ERR + "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n", + GENWQE_DEVNAME, dev_name(&pci_dev->dev), + __func__, req->num); + return -EIO; + } + + queue = req->queue = &cd->queue; + + /* FIXME circumvention to improve performance when no irq is + * there. + */ + if (genwqe_polling_enabled) + genwqe_check_ddcb_queue(cd, queue); + + /* + * It must be ensured to process all DDCBs in successive + * order. Use a lock here in order to prevent nested DDCB + * enqueuing. + */ + spin_lock_irqsave(&queue->ddcb_lock, flags); + + pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */ + if (pddcb == NULL) { + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + queue->busy++; + return -EBUSY; + } + + if (queue->ddcb_req[req->num] != NULL) { + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + dev_err(&pci_dev->dev, + "[%s] picked DDCB %d with req=%p still in use!!\n", + __func__, req->num, req); + return -EFAULT; + } + ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED); + queue->ddcb_req[req->num] = req; + + pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts); + pddcb->cmd = req->cmd.cmd; + pddcb->acfunc = req->cmd.acfunc; /* functional unit */ + + /* + * We know that we can get retc 0x104 with CRC error, do not + * stop the queue in those cases for this command. XDIR = 1 + * does not work for old SLU versions. + * + * Last bitstream with the old XDIR behavior had SLU_ID + * 0x34199. + */ + if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull) + pddcb->xdir = 0x1; + else + pddcb->xdir = 0x0; + + + pddcb->psp = (((req->cmd.asiv_length / 8) << 4) | + ((req->cmd.asv_length / 8))); + pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts); + + /* + * If copying the whole DDCB_ASIV_LENGTH is impacting + * performance we need to change it to + * req->cmd.asiv_length. But simulation benefits from some + * non-architectured bits behind the architectured content. + * + * How much data is copied depends on the availability of the + * ATS field, which was introduced late. If the ATS field is + * supported ASIV is 8 bytes shorter than it used to be. 
Since + * the ATS field is copied too, the code should do exactly + * what it did before, but I wanted to make copying of the ATS + * field very explicit. + */ + if (genwqe_get_slu_id(cd) <= 0x2) { + memcpy(&pddcb->__asiv[0], /* destination */ + &req->cmd.__asiv[0], /* source */ + DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */ + } else { + pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats); + memcpy(&pddcb->n.asiv[0], /* destination */ + &req->cmd.asiv[0], /* source */ + DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */ + } + + pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */ + + /* + * Calculate CRC_16 for corresponding range PSP(7:4). Include + * empty 4 bytes prior to the data. + */ + icrc = genwqe_crc16((const u8 *)pddcb, + ICRC_LENGTH(req->cmd.asiv_length), 0xffff); + pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16); + + /* enable DDCB completion irq */ + if (!genwqe_polling_enabled) + pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32; + + dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + if (ddcb_requ_collect_debug_data(req)) { + /* use the kernel copy of debug data. copying back to + user buffer happens later */ + + genwqe_init_debug_data(cd, &req->debug_data); + memcpy(&req->debug_data.ddcb_before, pddcb, + sizeof(req->debug_data.ddcb_before)); + } + + enqueue_ddcb(cd, queue, pddcb, req->num); + queue->ddcbs_in_flight++; + + if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight) + queue->ddcbs_max_in_flight = queue->ddcbs_in_flight; + + ddcb_requ_set_state(req, GENWQE_REQU_TAPPED); + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + wake_up_interruptible(&cd->queue_waitq); + + return 0; +} + +/** + * __genwqe_execute_raw_ddcb() - Setup and execute DDCB + * @cd: pointer to genwqe device descriptor + * @req: user provided DDCB request + */ +int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd) +{ + int rc = 0; + struct pci_dev *pci_dev = cd->pci_dev; + struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + + if (cmd->asiv_length > DDCB_ASIV_LENGTH) { + dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n", + __func__, cmd->asiv_length); + return -EINVAL; + } + if (cmd->asv_length > DDCB_ASV_LENGTH) { + dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n", + __func__, cmd->asiv_length); + return -EINVAL; + } + rc = __genwqe_enqueue_ddcb(cd, req); + if (rc != 0) + return rc; + + rc = __genwqe_wait_ddcb(cd, req); + if (rc < 0) /* error or signal interrupt */ + goto err_exit; + + if (ddcb_requ_collect_debug_data(req)) { + if (copy_to_user((struct genwqe_debug_data __user *) + (unsigned long)cmd->ddata_addr, + &req->debug_data, + sizeof(struct genwqe_debug_data))) + return -EFAULT; + } + + /* + * Higher values than 0x102 indicate completion with faults, + * lower values than 0x102 indicate processing faults. Note + * that DDCB might have been purged. E.g. Cntl+C. + */ + if (cmd->retc != DDCB_RETC_COMPLETE) { + /* This might happen e.g. flash read, and needs to be + handled by the upper layer code. 
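+ The caller can inspect cmd->retc, cmd->attn and cmd->progress
+ (copied back by copy_ddcb_results()) to decide how to react.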
*/ + rc = -EBADMSG; /* not processed/error retc */ + } + + return rc; + + err_exit: + __genwqe_purge_ddcb(cd, req); + + if (ddcb_requ_collect_debug_data(req)) { + if (copy_to_user((struct genwqe_debug_data __user *) + (unsigned long)cmd->ddata_addr, + &req->debug_data, + sizeof(struct genwqe_debug_data))) + return -EFAULT; + } + return rc; +} + +/** + * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished + * + * We use this as condition for our wait-queue code. + */ +static int genwqe_next_ddcb_ready(struct genwqe_dev *cd) +{ + unsigned long flags; + struct ddcb *pddcb; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + if (queue_empty(queue)) { /* emtpy queue */ + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 0; + } + + pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; + if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */ + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 1; + } + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 0; +} + +/** + * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight + * + * Keep track on the number of DDCBs which ware currently in the + * queue. This is needed for statistics as well as conditon if we want + * to wait or better do polling in case of no interrupts available. + */ +int genwqe_ddcbs_in_flight(struct genwqe_dev *cd) +{ + unsigned long flags; + int ddcbs_in_flight = 0; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + ddcbs_in_flight += queue->ddcbs_in_flight; + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + return ddcbs_in_flight; +} + +static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + int rc, i; + struct ddcb *pddcb; + u64 val64; + unsigned int queue_size; + struct pci_dev *pci_dev = cd->pci_dev; + + if (genwqe_ddcb_max < 2) + return -EINVAL; + + queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE); + + queue->ddcbs_in_flight = 0; /* statistics */ + queue->ddcbs_max_in_flight = 0; + queue->ddcbs_completed = 0; + queue->busy = 0; + + queue->ddcb_seq = 0x100; /* start sequence number */ + queue->ddcb_max = genwqe_ddcb_max; /* module parameter */ + queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size, + &queue->ddcb_daddr); + if (queue->ddcb_vaddr == NULL) { + dev_err(&pci_dev->dev, + "[%s] **err: could not allocate DDCB **\n", __func__); + return -ENOMEM; + } + memset(queue->ddcb_vaddr, 0, queue_size); + + queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) * + queue->ddcb_max, GFP_KERNEL); + if (!queue->ddcb_req) { + rc = -ENOMEM; + goto free_ddcbs; + } + + queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) * + queue->ddcb_max, GFP_KERNEL); + if (!queue->ddcb_waitqs) { + rc = -ENOMEM; + goto free_requs; + } + + for (i = 0; i < queue->ddcb_max; i++) { + pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */ + pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32; + pddcb->retc_16 = cpu_to_be16(0xfff); + + queue->ddcb_req[i] = NULL; /* requests */ + init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */ + } + + queue->ddcb_act = 0; + queue->ddcb_next = 0; /* queue is empty */ + + spin_lock_init(&queue->ddcb_lock); + init_waitqueue_head(&queue->ddcb_waitq); + + val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */ + __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */ + __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr); + __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq); + 
__genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64); + return 0; + + free_requs: + kfree(queue->ddcb_req); + queue->ddcb_req = NULL; + free_ddcbs: + __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, + queue->ddcb_daddr); + queue->ddcb_vaddr = NULL; + queue->ddcb_daddr = 0ull; + return -ENODEV; + +} + +static int ddcb_queue_initialized(struct ddcb_queue *queue) +{ + return queue->ddcb_vaddr != NULL; +} + +static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + unsigned int queue_size; + + queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE); + + kfree(queue->ddcb_req); + queue->ddcb_req = NULL; + + if (queue->ddcb_vaddr) { + __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, + queue->ddcb_daddr); + queue->ddcb_vaddr = NULL; + queue->ddcb_daddr = 0ull; + } +} + +static irqreturn_t genwqe_pf_isr(int irq, void *dev_id) +{ + u64 gfir; + struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; + struct pci_dev *pci_dev = cd->pci_dev; + + /* + * In case of fatal FIR error the queue is stopped, such that + * we can safely check it without risking anything. + */ + cd->irqs_processed++; + wake_up_interruptible(&cd->queue_waitq); + + /* + * Checking for errors before kicking the queue might be + * safer, but slower for the good-case ... See above. + */ + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if ((gfir & GFIR_ERR_TRIGGER) != 0x0) { + + wake_up_interruptible(&cd->health_waitq); + + /* + * By default GFIRs causes recovery actions. This + * count is just for debug when recovery is masked. + */ + printk_ratelimited(KERN_ERR + "%s %s: [%s] GFIR=%016llx\n", + GENWQE_DEVNAME, dev_name(&pci_dev->dev), + __func__, gfir); + } + + return IRQ_HANDLED; +} + +static irqreturn_t genwqe_vf_isr(int irq, void *dev_id) +{ + struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; + + cd->irqs_processed++; + wake_up_interruptible(&cd->queue_waitq); + + return IRQ_HANDLED; +} + +/** + * genwqe_card_thread() - Work thread for the DDCB queue + * + * The idea is to check if there are DDCBs in processing. If there are + * some finished DDCBs, we process them and wakeup the + * requestors. Otherwise we give other processes time using + * cond_resched(). + */ +static int genwqe_card_thread(void *data) +{ + int should_stop = 0, rc = 0; + struct genwqe_dev *cd = (struct genwqe_dev *)data; + + while (!kthread_should_stop()) { + + genwqe_check_ddcb_queue(cd, &cd->queue); + + if (genwqe_polling_enabled) { + rc = wait_event_interruptible_timeout( + cd->queue_waitq, + genwqe_ddcbs_in_flight(cd) || + (should_stop = kthread_should_stop()), 1); + } else { + rc = wait_event_interruptible_timeout( + cd->queue_waitq, + genwqe_next_ddcb_ready(cd) || + (should_stop = kthread_should_stop()), HZ); + } + if (should_stop) + break; + + /* + * Avoid soft lockups on heavy loads; we do not want + * to disable our interrupts. + */ + cond_resched(); + } + return 0; +} + +/** + * genwqe_setup_service_layer() - Setup DDCB queue + * @cd: pointer to genwqe device descriptor + * + * Allocate DDCBs. Configure Service Layer Controller (SLC). 
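+ * Besides the queue itself, this starts the card worker thread and
+ * registers the interrupt handler, so the queue is fully operational
+ * when 0 is returned.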
+ * + * Return: 0 success + */ +int genwqe_setup_service_layer(struct genwqe_dev *cd) +{ + int rc; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + + if (genwqe_is_privileged(cd)) { + rc = genwqe_card_reset(cd); + if (rc < 0) { + dev_err(&pci_dev->dev, + "[%s] err: reset failed.\n", __func__); + return rc; + } + genwqe_read_softreset(cd); + } + + queue = &cd->queue; + queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG; + queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS; + queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT; + queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN; + queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET; + queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP; + queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME; + queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS; + queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW; + + rc = setup_ddcb_queue(cd, queue); + if (rc != 0) { + rc = -ENODEV; + goto err_out; + } + + init_waitqueue_head(&cd->queue_waitq); + cd->card_thread = kthread_run(genwqe_card_thread, cd, + GENWQE_DEVNAME "%d_thread", + cd->card_idx); + if (IS_ERR(cd->card_thread)) { + rc = PTR_ERR(cd->card_thread); + cd->card_thread = NULL; + goto stop_free_queue; + } + + rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS); + if (rc > 0) + rc = genwqe_set_interrupt_capability(cd, rc); + if (rc != 0) { + rc = -ENODEV; + goto stop_kthread; + } + + /* + * We must have all wait-queues initialized when we enable the + * interrupts. Otherwise we might crash if we get an early + * irq. + */ + init_waitqueue_head(&cd->health_waitq); + + if (genwqe_is_privileged(cd)) { + rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED, + GENWQE_DEVNAME, cd); + } else { + rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED, + GENWQE_DEVNAME, cd); + } + if (rc < 0) { + dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq); + goto stop_irq_cap; + } + + cd->card_state = GENWQE_CARD_USED; + return 0; + + stop_irq_cap: + genwqe_reset_interrupt_capability(cd); + stop_kthread: + kthread_stop(cd->card_thread); + cd->card_thread = NULL; + stop_free_queue: + free_ddcb_queue(cd, queue); + err_out: + return rc; +} + +/** + * queue_wake_up_all() - Handles fatal error case + * + * The PCI device got unusable and we have to stop all pending + * requests as fast as we can. The code after this must purge the + * DDCBs in question and ensure that all mappings are freed. + */ +static int queue_wake_up_all(struct genwqe_dev *cd) +{ + unsigned int i; + unsigned long flags; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + for (i = 0; i < queue->ddcb_max; i++) + wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + return 0; +} + +/** + * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces + * + * Relies on the pre-condition that there are no users of the card + * device anymore e.g. with open file-descriptors. + * + * This function must be robust enough to be called twice. + */ +int genwqe_finish_queue(struct genwqe_dev *cd) +{ + int i, rc, in_flight; + int waitmax = genwqe_ddcb_software_timeout; + struct pci_dev *pci_dev = cd->pci_dev; + struct ddcb_queue *queue = &cd->queue; + + if (!ddcb_queue_initialized(queue)) + return 0; + + /* Do not wipe out the error state. */ + if (cd->card_state == GENWQE_CARD_USED) + cd->card_state = GENWQE_CARD_UNUSED; + + /* Wake up all requests in the DDCB queue such that they + should be removed nicely. 
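+	   Anything still in flight afterwards is waited for below; if
+	   the queue does not drain within the timeout, -EIO is
+	   returned.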
+	 */
+	queue_wake_up_all(cd);
+
+	/* We must wait to get rid of the DDCBs in flight */
+	rc = 0;
+	for (i = 0; i < waitmax; i++) {
+		in_flight = genwqe_ddcbs_in_flight(cd);
+
+		if (in_flight == 0)
+			break;
+
+		dev_dbg(&pci_dev->dev,
+			" DEBUG [%d/%d] waiting for queue to get empty: "
+			"%d requests!\n", i, waitmax, in_flight);
+
+		/*
+		 * Severe error situation: The card itself has
+		 * 16 DDCB queues, each queue has e.g. 32 entries,
+		 * each DDCB has a hardware timeout of currently 250
+		 * msec but the PFs have a hardware timeout of 8 sec
+		 * ... so I take something large.
+		 */
+		msleep(1000);
+	}
+	if (i == waitmax) {
+		dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
+			__func__);
+		rc = -EIO;
+	}
+	return rc;
+}
+
+/**
+ * genwqe_release_service_layer() - Shutdown DDCB queue
+ * @cd: genwqe device descriptor
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_release_service_layer(struct genwqe_dev *cd)
+{
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	if (!ddcb_queue_initialized(&cd->queue))
+		return 1;
+
+	free_irq(pci_dev->irq, cd);
+	genwqe_reset_interrupt_capability(cd);
+
+	if (cd->card_thread != NULL) {
+		kthread_stop(cd->card_thread);
+		cd->card_thread = NULL;
+	}
+
+	free_ddcb_queue(cd, &cd->queue);
+	return 0;
+}
diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h
new file mode 100644
index 00000000000000..c4f26720753e02
--- /dev/null
+++ b/drivers/misc/genwqe/card_ddcb.h
@@ -0,0 +1,188 @@
+#ifndef __CARD_DDCB_H__
+#define __CARD_DDCB_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp
+ * Author: Joerg-Stephan Vogt
+ * Author: Michael Jung
+ * Author: Michael Ruettger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include
+#include
+
+#include "genwqe_driver.h"
+#include "card_base.h"
+
+/**
+ * struct ddcb - Device Driver Control Block DDCB
+ * @hsi: Hardware software interlock
+ * @shi: Software hardware interlock. Hsi and shi are used to interlock
+ *       software and hardware activities. We are using a compare and
+ *       swap operation to ensure that there are no races when
+ *       activating new DDCBs on the queue, or when we need to
+ *       purge a DDCB from a running queue.
+ * @acfunc: Accelerator function addresses a unit within the chip
+ * @cmd: Command to work on
+ * @cmdopts_16: Options for the command
+ * @asiv: Input data
+ * @asv: Output data
+ *
+ * The DDCB data format is big endian. Multiple consecutive DDCBs form
+ * a DDCB queue.
+ */
+#define ASIV_LENGTH 104 /* Old specification without ATS field */
+#define ASIV_LENGTH_ATS 96 /* New specification with ATS field */
+#define ASV_LENGTH 64
+
+struct ddcb {
+	union {
+		__be32 icrc_hsi_shi_32;	/* iCRC, Hardware/SW interlock */
+		struct {
+			__be16 icrc_16;
+			u8 hsi;
+			u8 shi;
+		};
+	};
+	u8 pre;			/* Preamble */
+	u8 xdir;		/* Execution Directives */
+	__be16 seqnum_16;	/* Sequence Number */
+
+	u8 acfunc;		/* Accelerator Function.. */
+	u8 cmd;			/* Command.
*/ + __be16 cmdopts_16; /* Command Options */ + u8 sur; /* Status Update Rate */ + u8 psp; /* Protection Section Pointer */ + __be16 rsvd_0e_16; /* Reserved invariant */ + + __be64 fwiv_64; /* Firmware Invariant. */ + + union { + struct { + __be64 ats_64; /* Address Translation Spec */ + u8 asiv[ASIV_LENGTH_ATS]; /* New ASIV */ + } n; + u8 __asiv[ASIV_LENGTH]; /* obsolete */ + }; + u8 asv[ASV_LENGTH]; /* Appl Spec Variant */ + + __be16 rsvd_c0_16; /* Reserved Variant */ + __be16 vcrc_16; /* Variant CRC */ + __be32 rsvd_32; /* Reserved unprotected */ + + __be64 deque_ts_64; /* Deque Time Stamp. */ + + __be16 retc_16; /* Return Code */ + __be16 attn_16; /* Attention/Extended Error Codes */ + __be32 progress_32; /* Progress indicator. */ + + __be64 cmplt_ts_64; /* Completion Time Stamp. */ + + /* The following layout matches the new service layer format */ + __be32 ibdc_32; /* Inbound Data Count (* 256) */ + __be32 obdc_32; /* Outbound Data Count (* 256) */ + + __be64 rsvd_SLH_64; /* Reserved for hardware */ + union { /* private data for driver */ + u8 priv[8]; + __be64 priv_64; + }; + __be64 disp_ts_64; /* Dispatch TimeStamp */ +} __attribute__((__packed__)); + +/* CRC polynomials for DDCB */ +#define CRC16_POLYNOMIAL 0x1021 + +/* + * SHI: Software to Hardware Interlock + * This 1 byte field is written by software to interlock the + * movement of one queue entry to another with the hardware in the + * chip. + */ +#define DDCB_SHI_INTR 0x04 /* Bit 2 */ +#define DDCB_SHI_PURGE 0x02 /* Bit 1 */ +#define DDCB_SHI_NEXT 0x01 /* Bit 0 */ + +/* + * HSI: Hardware to Software interlock + * This 1 byte field is written by hardware to interlock the movement + * of one queue entry to another with the software in the chip. + */ +#define DDCB_HSI_COMPLETED 0x40 /* Bit 6 */ +#define DDCB_HSI_FETCHED 0x04 /* Bit 2 */ + +/* + * Accessing HSI/SHI is done 32-bit wide + * Normally 16-bit access would work too, but on some platforms the + * 16 compare and swap operation is not supported. Therefore + * switching to 32-bit such that those platforms will work too. + * + * iCRC HSI/SHI + */ +#define DDCB_INTR_BE32 cpu_to_be32(0x00000004) +#define DDCB_PURGE_BE32 cpu_to_be32(0x00000002) +#define DDCB_NEXT_BE32 cpu_to_be32(0x00000001) +#define DDCB_COMPLETED_BE32 cpu_to_be32(0x00004000) +#define DDCB_FETCHED_BE32 cpu_to_be32(0x00000400) + +/* Definitions of DDCB presets */ +#define DDCB_PRESET_PRE 0x80 +#define ICRC_LENGTH(n) ((n) + 8 + 8 + 8) /* used ASIV + hdr fields */ +#define VCRC_LENGTH(n) ((n)) /* used ASV */ + +/* + * Genwqe Scatter Gather list + * Each element has up to 8 entries. + * The chaining element is element 0 cause of prefetching needs. + */ + +/* + * 0b0110 Chained descriptor. The descriptor is describing the next + * descriptor list. + */ +#define SG_CHAINED (0x6) + +/* + * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty + * condition. + */ +#define SG_DATA (0x2) + +/* + * 0b0000 Early terminator. This is the last entry on the list + * irregardless of the length indicated. + */ +#define SG_END_LIST (0x0) + +/** + * struct sglist - Scatter gather list + * @target_addr: Either a dma addr of memory to work on or a + * dma addr or a subsequent sglist block. + * @len: Length of the data block. + * @flags: See above. + * + * Depending on the command the GenWQE card can use a scatter gather + * list to describe the memory it works on. Always 8 sg_entry's form + * a block. 
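+ * Each sg_entry either points at a data buffer (SG_DATA), at the
+ * next descriptor block (SG_CHAINED), or terminates the list early
+ * (SG_END_LIST); see the flag definitions above.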
+ */ +struct sg_entry { + __be64 target_addr; + __be32 len; + __be32 flags; +}; + +#endif /* __CARD_DDCB_H__ */ diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c new file mode 100644 index 00000000000000..3bfdc07a7248b9 --- /dev/null +++ b/drivers/misc/genwqe/card_debugfs.c @@ -0,0 +1,500 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Debugfs interfaces for the GenWQE card. Help to debug potential + * problems. Dump internal chip state for debugging and failure + * determination. + */ + +#include +#include +#include +#include +#include +#include + +#include "card_base.h" +#include "card_ddcb.h" + +#define GENWQE_DEBUGFS_RO(_name, _showfn) \ + static int genwqe_debugfs_##_name##_open(struct inode *inode, \ + struct file *file) \ + { \ + return single_open(file, _showfn, inode->i_private); \ + } \ + static const struct file_operations genwqe_##_name##_fops = { \ + .open = genwqe_debugfs_##_name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs, + int entries) +{ + unsigned int i; + u32 v_hi, v_lo; + + for (i = 0; i < entries; i++) { + v_hi = (regs[i].val >> 32) & 0xffffffff; + v_lo = (regs[i].val) & 0xffffffff; + + seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n", + regs[i].addr, regs[i].idx, v_hi, v_lo); + } +} + +static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ + struct genwqe_dev *cd = s->private; + int entries; + struct genwqe_reg *regs; + + entries = genwqe_ffdc_buff_size(cd, uid); + if (entries < 0) + return -EINVAL; + + if (entries == 0) + return 0; + + regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL); + if (regs == NULL) + return -ENOMEM; + + genwqe_stop_traps(cd); /* halt the traps while dumping data */ + genwqe_ffdc_buff_read(cd, uid, regs, entries); + genwqe_start_traps(cd); + + dbg_uidn_show(s, regs, entries); + kfree(regs); + return 0; +} + +static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 0); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show); + +static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 1); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show); + +static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 2); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show); + +static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ + struct genwqe_dev *cd = s->private; + + dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries); + return 0; +} + +static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 0); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show); + +static int 
genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 1); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show); + +static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 2); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show); + +static int genwqe_curr_regs_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct genwqe_reg *regs; + + regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL); + if (regs == NULL) + return -ENOMEM; + + genwqe_stop_traps(cd); + genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1); + genwqe_start_traps(cd); + + for (i = 0; i < GENWQE_FFDC_REGS; i++) { + if (regs[i].addr == 0xffffffff) + break; /* invalid entries */ + + if (regs[i].val == 0x0ull) + continue; /* do not print 0x0 FIRs */ + + seq_printf(s, " 0x%08x 0x%016llx\n", + regs[i].addr, regs[i].val); + } + return 0; +} + +GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show); + +static int genwqe_prev_regs_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs; + + if (regs == NULL) + return -EINVAL; + + for (i = 0; i < GENWQE_FFDC_REGS; i++) { + if (regs[i].addr == 0xffffffff) + break; /* invalid entries */ + + if (regs[i].val == 0x0ull) + continue; /* do not print 0x0 FIRs */ + + seq_printf(s, " 0x%08x 0x%016llx\n", + regs[i].addr, regs[i].val); + } + return 0; +} + +GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show); + +static int genwqe_jtimer_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int vf_num; + u64 jtimer; + + jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0); + seq_printf(s, " PF 0x%016llx %d msec\n", jtimer, + genwqe_pf_jobtimeout_msec); + + for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { + jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + vf_num + 1); + seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer, + cd->vf_jobtimeout_msec[vf_num]); + } + return 0; +} + +GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show); + +static int genwqe_queue_working_time_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int vf_num; + u64 t; + + t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0); + seq_printf(s, " PF 0x%016llx\n", t); + + for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { + t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1); + seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t); + } + return 0; +} + +GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show); + +static int genwqe_ddcb_info_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct ddcb_queue *queue; + struct ddcb *pddcb; + + queue = &cd->queue; + seq_puts(s, "DDCB QUEUE:\n"); + seq_printf(s, " ddcb_max: %d\n" + " ddcb_daddr: %016llx - %016llx\n" + " ddcb_vaddr: %016llx\n" + " ddcbs_in_flight: %u\n" + " ddcbs_max_in_flight: %u\n" + " ddcbs_completed: %u\n" + " busy: %u\n" + " irqs_processed: %u\n", + queue->ddcb_max, (long long)queue->ddcb_daddr, + (long long)queue->ddcb_daddr + + (queue->ddcb_max * DDCB_LENGTH), + (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, + queue->ddcbs_max_in_flight, queue->ddcbs_completed, + queue->busy, cd->irqs_processed); + + /* Hardware State */ + seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n" + " 0x%08x 0x%016llx IO_QUEUE_STATUS\n" + " 
0x%08x 0x%016llx IO_QUEUE_SEGMENT\n" + " 0x%08x 0x%016llx IO_QUEUE_INITSQN\n" + " 0x%08x 0x%016llx IO_QUEUE_WRAP\n" + " 0x%08x 0x%016llx IO_QUEUE_OFFSET\n" + " 0x%08x 0x%016llx IO_QUEUE_WTIME\n" + " 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n" + " 0x%08x 0x%016llx IO_QUEUE_LRW\n", + queue->IO_QUEUE_CONFIG, + __genwqe_readq(cd, queue->IO_QUEUE_CONFIG), + queue->IO_QUEUE_STATUS, + __genwqe_readq(cd, queue->IO_QUEUE_STATUS), + queue->IO_QUEUE_SEGMENT, + __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT), + queue->IO_QUEUE_INITSQN, + __genwqe_readq(cd, queue->IO_QUEUE_INITSQN), + queue->IO_QUEUE_WRAP, + __genwqe_readq(cd, queue->IO_QUEUE_WRAP), + queue->IO_QUEUE_OFFSET, + __genwqe_readq(cd, queue->IO_QUEUE_OFFSET), + queue->IO_QUEUE_WTIME, + __genwqe_readq(cd, queue->IO_QUEUE_WTIME), + queue->IO_QUEUE_ERRCNTS, + __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS), + queue->IO_QUEUE_LRW, + __genwqe_readq(cd, queue->IO_QUEUE_LRW)); + + seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n", + queue->ddcb_act, queue->ddcb_next); + + pddcb = queue->ddcb_vaddr; + for (i = 0; i < queue->ddcb_max; i++) { + seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ", + i, be16_to_cpu(pddcb->retc_16), + be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, pddcb->shi); + seq_printf(s, "PRIV=%06llx CMD=%02x\n", + be64_to_cpu(pddcb->priv_64), pddcb->cmd); + pddcb++; + } + return 0; +} + +GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show); + +static int genwqe_info_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + u16 val16, type; + u64 app_id, slu_id, bitstream = -1; + struct pci_dev *pci_dev = cd->pci_dev; + + slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); + app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + + if (genwqe_is_privileged(cd)) + bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM); + + val16 = (u16)(slu_id & 0x0fLLU); + type = (u16)((slu_id >> 20) & 0xffLLU); + + seq_printf(s, "%s driver version: %s\n" + " Device Name/Type: %s %s CardIdx: %d\n" + " SLU/APP Config : 0x%016llx/0x%016llx\n" + " Build Date : %u/%x/%u\n" + " Base Clock : %u MHz\n" + " Arch/SVN Release: %u/%llx\n" + " Bitstream : %llx\n", + GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev), + genwqe_is_privileged(cd) ? 
+ "Physical" : "Virtual or no SR-IOV", + cd->card_idx, slu_id, app_id, + (u16)((slu_id >> 12) & 0x0fLLU), /* month */ + (u16)((slu_id >> 4) & 0xffLLU), /* day */ + (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */ + genwqe_base_clock_frequency(cd), + (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40, + bitstream); + + return 0; +} + +GENWQE_DEBUGFS_RO(info, genwqe_info_show); + +int genwqe_init_debugfs(struct genwqe_dev *cd) +{ + struct dentry *root; + struct dentry *file; + int ret; + char card_name[64]; + char name[64]; + unsigned int i; + + sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx); + + root = debugfs_create_dir(card_name, cd->debugfs_genwqe); + if (!root) { + ret = -ENOMEM; + goto err0; + } + + /* non privileged interfaces are done here */ + file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd, + &genwqe_ddcb_info_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("info", S_IRUGO, root, cd, + &genwqe_info_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_x64("err_inject", 0666, root, &cd->err_inject); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_u32("ddcb_software_timeout", 0666, root, + &cd->ddcb_software_timeout); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_u32("kill_timeout", 0666, root, + &cd->kill_timeout); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + /* privileged interfaces follow here */ + if (!genwqe_is_privileged(cd)) { + cd->debugfs_root = root; + return 0; + } + + file = debugfs_create_file("curr_regs", S_IRUGO, root, cd, + &genwqe_curr_regs_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd, + &genwqe_curr_dbg_uid0_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd, + &genwqe_curr_dbg_uid1_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd, + &genwqe_curr_dbg_uid2_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_regs", S_IRUGO, root, cd, + &genwqe_prev_regs_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd, + &genwqe_prev_dbg_uid0_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd, + &genwqe_prev_dbg_uid1_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd, + &genwqe_prev_dbg_uid2_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + for (i = 0; i < GENWQE_MAX_VFS; i++) { + sprintf(name, "vf%d_jobtimeout_msec", i); + + file = debugfs_create_u32(name, 0666, root, + &cd->vf_jobtimeout_msec[i]); + if (!file) { + ret = -ENOMEM; + goto err1; + } + } + + file = debugfs_create_file("jobtimer", S_IRUGO, root, cd, + &genwqe_jtimer_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd, + &genwqe_queue_working_time_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_u32("skip_recovery", 0666, root, + &cd->skip_recovery); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + cd->debugfs_root = root; + return 0; +err1: + debugfs_remove_recursive(root); +err0: + return ret; +} + +void genqwe_exit_debugfs(struct genwqe_dev *cd) +{ + 
debugfs_remove_recursive(cd->debugfs_root); +} diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c new file mode 100644 index 00000000000000..8f8a6b327cdb83 --- /dev/null +++ b/drivers/misc/genwqe/card_dev.c @@ -0,0 +1,1414 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Character device representation of the GenWQE device. This allows + * user-space applications to communicate with the card. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "card_base.h" +#include "card_ddcb.h" + +static int genwqe_open_files(struct genwqe_dev *cd) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&cd->file_lock, flags); + rc = list_empty(&cd->file_list); + spin_unlock_irqrestore(&cd->file_lock, flags); + return !rc; +} + +static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ + unsigned long flags; + + cfile->owner = current; + spin_lock_irqsave(&cd->file_lock, flags); + list_add(&cfile->list, &cd->file_list); + spin_unlock_irqrestore(&cd->file_lock, flags); +} + +static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ + unsigned long flags; + + spin_lock_irqsave(&cd->file_lock, flags); + list_del(&cfile->list); + spin_unlock_irqrestore(&cd->file_lock, flags); + + return 0; +} + +static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->pin_lock, flags); + list_add(&m->pin_list, &cfile->pin_list); + spin_unlock_irqrestore(&cfile->pin_lock, flags); +} + +static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->pin_lock, flags); + list_del(&m->pin_list); + spin_unlock_irqrestore(&cfile->pin_lock, flags); + + return 0; +} + +/** + * genwqe_search_pin() - Search for the mapping for a userspace address + * @cfile: Descriptor of opened file + * @u_addr: User virtual address + * @size: Size of buffer + * @dma_addr: DMA address to be updated + * + * Return: Pointer to the corresponding mapping NULL if not found + */ +static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile, + unsigned long u_addr, + unsigned int size, + void **virt_addr) +{ + unsigned long flags; + struct dma_mapping *m; + + spin_lock_irqsave(&cfile->pin_lock, flags); + + list_for_each_entry(m, &cfile->pin_list, pin_list) { + if ((((u64)m->u_vaddr) <= (u_addr)) && + (((u64)m->u_vaddr + m->size) >= (u_addr + size))) { + + if (virt_addr) + *virt_addr = m->k_vaddr + + (u_addr - (u64)m->u_vaddr); + + spin_unlock_irqrestore(&cfile->pin_lock, flags); + return m; + } + } + spin_unlock_irqrestore(&cfile->pin_lock, flags); + return NULL; +} + +static void __genwqe_add_mapping(struct genwqe_file *cfile, + struct dma_mapping *dma_map) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_add(&dma_map->card_list, 
&cfile->map_list); + spin_unlock_irqrestore(&cfile->map_lock, flags); +} + +static void __genwqe_del_mapping(struct genwqe_file *cfile, + struct dma_mapping *dma_map) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_del(&dma_map->card_list); + spin_unlock_irqrestore(&cfile->map_lock, flags); +} + + +/** + * __genwqe_search_mapping() - Search for the mapping for a userspace address + * @cfile: descriptor of opened file + * @u_addr: user virtual address + * @size: size of buffer + * @dma_addr: DMA address to be updated + * Return: Pointer to the corresponding mapping NULL if not found + */ +static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile, + unsigned long u_addr, + unsigned int size, + dma_addr_t *dma_addr, + void **virt_addr) +{ + unsigned long flags; + struct dma_mapping *m; + struct pci_dev *pci_dev = cfile->cd->pci_dev; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_for_each_entry(m, &cfile->map_list, card_list) { + + if ((((u64)m->u_vaddr) <= (u_addr)) && + (((u64)m->u_vaddr + m->size) >= (u_addr + size))) { + + /* match found: current is as expected and + addr is in range */ + if (dma_addr) + *dma_addr = m->dma_addr + + (u_addr - (u64)m->u_vaddr); + + if (virt_addr) + *virt_addr = m->k_vaddr + + (u_addr - (u64)m->u_vaddr); + + spin_unlock_irqrestore(&cfile->map_lock, flags); + return m; + } + } + spin_unlock_irqrestore(&cfile->map_lock, flags); + + dev_err(&pci_dev->dev, + "[%s] Entry not found: u_addr=%lx, size=%x\n", + __func__, u_addr, size); + + return NULL; +} + +static void genwqe_remove_mappings(struct genwqe_file *cfile) +{ + int i = 0; + struct list_head *node, *next; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cfile->cd->pci_dev; + + list_for_each_safe(node, next, &cfile->map_list) { + dma_map = list_entry(node, struct dma_mapping, card_list); + + list_del_init(&dma_map->card_list); + + /* + * This is really a bug, because those things should + * have been already tidied up. + * + * GENWQE_MAPPING_RAW should have been removed via mmunmap(). + * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code. + */ + dev_err(&pci_dev->dev, + "[%s] %d. cleanup mapping: u_vaddr=%p " + "u_kaddr=%016lx dma_addr=%lx\n", __func__, i++, + dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr, + (unsigned long)dma_map->dma_addr); + + if (dma_map->type == GENWQE_MAPPING_RAW) { + /* we allocated this dynamically */ + __genwqe_free_consistent(cd, dma_map->size, + dma_map->k_vaddr, + dma_map->dma_addr); + kfree(dma_map); + } else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) { + /* we use dma_map statically from the request */ + genwqe_user_vunmap(cd, dma_map, NULL); + } + } +} + +static void genwqe_remove_pinnings(struct genwqe_file *cfile) +{ + struct list_head *node, *next; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = cfile->cd; + + list_for_each_safe(node, next, &cfile->pin_list) { + dma_map = list_entry(node, struct dma_mapping, pin_list); + + /* + * This is not a bug, because a killed processed might + * not call the unpin ioctl, which is supposed to free + * the resources. + * + * Pinnings are dymically allocated and need to be + * deleted. + */ + list_del_init(&dma_map->pin_list); + genwqe_user_vunmap(cd, dma_map, NULL); + kfree(dma_map); + } +} + +/** + * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files + * + * E.g. 
genwqe_send_signal(cd, SIGIO); + */ +static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig) +{ + unsigned int files = 0; + unsigned long flags; + struct genwqe_file *cfile; + + spin_lock_irqsave(&cd->file_lock, flags); + list_for_each_entry(cfile, &cd->file_list, list) { + if (cfile->async_queue) + kill_fasync(&cfile->async_queue, sig, POLL_HUP); + files++; + } + spin_unlock_irqrestore(&cd->file_lock, flags); + return files; +} + +static int genwqe_force_sig(struct genwqe_dev *cd, int sig) +{ + unsigned int files = 0; + unsigned long flags; + struct genwqe_file *cfile; + + spin_lock_irqsave(&cd->file_lock, flags); + list_for_each_entry(cfile, &cd->file_list, list) { + force_sig(sig, cfile->owner); + files++; + } + spin_unlock_irqrestore(&cd->file_lock, flags); + return files; +} + +/** + * genwqe_open() - file open + * @inode: file system information + * @filp: file handle + * + * This function is executed whenever an application calls + * open("/dev/genwqe",..). + * + * Return: 0 if successful or <0 if errors + */ +static int genwqe_open(struct inode *inode, struct file *filp) +{ + struct genwqe_dev *cd; + struct genwqe_file *cfile; + struct pci_dev *pci_dev; + + cfile = kzalloc(sizeof(*cfile), GFP_KERNEL); + if (cfile == NULL) + return -ENOMEM; + + cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe); + pci_dev = cd->pci_dev; + cfile->cd = cd; + cfile->filp = filp; + cfile->client = NULL; + + spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */ + INIT_LIST_HEAD(&cfile->map_list); + + spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */ + INIT_LIST_HEAD(&cfile->pin_list); + + filp->private_data = cfile; + + genwqe_add_file(cd, cfile); + return 0; +} + +/** + * genwqe_fasync() - Setup process to receive SIGIO. + * @fd: file descriptor + * @filp: file handle + * @mode: file mode + * + * Sending a signal is working as following: + * + * if (cdev->async_queue) + * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN); + * + * Some devices also implement asynchronous notification to indicate + * when the device can be written; in this case, of course, + * kill_fasync must be called with a mode of POLL_OUT. + */ +static int genwqe_fasync(int fd, struct file *filp, int mode) +{ + struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data; + return fasync_helper(fd, filp, mode, &cdev->async_queue); +} + + +/** + * genwqe_release() - file close + * @inode: file system information + * @filp: file handle + * + * This function is executed whenever an application calls 'close(fd_genwqe)' + * + * Return: always 0 + */ +static int genwqe_release(struct inode *inode, struct file *filp) +{ + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + + /* there must be no entries in these lists! */ + genwqe_remove_mappings(cfile); + genwqe_remove_pinnings(cfile); + + /* remove this filp from the asynchronously notified filp's */ + genwqe_fasync(-1, filp, 0); + + /* + * For this to work we must not release cd when this cfile is + * not yet released, otherwise the list entry is invalid, + * because the list itself gets reinstantiated! + */ + genwqe_del_file(cd, cfile); + kfree(cfile); + return 0; +} + +static void genwqe_vma_open(struct vm_area_struct *vma) +{ + /* nothing ... */ +} + +/** + * genwqe_vma_close() - Called each time when vma is unmapped + * + * Free memory which got allocated by GenWQE mmap(). 
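+ * The buffer is looked up in the owning file's mapping list by its
+ * user address and size; the list entry, the DMA memory and the
+ * bookkeeping structure are all released here.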
+ */ +static void genwqe_vma_close(struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + struct inode *inode = vma->vm_file->f_dentry->d_inode; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev, + cdev_genwqe); + struct pci_dev *pci_dev = cd->pci_dev; + dma_addr_t d_addr = 0; + struct genwqe_file *cfile = vma->vm_private_data; + + dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize, + &d_addr, NULL); + if (dma_map == NULL) { + dev_err(&pci_dev->dev, + " [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n", + __func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, + vsize); + return; + } + __genwqe_del_mapping(cfile, dma_map); + __genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr, + dma_map->dma_addr); + kfree(dma_map); +} + +static struct vm_operations_struct genwqe_vma_ops = { + .open = genwqe_vma_open, + .close = genwqe_vma_close, +}; + +/** + * genwqe_mmap() - Provide contignous buffers to userspace + * + * We use mmap() to allocate contignous buffers used for DMA + * transfers. After the buffer is allocated we remap it to user-space + * and remember a reference to our dma_mapping data structure, where + * we store the associated DMA address and allocated size. + * + * When we receive a DDCB execution request with the ATS bits set to + * plain buffer, we lookup our dma_mapping list to find the + * corresponding DMA address for the associated user-space address. + */ +static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int rc; + unsigned long pfn, vsize = vma->vm_end - vma->vm_start; + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + struct dma_mapping *dma_map; + + if (vsize == 0) + return -EINVAL; + + if (get_order(vsize) > MAX_ORDER) + return -ENOMEM; + + dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); + if (dma_map == NULL) + return -ENOMEM; + + genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW); + dma_map->u_vaddr = (void *)vma->vm_start; + dma_map->size = vsize; + dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE); + dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize, + &dma_map->dma_addr); + if (dma_map->k_vaddr == NULL) { + rc = -ENOMEM; + goto free_dma_map; + } + + if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t))) + *(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr; + + pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT; + rc = remap_pfn_range(vma, + vma->vm_start, + pfn, + vsize, + vma->vm_page_prot); + if (rc != 0) { + rc = -EFAULT; + goto free_dma_mem; + } + + vma->vm_private_data = cfile; + vma->vm_ops = &genwqe_vma_ops; + __genwqe_add_mapping(cfile, dma_map); + + return 0; + + free_dma_mem: + __genwqe_free_consistent(cd, dma_map->size, + dma_map->k_vaddr, + dma_map->dma_addr); + free_dma_map: + kfree(dma_map); + return rc; +} + +/** + * do_flash_update() - Excute flash update (write image or CVPD) + * @cd: genwqe device + * @load: details about image load + * + * Return: 0 if successful + */ + +#define FLASH_BLOCK 0x40000 /* we use 256k blocks */ + +static int do_flash_update(struct genwqe_file *cfile, + struct genwqe_bitstream *load) +{ + int rc = 0; + int blocks_to_flash; + dma_addr_t dma_addr; + u64 flash = 0; + size_t tocopy = 0; + u8 __user *buf; + u8 *xbuf; + u32 crc; + u8 cmdopts; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cd->pci_dev; + + if ((load->size & 0x3) != 0) + return -EINVAL; + + if (((unsigned long)(load->data_addr) & ~PAGE_MASK) 
!= 0) + return -EINVAL; + + /* FIXME Bits have changed for new service layer! */ + switch ((char)load->partition) { + case '0': + cmdopts = 0x14; + break; /* download/erase_first/part_0 */ + case '1': + cmdopts = 0x1C; + break; /* download/erase_first/part_1 */ + case 'v': /* cmdopts = 0x0c (VPD) */ + default: + return -EINVAL; + } + + buf = (u8 __user *)load->data_addr; + xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); + if (xbuf == NULL) + return -ENOMEM; + + blocks_to_flash = load->size / FLASH_BLOCK; + while (load->size) { + struct genwqe_ddcb_cmd *req; + + /* + * We must be 4 byte aligned. Buffer must be 0 appened + * to have defined values when calculating CRC. + */ + tocopy = min_t(size_t, load->size, FLASH_BLOCK); + + rc = copy_from_user(xbuf, buf, tocopy); + if (rc) { + rc = -EFAULT; + goto free_buffer; + } + crc = genwqe_crc32(xbuf, tocopy, 0xffffffff); + + dev_dbg(&pci_dev->dev, + "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n", + __func__, (unsigned long)dma_addr, crc, tocopy, + blocks_to_flash); + + /* prepare DDCB for SLU process */ + req = ddcb_requ_alloc(); + if (req == NULL) { + rc = -ENOMEM; + goto free_buffer; + } + + req->cmd = SLCMD_MOVE_FLASH; + req->cmdopts = cmdopts; + + /* prepare invariant values */ + if (genwqe_get_slu_id(cd) <= 0x2) { + *(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr); + *(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy); + *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash); + *(__be32 *)&req->__asiv[24] = cpu_to_be32(0); + req->__asiv[24] = load->uid; + *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc); + + /* for simulation only */ + *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id); + *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id); + req->asiv_length = 32; /* bytes included in crc calc */ + } else { /* setup DDCB for ATS architecture */ + *(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr); + *(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy); + *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */ + *(__be64 *)&req->asiv[16] = cpu_to_be64(flash); + *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24); + *(__be32 *)&req->asiv[28] = cpu_to_be32(crc); + + /* for simulation only */ + *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id); + *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id); + + /* Rd only */ + req->ats = 0x4ULL << 44; + req->asiv_length = 40; /* bytes included in crc calc */ + } + req->asv_length = 8; + + /* For Genwqe5 we get back the calculated CRC */ + *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */ + + rc = __genwqe_execute_raw_ddcb(cd, req); + + load->retc = req->retc; + load->attn = req->attn; + load->progress = req->progress; + + if (rc < 0) { + ddcb_requ_free(req); + goto free_buffer; + } + + if (req->retc != DDCB_RETC_COMPLETE) { + rc = -EIO; + ddcb_requ_free(req); + goto free_buffer; + } + + load->size -= tocopy; + flash += tocopy; + buf += tocopy; + blocks_to_flash--; + ddcb_requ_free(req); + } + + free_buffer: + __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); + return rc; +} + +static int do_flash_read(struct genwqe_file *cfile, + struct genwqe_bitstream *load) +{ + int rc, blocks_to_flash; + dma_addr_t dma_addr; + u64 flash = 0; + size_t tocopy = 0; + u8 __user *buf; + u8 *xbuf; + u8 cmdopts; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cd->pci_dev; + struct genwqe_ddcb_cmd *cmd; + + if ((load->size & 0x3) != 0) + return -EINVAL; + + if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) + return -EINVAL; + + /* FIXME Bits have changed for new service layer! 
*/ + switch ((char)load->partition) { + case '0': + cmdopts = 0x12; + break; /* upload/part_0 */ + case '1': + cmdopts = 0x1A; + break; /* upload/part_1 */ + case 'v': + default: + return -EINVAL; + } + + buf = (u8 __user *)load->data_addr; + xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); + if (xbuf == NULL) + return -ENOMEM; + + blocks_to_flash = load->size / FLASH_BLOCK; + while (load->size) { + /* + * We must be 4 byte aligned. Buffer must be 0 appened + * to have defined values when calculating CRC. + */ + tocopy = min_t(size_t, load->size, FLASH_BLOCK); + + dev_dbg(&pci_dev->dev, + "[%s] DMA: %lx SZ: %ld %d\n", + __func__, (unsigned long)dma_addr, tocopy, + blocks_to_flash); + + /* prepare DDCB for SLU process */ + cmd = ddcb_requ_alloc(); + if (cmd == NULL) { + rc = -ENOMEM; + goto free_buffer; + } + cmd->cmd = SLCMD_MOVE_FLASH; + cmd->cmdopts = cmdopts; + + /* prepare invariant values */ + if (genwqe_get_slu_id(cd) <= 0x2) { + *(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr); + *(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy); + *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash); + *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0); + cmd->__asiv[24] = load->uid; + *(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */; + cmd->asiv_length = 32; /* bytes included in crc calc */ + } else { /* setup DDCB for ATS architecture */ + *(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr); + *(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy); + *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */ + *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash); + *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24); + *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */ + + /* rd/wr */ + cmd->ats = 0x5ULL << 44; + cmd->asiv_length = 40; /* bytes included in crc calc */ + } + cmd->asv_length = 8; + + /* we only get back the calculated CRC */ + *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */ + + rc = __genwqe_execute_raw_ddcb(cd, cmd); + + load->retc = cmd->retc; + load->attn = cmd->attn; + load->progress = cmd->progress; + + if ((rc < 0) && (rc != -EBADMSG)) { + ddcb_requ_free(cmd); + goto free_buffer; + } + + rc = copy_to_user(buf, xbuf, tocopy); + if (rc) { + rc = -EFAULT; + ddcb_requ_free(cmd); + goto free_buffer; + } + + /* We know that we can get retc 0x104 with CRC err */ + if (((cmd->retc == DDCB_RETC_FAULT) && + (cmd->attn != 0x02)) || /* Normally ignore CRC error */ + ((cmd->retc == DDCB_RETC_COMPLETE) && + (cmd->attn != 0x00))) { /* Everything was fine */ + rc = -EIO; + ddcb_requ_free(cmd); + goto free_buffer; + } + + load->size -= tocopy; + flash += tocopy; + buf += tocopy; + blocks_to_flash--; + ddcb_requ_free(cmd); + } + rc = 0; + + free_buffer: + __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); + return rc; +} + +static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ + int rc; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cfile->cd->pci_dev; + struct dma_mapping *dma_map; + unsigned long map_addr; + unsigned long map_size; + + if ((m->addr == 0x0) || (m->size == 0)) + return -EINVAL; + + map_addr = (m->addr & PAGE_MASK); + map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + + dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); + if (dma_map == NULL) + return -ENOMEM; + + genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED); + rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL); + if (rc != 0) { + dev_err(&pci_dev->dev, + "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); + return rc; + } + + 
genwqe_add_pin(cfile, dma_map); + return 0; +} + +static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ + struct genwqe_dev *cd = cfile->cd; + struct dma_mapping *dma_map; + unsigned long map_addr; + unsigned long map_size; + + if (m->addr == 0x0) + return -EINVAL; + + map_addr = (m->addr & PAGE_MASK); + map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + + dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL); + if (dma_map == NULL) + return -ENOENT; + + genwqe_del_pin(cfile, dma_map); + genwqe_user_vunmap(cd, dma_map, NULL); + kfree(dma_map); + return 0; +} + +/** + * ddcb_cmd_cleanup() - Remove dynamically created fixup entries + * + * Only if there are any. Pinnings are not removed. + */ +static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req) +{ + unsigned int i; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = cfile->cd; + + for (i = 0; i < DDCB_FIXUPS; i++) { + dma_map = &req->dma_mappings[i]; + + if (dma_mapping_used(dma_map)) { + __genwqe_del_mapping(cfile, dma_map); + genwqe_user_vunmap(cd, dma_map, req); + } + if (req->sgl[i] != NULL) { + genwqe_free_sgl(cd, req->sgl[i], + req->sgl_dma_addr[i], + req->sgl_size[i]); + req->sgl[i] = NULL; + req->sgl_dma_addr[i] = 0x0; + req->sgl_size[i] = 0; + } + + } + return 0; +} + +/** + * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references + * + * Before the DDCB gets executed we need to handle the fixups. We + * replace the user-space addresses with DMA addresses or do + * additional setup work e.g. generating a scatter-gather list which + * is used to describe the memory referred to in the fixup. + */ +static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) +{ + int rc; + unsigned int asiv_offs, i; + struct genwqe_dev *cd = cfile->cd; + struct genwqe_ddcb_cmd *cmd = &req->cmd; + struct dma_mapping *m; + const char *type = "UNKNOWN"; + + for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58; + i++, asiv_offs += 0x08) { + + u64 u_addr; + dma_addr_t d_addr; + u32 u_size = 0; + u64 ats_flags; + + ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs); + + switch (ats_flags) { + + case ATS_TYPE_DATA: + break; /* nothing to do here */ + + case ATS_TYPE_FLAT_RDWR: + case ATS_TYPE_FLAT_RD: { + u_addr = be64_to_cpu(*((__be64 *)&cmd-> + asiv[asiv_offs])); + u_size = be32_to_cpu(*((__be32 *)&cmd-> + asiv[asiv_offs + 0x08])); + + /* + * No data available. Ignore u_addr in this + * case and set addr to 0. Hardware must not + * fetch the buffer. + */ + if (u_size == 0x0) { + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(0x0); + break; + } + + m = __genwqe_search_mapping(cfile, u_addr, u_size, + &d_addr, NULL); + if (m == NULL) { + rc = -EFAULT; + goto err_out; + } + + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(d_addr); + break; + } + + case ATS_TYPE_SGL_RDWR: + case ATS_TYPE_SGL_RD: { + int page_offs, nr_pages, offs; + + u_addr = be64_to_cpu(*((__be64 *) + &cmd->asiv[asiv_offs])); + u_size = be32_to_cpu(*((__be32 *) + &cmd->asiv[asiv_offs + 0x08])); + + /* + * No data available. Ignore u_addr in this + * case and set addr to 0. Hardware must not + * fetch the empty sgl. 
+ */ + if (u_size == 0x0) { + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(0x0); + break; + } + + m = genwqe_search_pin(cfile, u_addr, u_size, NULL); + if (m != NULL) { + type = "PINNING"; + page_offs = (u_addr - + (u64)m->u_vaddr)/PAGE_SIZE; + } else { + type = "MAPPING"; + m = &req->dma_mappings[i]; + + genwqe_mapping_init(m, + GENWQE_MAPPING_SGL_TEMP); + rc = genwqe_user_vmap(cd, m, (void *)u_addr, + u_size, req); + if (rc != 0) + goto err_out; + + __genwqe_add_mapping(cfile, m); + page_offs = 0; + } + + offs = offset_in_page(u_addr); + nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE); + + /* create genwqe style scatter gather list */ + req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages, + &req->sgl_dma_addr[i], + &req->sgl_size[i]); + if (req->sgl[i] == NULL) { + rc = -ENOMEM; + goto err_out; + } + genwqe_setup_sgl(cd, offs, u_size, + req->sgl[i], + req->sgl_dma_addr[i], + req->sgl_size[i], + m->dma_list, + page_offs, + nr_pages); + + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(req->sgl_dma_addr[i]); + + break; + } + default: + rc = -EINVAL; + goto err_out; + } + } + return 0; + + err_out: + ddcb_cmd_cleanup(cfile, req); + return rc; +} + +/** + * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups + * + * The code will build up the translation tables or lookup the + * contignous memory allocation table to find the right translations + * and DMA addresses. + */ +static int genwqe_execute_ddcb(struct genwqe_file *cfile, + struct genwqe_ddcb_cmd *cmd) +{ + int rc; + struct genwqe_dev *cd = cfile->cd; + struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + + rc = ddcb_cmd_fixups(cfile, req); + if (rc != 0) + return rc; + + rc = __genwqe_execute_raw_ddcb(cd, cmd); + ddcb_cmd_cleanup(cfile, req); + return rc; +} + +static int do_execute_ddcb(struct genwqe_file *cfile, + unsigned long arg, int raw) +{ + int rc; + struct genwqe_ddcb_cmd *cmd; + struct ddcb_requ *req; + struct genwqe_dev *cd = cfile->cd; + + cmd = ddcb_requ_alloc(); + if (cmd == NULL) + return -ENOMEM; + + req = container_of(cmd, struct ddcb_requ, cmd); + + if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) { + ddcb_requ_free(cmd); + return -EFAULT; + } + + if (!raw) + rc = genwqe_execute_ddcb(cfile, cmd); + else + rc = __genwqe_execute_raw_ddcb(cd, cmd); + + /* Copy back only the modifed fields. Do not copy ASIV + back since the copy got modified by the driver. 
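+	   Copying sizeof(*cmd) - DDCB_ASIV_LENGTH bytes relies on the
+	   ASIV block being the last member of struct genwqe_ddcb_cmd.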
*/ + if (copy_to_user((void __user *)arg, cmd, + sizeof(*cmd) - DDCB_ASIV_LENGTH)) { + ddcb_requ_free(cmd); + return -EFAULT; + } + + ddcb_requ_free(cmd); + return rc; +} + +/** + * genwqe_ioctl() - IO control + * @filp: file handle + * @cmd: command identifier (passed from user) + * @arg: argument (passed from user) + * + * Return: 0 success + */ +static long genwqe_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + struct genwqe_reg_io __user *io; + u64 val; + u32 reg_offs; + + if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE) + return -EINVAL; + + switch (cmd) { + + case GENWQE_GET_CARD_STATE: + put_user(cd->card_state, (enum genwqe_card_state __user *)arg); + return 0; + + /* Register access */ + case GENWQE_READ_REG64: { + io = (struct genwqe_reg_io __user *)arg; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) + return -EINVAL; + + val = __genwqe_readq(cd, reg_offs); + put_user(val, &io->val64); + return 0; + } + + case GENWQE_WRITE_REG64: { + io = (struct genwqe_reg_io __user *)arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) + return -EINVAL; + + if (get_user(val, &io->val64)) + return -EFAULT; + + __genwqe_writeq(cd, reg_offs, val); + return 0; + } + + case GENWQE_READ_REG32: { + io = (struct genwqe_reg_io __user *)arg; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) + return -EINVAL; + + val = __genwqe_readl(cd, reg_offs); + put_user(val, &io->val64); + return 0; + } + + case GENWQE_WRITE_REG32: { + io = (struct genwqe_reg_io __user *)arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) + return -EINVAL; + + if (get_user(val, &io->val64)) + return -EFAULT; + + __genwqe_writel(cd, reg_offs, val); + return 0; + } + + /* Flash update/reading */ + case GENWQE_SLU_UPDATE: { + struct genwqe_bitstream load; + + if (!genwqe_is_privileged(cd)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (copy_from_user(&load, (void __user *)arg, + sizeof(load))) + return -EFAULT; + + rc = do_flash_update(cfile, &load); + + if (copy_to_user((void __user *)arg, &load, sizeof(load))) + return -EFAULT; + + return rc; + } + + case GENWQE_SLU_READ: { + struct genwqe_bitstream load; + + if (!genwqe_is_privileged(cd)) + return -EPERM; + + if (genwqe_flash_readback_fails(cd)) + return -ENOSPC; /* known to fail for old versions */ + + if (copy_from_user(&load, (void __user *)arg, sizeof(load))) + return -EFAULT; + + rc = do_flash_read(cfile, &load); + + if (copy_to_user((void __user *)arg, &load, sizeof(load))) + return -EFAULT; + + return rc; + } + + /* memory pinning and unpinning */ + case GENWQE_PIN_MEM: { + struct genwqe_mem m; + + if (copy_from_user(&m, (void __user *)arg, sizeof(m))) + return -EFAULT; + + return genwqe_pin_mem(cfile, &m); + } + + case GENWQE_UNPIN_MEM: { + struct genwqe_mem m; + + if (copy_from_user(&m, (void __user *)arg, sizeof(m))) + return -EFAULT; + + return genwqe_unpin_mem(cfile, &m); + } + + /* launch an DDCB and wait for 
completion */ + case GENWQE_EXECUTE_DDCB: + return do_execute_ddcb(cfile, arg, 0); + + case GENWQE_EXECUTE_RAW_DDCB: { + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return do_execute_ddcb(cfile, arg, 1); + } + + default: + return -EINVAL; + } + + return rc; +} + +#if defined(CONFIG_COMPAT) +/** + * genwqe_compat_ioctl() - Compatibility ioctl + * + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/genwqe_card. + * + * @filp: file pointer. + * @cmd: command. + * @arg: user argument. + * Return: zero on success or negative number on failure. + */ +static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return genwqe_ioctl(filp, cmd, arg); +} +#endif /* defined(CONFIG_COMPAT) */ + +static const struct file_operations genwqe_fops = { + .owner = THIS_MODULE, + .open = genwqe_open, + .fasync = genwqe_fasync, + .mmap = genwqe_mmap, + .unlocked_ioctl = genwqe_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = genwqe_compat_ioctl, +#endif + .release = genwqe_release, +}; + +static int genwqe_device_initialized(struct genwqe_dev *cd) +{ + return cd->dev != NULL; +} + +/** + * genwqe_device_create() - Create and configure genwqe char device + * @cd: genwqe device descriptor + * + * This function must be called before we create any more genwqe + * character devices, because it is allocating the major and minor + * number which are supposed to be used by the client drivers. + */ +int genwqe_device_create(struct genwqe_dev *cd) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + /* + * Here starts the individual setup per client. It must + * initialize its own cdev data structure with its own fops. + * The appropriate devnum needs to be created. The ranges must + * not overlap. + */ + rc = alloc_chrdev_region(&cd->devnum_genwqe, 0, + GENWQE_MAX_MINOR, GENWQE_DEVNAME); + if (rc < 0) { + dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n"); + goto err_dev; + } + + cdev_init(&cd->cdev_genwqe, &genwqe_fops); + cd->cdev_genwqe.owner = THIS_MODULE; + + rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1); + if (rc < 0) { + dev_err(&pci_dev->dev, "err: cdev_add failed\n"); + goto err_add; + } + + /* + * Finally the device in /dev/... must be created. The rule is + * to use card%d_clientname for each created device. + */ + cd->dev = device_create_with_groups(cd->class_genwqe, + &cd->pci_dev->dev, + cd->devnum_genwqe, cd, + genwqe_attribute_groups, + GENWQE_DEVNAME "%u_card", + cd->card_idx); + if (IS_ERR(cd->dev)) { + rc = PTR_ERR(cd->dev); + goto err_cdev; + } + + rc = genwqe_init_debugfs(cd); + if (rc != 0) + goto err_debugfs; + + return 0; + + err_debugfs: + device_destroy(cd->class_genwqe, cd->devnum_genwqe); + err_cdev: + cdev_del(&cd->cdev_genwqe); + err_add: + unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); + err_dev: + cd->dev = NULL; + return rc; +} + +static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd) +{ + int rc; + unsigned int i; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_open_files(cd)) + return 0; + + dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__); + + rc = genwqe_kill_fasync(cd, SIGIO); + if (rc > 0) { + /* give kill_timeout seconds to close file descriptors ... */ + for (i = 0; (i < genwqe_kill_timeout) && + genwqe_open_files(cd); i++) { + dev_info(&pci_dev->dev, " %d sec ...", i); + + cond_resched(); + msleep(1000); + } + + /* if no open files we can safely continue, else ... 
*/ + if (!genwqe_open_files(cd)) + return 0; + + dev_warn(&pci_dev->dev, + "[%s] send SIGKILL and wait ...\n", __func__); + + rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */ + if (rc) { + /* Give kill_timout more seconds to end processes */ + for (i = 0; (i < genwqe_kill_timeout) && + genwqe_open_files(cd); i++) { + dev_warn(&pci_dev->dev, " %d sec ...", i); + + cond_resched(); + msleep(1000); + } + } + } + return 0; +} + +/** + * genwqe_device_remove() - Remove genwqe's char device + * + * This function must be called after the client devices are removed + * because it will free the major/minor number range for the genwqe + * drivers. + * + * This function must be robust enough to be called twice. + */ +int genwqe_device_remove(struct genwqe_dev *cd) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_device_initialized(cd)) + return 1; + + genwqe_inform_and_stop_processes(cd); + + /* + * We currently do wait until all filedescriptors are + * closed. This leads to a problem when we abort the + * application which will decrease this reference from + * 1/unused to 0/illegal and not from 2/used 1/empty. + */ + rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount); + if (rc != 1) { + dev_err(&pci_dev->dev, + "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc); + panic("Fatal err: cannot free resources with pending references!"); + } + + genqwe_exit_debugfs(cd); + device_destroy(cd->class_genwqe, cd->devnum_genwqe); + cdev_del(&cd->cdev_genwqe); + unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); + cd->dev = NULL; + + return 0; +} diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c new file mode 100644 index 00000000000000..a72a99266c3c10 --- /dev/null +++ b/drivers/misc/genwqe/card_sysfs.c @@ -0,0 +1,288 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Sysfs interfaces for the GenWQE card. There are attributes to query + * the version of the bitstream as well as some for the driver. For + * debugging, please also see the debugfs interfaces of this driver. 
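+ * + * From user space the attributes appear below the card's class device, + * e.g. (sketch only, assuming the default "genwqe" class and card 0 + * naming): + * + *	/sys/class/genwqe/genwqe0_card/version + *	/sys/class/genwqe/genwqe0_card/curr_bitstream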
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "card_base.h" +#include "card_ddcb.h" + +static const char * const genwqe_types[] = { + [GENWQE_TYPE_ALTERA_230] = "GenWQE4-230", + [GENWQE_TYPE_ALTERA_530] = "GenWQE4-530", + [GENWQE_TYPE_ALTERA_A4] = "GenWQE5-A4", + [GENWQE_TYPE_ALTERA_A7] = "GenWQE5-A7", +}; + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct genwqe_dev *cd = dev_get_drvdata(dev); + const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" }; + + return sprintf(buf, "%s\n", cs[cd->card_state]); +} +static DEVICE_ATTR_RO(status); + +static ssize_t appid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char app_name[5]; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + genwqe_read_app_id(cd, app_name, sizeof(app_name)); + return sprintf(buf, "%s\n", app_name); +} +static DEVICE_ATTR_RO(appid); + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u64 slu_id, app_id; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); + app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + + return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id); +} +static DEVICE_ATTR_RO(version); + +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 card_type; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + card_type = genwqe_card_type(cd); + return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ? + "invalid" : genwqe_types[card_type]); +} +static DEVICE_ATTR_RO(type); + +static ssize_t driver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", DRV_VERS_STRING); +} +static DEVICE_ATTR_RO(driver); + +static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u64 tempsens; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR); + return sprintf(buf, "%016llx\n", tempsens); +} +static DEVICE_ATTR_RO(tempsens); + +static ssize_t freerunning_timer_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 t; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER); + return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(freerunning_timer); + +static ssize_t queue_working_time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 t; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME); + return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(queue_working_time); + +static ssize_t base_clock_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 base_clock; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + base_clock = genwqe_base_clock_frequency(cd); + return sprintf(buf, "%lld\n", base_clock); +} +static DEVICE_ATTR_RO(base_clock); + +/** + * curr_bitstream_show() - Show the current bitstream id + * + * There is a bug in some old versions of the CPLD which selects the + * bitstream, which causes the IO_SLU_BITSTREAM register to report + * unreliable data in very rare cases. This makes this sysfs + * unreliable up to the point were a new CPLD version is being used. 
+ * + * Unfortunately there is no automatic way yet to query the CPLD + * version, such that you need to manually ensure via programming + * tools that you have a recent version of the CPLD software. + * + * The proposed circumvention is to use a special recovery bitstream + * on the backup partition (0) to identify problems while loading the + * image. + */ +static ssize_t curr_bitstream_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int curr_bitstream; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; + return sprintf(buf, "%d\n", curr_bitstream); +} +static DEVICE_ATTR_RO(curr_bitstream); + +/** + * next_bitstream_show() - Show the next activated bitstream + * + * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF. + */ +static ssize_t next_bitstream_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int next_bitstream; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + switch ((cd->softreset & 0xc) >> 2) { + case 0x2: + next_bitstream = 0; + break; + case 0x3: + next_bitstream = 1; + break; + default: + next_bitstream = -1; + break; /* error */ + } + return sprintf(buf, "%d\n", next_bitstream); +} + +static ssize_t next_bitstream_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int partition; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + if (kstrtoint(buf, 0, &partition) < 0) + return -EINVAL; + + switch (partition) { + case 0x0: + cd->softreset = 0x78; + break; + case 0x1: + cd->softreset = 0x7c; + break; + default: + return -EINVAL; + } + + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset); + return count; +} +static DEVICE_ATTR_RW(next_bitstream); + +/* + * Create device_attribute structures / params: name, mode, show, store + * additional flag if valid in VF + */ +static struct attribute *genwqe_attributes[] = { + &dev_attr_tempsens.attr, + &dev_attr_next_bitstream.attr, + &dev_attr_curr_bitstream.attr, + &dev_attr_base_clock.attr, + &dev_attr_driver.attr, + &dev_attr_type.attr, + &dev_attr_version.attr, + &dev_attr_appid.attr, + &dev_attr_status.attr, + &dev_attr_freerunning_timer.attr, + &dev_attr_queue_working_time.attr, + NULL, +}; + +static struct attribute *genwqe_normal_attributes[] = { + &dev_attr_driver.attr, + &dev_attr_type.attr, + &dev_attr_version.attr, + &dev_attr_appid.attr, + &dev_attr_status.attr, + &dev_attr_freerunning_timer.attr, + &dev_attr_queue_working_time.attr, + NULL, +}; + +/** + * genwqe_is_visible() - Determine if sysfs attribute should be visible or not + * + * VFs have restricted mmio capabilities, so not all sysfs entries + * are allowed in VFs. 
+ */ +static umode_t genwqe_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + unsigned int j; + struct device *dev = container_of(kobj, struct device, kobj); + struct genwqe_dev *cd = dev_get_drvdata(dev); + umode_t mode = attr->mode; + + if (genwqe_is_privileged(cd)) + return mode; + + for (j = 0; genwqe_normal_attributes[j] != NULL; j++) + if (genwqe_normal_attributes[j] == attr) + return mode; + + return 0; +} + +static struct attribute_group genwqe_attribute_group = { + .is_visible = genwqe_is_visible, + .attrs = genwqe_attributes, +}; + +const struct attribute_group *genwqe_attribute_groups[] = { + &genwqe_attribute_group, + NULL, +}; diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c new file mode 100644 index 00000000000000..6b1a6ef9f1a8c3 --- /dev/null +++ b/drivers/misc/genwqe/card_utils.c @@ -0,0 +1,944 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Miscelanous functionality used in the other GenWQE driver parts. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "genwqe_driver.h" +#include "card_base.h" +#include "card_ddcb.h" + +/** + * __genwqe_writeq() - Write 64-bit register + * @cd: genwqe device descriptor + * @byte_offs: byte offset within BAR + * @val: 64-bit value + * + * Return: 0 if success; < 0 if error + */ +int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val) +{ + if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) + return -EIO; + + if (cd->mmio == NULL) + return -EIO; + + __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs); + return 0; +} + +/** + * __genwqe_readq() - Read 64-bit register + * @cd: genwqe device descriptor + * @byte_offs: offset within BAR + * + * Return: value from register + */ +u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs) +{ + if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) + return 0xffffffffffffffffull; + + if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) && + (byte_offs == IO_SLC_CFGREG_GFIR)) + return 0x000000000000ffffull; + + if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) && + (byte_offs == IO_SLC_CFGREG_GFIR)) + return 0x00000000ffff0000ull; + + if (cd->mmio == NULL) + return 0xffffffffffffffffull; + + return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs)); +} + +/** + * __genwqe_writel() - Write 32-bit register + * @cd: genwqe device descriptor + * @byte_offs: byte offset within BAR + * @val: 32-bit value + * + * Return: 0 if success; < 0 if error + */ +int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val) +{ + if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) + return -EIO; + + if (cd->mmio == NULL) + return -EIO; + + __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs); + return 0; +} + +/** + * __genwqe_readl() - Read 32-bit register + * @cd: genwqe device 
descriptor + * @byte_offs: offset within BAR + * + * Return: Value from register + */ +u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs) +{ + if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) + return 0xffffffff; + + if (cd->mmio == NULL) + return 0xffffffff; + + return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs)); +} + +/** + * genwqe_read_app_id() - Extract app_id + * + * app_unitcfg needs to be filled with valid data first + */ +int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len) +{ + int i, j; + u32 app_id = (u32)cd->app_unitcfg; + + memset(app_name, 0, len); + for (i = 0, j = 0; j < min(len, 4); j++) { + char ch = (char)((app_id >> (24 - j*8)) & 0xff); + if (ch == ' ') + continue; + app_name[i++] = isprint(ch) ? ch : 'X'; + } + return i; +} + +/** + * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations + * + * Existing kernel functions seem to use a different polynomial, + * therefore we could not use them here. + * + * Genwqe's Polynomial = 0x20044009 + */ +#define CRC32_POLYNOMIAL 0x20044009 +static u32 crc32_tab[256]; /* crc32 lookup table */ + +void genwqe_init_crc32(void) +{ + int i, j; + u32 crc; + + for (i = 0; i < 256; i++) { + crc = i << 24; + for (j = 0; j < 8; j++) { + if (crc & 0x80000000) + crc = (crc << 1) ^ CRC32_POLYNOMIAL; + else + crc = (crc << 1); + } + crc32_tab[i] = crc; + } +} + +/** + * genwqe_crc32() - Generate 32-bit crc as required for DDCBs + * @buff: pointer to data buffer + * @len: length of data for calculation + * @init: initial crc (0xffffffff at start) + * + * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009) + * + * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should + * result in a crc32 of 0xf33cb7d3. + * + * The existing kernel crc functions did not cover this polynomial yet. + * + * Return: crc32 checksum. 
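+ * + * Minimal usage sketch (hypothetical buffer "buff" of "len" bytes; + * genwqe_init_crc32() prepares the lookup table and only needs to be + * called once): + * + *	genwqe_init_crc32(); + *	crc = genwqe_crc32(buff, len, 0xffffffff);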
+ */ +u32 genwqe_crc32(u8 *buff, size_t len, u32 init) +{ + int i; + u32 crc; + + crc = init; + while (len--) { + i = ((crc >> 24) ^ *buff++) & 0xFF; + crc = (crc << 8) ^ crc32_tab[i]; + } + return crc; +} + +void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, + dma_addr_t *dma_handle) +{ + if (get_order(size) > MAX_ORDER) + return NULL; + + return pci_alloc_consistent(cd->pci_dev, size, dma_handle); +} + +void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + if (vaddr == NULL) + return; + + pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle); +} + +static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list, + int num_pages) +{ + int i; + struct pci_dev *pci_dev = cd->pci_dev; + + for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) { + pci_unmap_page(pci_dev, dma_list[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + dma_list[i] = 0x0; + } +} + +static int genwqe_map_pages(struct genwqe_dev *cd, + struct page **page_list, int num_pages, + dma_addr_t *dma_list) +{ + int i; + struct pci_dev *pci_dev = cd->pci_dev; + + /* establish DMA mapping for requested pages */ + for (i = 0; i < num_pages; i++) { + dma_addr_t daddr; + + dma_list[i] = 0x0; + daddr = pci_map_page(pci_dev, page_list[i], + 0, /* map_offs */ + PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */ + + if (pci_dma_mapping_error(pci_dev, daddr)) { + dev_err(&pci_dev->dev, + "[%s] err: no dma addr daddr=%016llx!\n", + __func__, (long long)daddr); + goto err; + } + + dma_list[i] = daddr; + } + return 0; + + err: + genwqe_unmap_pages(cd, dma_list, num_pages); + return -EIO; +} + +static int genwqe_sgl_size(int num_pages) +{ + int len, num_tlb = num_pages / 7; + + len = sizeof(struct sg_entry) * (num_pages+num_tlb + 1); + return roundup(len, PAGE_SIZE); +} + +struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages, + dma_addr_t *dma_addr, size_t *sgl_size) +{ + struct pci_dev *pci_dev = cd->pci_dev; + struct sg_entry *sgl; + + *sgl_size = genwqe_sgl_size(num_pages); + if (get_order(*sgl_size) > MAX_ORDER) { + dev_err(&pci_dev->dev, + "[%s] err: too much memory requested!\n", __func__); + return NULL; + } + + sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr); + if (sgl == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: no memory available!\n", __func__); + return NULL; + } + + return sgl; +} + +int genwqe_setup_sgl(struct genwqe_dev *cd, + unsigned long offs, + unsigned long size, + struct sg_entry *sgl, + dma_addr_t dma_addr, size_t sgl_size, + dma_addr_t *dma_list, int page_offs, int num_pages) +{ + int i = 0, j = 0, p; + unsigned long dma_offs, map_offs; + struct pci_dev *pci_dev = cd->pci_dev; + dma_addr_t prev_daddr = 0; + struct sg_entry *s, *last_s = NULL; + + /* sanity checks */ + if (offs > PAGE_SIZE) { + dev_err(&pci_dev->dev, + "[%s] too large start offs %08lx\n", __func__, offs); + return -EFAULT; + } + if (sgl_size < genwqe_sgl_size(num_pages)) { + dev_err(&pci_dev->dev, + "[%s] sgl_size too small %08lx for %d pages\n", + __func__, sgl_size, num_pages); + return -EFAULT; + } + + dma_offs = 128; /* next block if needed/dma_offset */ + map_offs = offs; /* offset in first page */ + + s = &sgl[0]; /* first set of 8 entries */ + p = 0; /* page */ + while (p < num_pages) { + dma_addr_t daddr; + unsigned int size_to_map; + + /* always write the chaining entry, cleanup is done later */ + j = 0; + s[j].target_addr = cpu_to_be64(dma_addr + dma_offs); + s[j].len = cpu_to_be32(128); + s[j].flags = cpu_to_be32(SG_CHAINED); 
+ j++; + + while (j < 8) { + /* DMA mapping for requested page, offs, size */ + size_to_map = min(size, PAGE_SIZE - map_offs); + daddr = dma_list[page_offs + p] + map_offs; + size -= size_to_map; + map_offs = 0; + + if (prev_daddr == daddr) { + u32 prev_len = be32_to_cpu(last_s->len); + + /* pr_info("daddr combining: " + "%016llx/%08x -> %016llx\n", + prev_daddr, prev_len, daddr); */ + + last_s->len = cpu_to_be32(prev_len + + size_to_map); + + p++; /* process next page */ + if (p == num_pages) + goto fixup; /* nothing to do */ + + prev_daddr = daddr + size_to_map; + continue; + } + + /* start new entry */ + s[j].target_addr = cpu_to_be64(daddr); + s[j].len = cpu_to_be32(size_to_map); + s[j].flags = cpu_to_be32(SG_DATA); + prev_daddr = daddr + size_to_map; + last_s = &s[j]; + j++; + + p++; /* process next page */ + if (p == num_pages) + goto fixup; /* nothing to do */ + } + dma_offs += 128; + s += 8; /* continue 8 elements further */ + } + fixup: + if (j == 1) { /* combining happend on last entry! */ + s -= 8; /* full shift needed on previous sgl block */ + j = 7; /* shift all elements */ + } + + for (i = 0; i < j; i++) /* move elements 1 up */ + s[i] = s[i + 1]; + + s[i].target_addr = cpu_to_be64(0); + s[i].len = cpu_to_be32(0); + s[i].flags = cpu_to_be32(SG_END_LIST); + return 0; +} + +void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, + dma_addr_t dma_addr, size_t size) +{ + __genwqe_free_consistent(cd, size, sg_list, dma_addr); +} + +/** + * free_user_pages() - Give pinned pages back + * + * Documentation of get_user_pages is in mm/memory.c: + * + * If the page is written to, set_page_dirty (or set_page_dirty_lock, + * as appropriate) must be called after the page is finished with, and + * before put_page is called. + * + * FIXME Could be of use to others and might belong in the generic + * code, if others agree. E.g. + * ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c + * ceph_put_page_vector in net/ceph/pagevec.c + * maybe more? + */ +static int free_user_pages(struct page **page_list, unsigned int nr_pages, + int dirty) +{ + unsigned int i; + + for (i = 0; i < nr_pages; i++) { + if (page_list[i] != NULL) { + if (dirty) + set_page_dirty_lock(page_list[i]); + put_page(page_list[i]); + } + } + return 0; +} + +/** + * genwqe_user_vmap() - Map user-space memory to virtual kernel memory + * @cd: pointer to genwqe device + * @m: mapping params + * @uaddr: user virtual address + * @size: size of memory to be mapped + * + * We need to think about how we could speed this up. Of course it is + * not a good idea to do this over and over again, like we are + * currently doing it. Nevertheless, I am curious where on the path + * the performance is spend. Most probably within the memory + * allocation functions, but maybe also in the DMA mapping code. + * + * Restrictions: The maximum size of the possible mapping currently depends + * on the amount of memory we can get using kzalloc() for the + * page_list and pci_alloc_consistent for the sg_list. + * The sg_list is currently itself not scattered, which could + * be fixed with some effort. The page_list must be split into + * PAGE_SIZE chunks too. All that will make the complicated + * code more complicated. 
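+ * + * Sketch of the intended pairing (hypothetical local variables, shown + * only to illustrate that map and unmap are used as a pair): + * + *	rc = genwqe_user_vmap(cd, &m, uaddr, size, req); + *	... m.dma_list now holds one DMA address per pinned page ... + *	genwqe_user_vunmap(cd, &m, req);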
+ * + * Return: 0 if success + */ +int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, + unsigned long size, struct ddcb_requ *req) +{ + int rc = -EINVAL; + unsigned long data, offs; + struct pci_dev *pci_dev = cd->pci_dev; + + if ((uaddr == NULL) || (size == 0)) { + m->size = 0; /* mark unused and not added */ + return -EINVAL; + } + m->u_vaddr = uaddr; + m->size = size; + + /* determine space needed for page_list. */ + data = (unsigned long)uaddr; + offs = offset_in_page(data); + m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); + + m->page_list = kcalloc(m->nr_pages, + sizeof(struct page *) + sizeof(dma_addr_t), + GFP_KERNEL); + if (!m->page_list) { + dev_err(&pci_dev->dev, "err: alloc page_list failed\n"); + m->nr_pages = 0; + m->u_vaddr = NULL; + m->size = 0; /* mark unused and not added */ + return -ENOMEM; + } + m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); + + /* pin user pages in memory */ + rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */ + m->nr_pages, + 1, /* write by caller */ + m->page_list); /* ptrs to pages */ + + /* assumption: get_user_pages can be killed by signals. */ + if (rc < m->nr_pages) { + free_user_pages(m->page_list, rc, 0); + rc = -EFAULT; + goto fail_get_user_pages; + } + + rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list); + if (rc != 0) + goto fail_free_user_pages; + + return 0; + + fail_free_user_pages: + free_user_pages(m->page_list, m->nr_pages, 0); + + fail_get_user_pages: + kfree(m->page_list); + m->page_list = NULL; + m->dma_list = NULL; + m->nr_pages = 0; + m->u_vaddr = NULL; + m->size = 0; /* mark unused and not added */ + return rc; +} + +/** + * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel + * memory + * @cd: pointer to genwqe device + * @m: mapping params + */ +int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, + struct ddcb_requ *req) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (!dma_mapping_used(m)) { + dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n", + __func__, m); + return -EINVAL; + } + + if (m->dma_list) + genwqe_unmap_pages(cd, m->dma_list, m->nr_pages); + + if (m->page_list) { + free_user_pages(m->page_list, m->nr_pages, 1); + + kfree(m->page_list); + m->page_list = NULL; + m->dma_list = NULL; + m->nr_pages = 0; + } + + m->u_vaddr = NULL; + m->size = 0; /* mark as unused and not added */ + return 0; +} + +/** + * genwqe_card_type() - Get chip type SLU Configuration Register + * @cd: pointer to the genwqe device descriptor + * Return: 0: Altera Stratix-IV 230 + * 1: Altera Stratix-IV 530 + * 2: Altera Stratix-V A4 + * 3: Altera Stratix-V A7 + */ +u8 genwqe_card_type(struct genwqe_dev *cd) +{ + u64 card_type = cd->slu_unitcfg; + return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20); +} + +/** + * genwqe_card_reset() - Reset the card + * @cd: pointer to the genwqe device descriptor + */ +int genwqe_card_reset(struct genwqe_dev *cd) +{ + u64 softrst; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_is_privileged(cd)) + return -ENODEV; + + /* new SL */ + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull); + msleep(1000); + __genwqe_readq(cd, IO_HSU_FIR_CLR); + __genwqe_readq(cd, IO_APP_FIR_CLR); + __genwqe_readq(cd, IO_SLU_FIR_CLR); + + /* + * Read-modify-write to preserve the stealth bits + * + * For SL >= 039, Stealth WE bit allows removing + * the read-modify-wrote. + * r-m-w may require a mask 0x3C to avoid hitting hard + * reset again for error reset (should be 0, chicken). 
+ */ + softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull; + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull); + + /* give ERRORRESET some time to finish */ + msleep(50); + + if (genwqe_need_err_masking(cd)) { + dev_info(&pci_dev->dev, + "[%s] masking errors for old bitstreams\n", __func__); + __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull); + } + return 0; +} + +int genwqe_read_softreset(struct genwqe_dev *cd) +{ + u64 bitstream; + + if (!genwqe_is_privileged(cd)) + return -ENODEV; + + bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; + cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull; + return 0; +} + +/** + * genwqe_set_interrupt_capability() - Configure MSI capability structure + * @cd: pointer to the device + * Return: 0 if no error + */ +int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + rc = pci_enable_msi_block(pci_dev, count); + if (rc == 0) + cd->flags |= GENWQE_FLAG_MSI_ENABLED; + return rc; +} + +/** + * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability() + * @cd: pointer to the device + */ +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (cd->flags & GENWQE_FLAG_MSI_ENABLED) { + pci_disable_msi(pci_dev); + cd->flags &= ~GENWQE_FLAG_MSI_ENABLED; + } +} + +/** + * set_reg_idx() - Fill array with data. Ignore illegal offsets. + * @cd: card device + * @r: debug register array + * @i: index to desired entry + * @m: maximum possible entries + * @addr: addr which is read + * @index: index in debug array + * @val: read value + */ +static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r, + unsigned int *i, unsigned int m, u32 addr, u32 idx, + u64 val) +{ + if (WARN_ON_ONCE(*i >= m)) + return -EFAULT; + + r[*i].addr = addr; + r[*i].idx = idx; + r[*i].val = val; + ++*i; + return 0; +} + +static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r, + unsigned int *i, unsigned int m, u32 addr, u64 val) +{ + return set_reg_idx(cd, r, i, m, addr, 0, val); +} + +int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, + unsigned int max_regs, int all) +{ + unsigned int i, j, idx = 0; + u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr; + u64 gfir, sluid, appid, ufir, ufec, sfir, sfec; + + /* Global FIR */ + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir); + + /* UnitCfg for SLU */ + sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */ + set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid); + + /* UnitCfg for APP */ + appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */ + set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid); + + /* Check all chip Units */ + for (i = 0; i < GENWQE_MAX_UNITS; i++) { + + /* Unit FIR */ + ufir_addr = (i << 24) | 0x008; + ufir = __genwqe_readq(cd, ufir_addr); + set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir); + + /* Unit FEC */ + ufec_addr = (i << 24) | 0x018; + ufec = __genwqe_readq(cd, ufec_addr); + set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec); + + for (j = 0; j < 64; j++) { + /* wherever there is a primary 1, read the 2ndary */ + if (!all && (!(ufir & (1ull << j)))) + continue; + + sfir_addr = (i << 24) | (0x100 + 8 * j); + sfir = __genwqe_readq(cd, sfir_addr); + set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir); + + sfec_addr = (i << 24) | (0x300 + 8 * j); + sfec = __genwqe_readq(cd, sfec_addr); + set_reg(cd, regs, &idx, max_regs, sfec_addr, 
sfec); + } + } + + /* fill with invalid data until end */ + for (i = idx; i < max_regs; i++) { + regs[i].addr = 0xffffffff; + regs[i].val = 0xffffffffffffffffull; + } + return idx; +} + +/** + * genwqe_ffdc_buff_size() - Calculates the number of dump registers + */ +int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid) +{ + int entries = 0, ring, traps, traces, trace_entries; + u32 eevptr_addr, l_addr, d_len, d_type; + u64 eevptr, val, addr; + + eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER; + eevptr = __genwqe_readq(cd, eevptr_addr); + + if ((eevptr != 0x0) && (eevptr != -1ull)) { + l_addr = GENWQE_UID_OFFS(uid) | eevptr; + + while (1) { + val = __genwqe_readq(cd, l_addr); + + if ((val == 0x0) || (val == -1ull)) + break; + + /* 38:24 */ + d_len = (val & 0x0000007fff000000ull) >> 24; + + /* 39 */ + d_type = (val & 0x0000008000000000ull) >> 36; + + if (d_type) { /* repeat */ + entries += d_len; + } else { /* size in bytes! */ + entries += d_len >> 3; + } + + l_addr += 8; + } + } + + for (ring = 0; ring < 8; ring++) { + addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring); + val = __genwqe_readq(cd, addr); + + if ((val == 0x0ull) || (val == -1ull)) + continue; + + traps = (val >> 24) & 0xff; + traces = (val >> 16) & 0xff; + trace_entries = val & 0xffff; + + entries += traps + (traces * trace_entries); + } + return entries; +} + +/** + * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure + */ +int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid, + struct genwqe_reg *regs, unsigned int max_regs) +{ + int i, traps, traces, trace, trace_entries, trace_entry, ring; + unsigned int idx = 0; + u32 eevptr_addr, l_addr, d_addr, d_len, d_type; + u64 eevptr, e, val, addr; + + eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER; + eevptr = __genwqe_readq(cd, eevptr_addr); + + if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) { + l_addr = GENWQE_UID_OFFS(uid) | eevptr; + while (1) { + e = __genwqe_readq(cd, l_addr); + if ((e == 0x0) || (e == 0xffffffffffffffffull)) + break; + + d_addr = (e & 0x0000000000ffffffull); /* 23:0 */ + d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */ + d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */ + d_addr |= GENWQE_UID_OFFS(uid); + + if (d_type) { + for (i = 0; i < (int)d_len; i++) { + val = __genwqe_readq(cd, d_addr); + set_reg_idx(cd, regs, &idx, max_regs, + d_addr, i, val); + } + } else { + d_len >>= 3; /* Size in bytes! */ + for (i = 0; i < (int)d_len; i++, d_addr += 8) { + val = __genwqe_readq(cd, d_addr); + set_reg_idx(cd, regs, &idx, max_regs, + d_addr, 0, val); + } + } + l_addr += 8; + } + } + + /* + * To save time, there are only 6 traces poplulated on Uid=2, + * Ring=1. each with iters=512. + */ + for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds, + 2...7 are ASI rings */ + addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring); + val = __genwqe_readq(cd, addr); + + if ((val == 0x0ull) || (val == -1ull)) + continue; + + traps = (val >> 24) & 0xff; /* Number of Traps */ + traces = (val >> 16) & 0xff; /* Number of Traces */ + trace_entries = val & 0xffff; /* Entries per trace */ + + /* Note: This is a combined loop that dumps both the traps */ + /* (for the trace == 0 case) as well as the traces 1 to */ + /* 'traces'. 
*/ + for (trace = 0; trace <= traces; trace++) { + u32 diag_sel = + GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace); + + addr = (GENWQE_UID_OFFS(uid) | + IO_EXTENDED_DIAG_SELECTOR); + __genwqe_writeq(cd, addr, diag_sel); + + for (trace_entry = 0; + trace_entry < (trace ? trace_entries : traps); + trace_entry++) { + addr = (GENWQE_UID_OFFS(uid) | + IO_EXTENDED_DIAG_READ_MBX); + val = __genwqe_readq(cd, addr); + set_reg_idx(cd, regs, &idx, max_regs, addr, + (diag_sel<<16) | trace_entry, val); + } + } + } + return 0; +} + +/** + * genwqe_write_vreg() - Write register in virtual window + * + * Note, these registers are only accessible to the PF through the + * VF-window. It is not intended for the VF to access. + */ +int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func) +{ + __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf); + __genwqe_writeq(cd, reg, val); + return 0; +} + +/** + * genwqe_read_vreg() - Read register in virtual window + * + * Note, these registers are only accessible to the PF through the + * VF-window. It is not intended for the VF to access. + */ +u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func) +{ + __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf); + return __genwqe_readq(cd, reg); +} + +/** + * genwqe_base_clock_frequency() - Determine base clock frequency of the card + * + * Note: From a design perspective it turned out to be a bad idea to + * use codes here to specify the frequency/speed values. An old + * driver cannot understand new codes and is therefore always a + * problem. It is better to measure out the value or put the + * speed/frequency directly into a register which is always a valid + * value for old as well as for new software. + * + * Return: Card clock in MHz + */ +int genwqe_base_clock_frequency(struct genwqe_dev *cd) +{ + u16 speed; /* MHz MHz MHz MHz */ + static const int speed_grade[] = { 250, 200, 166, 175 }; + + speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full); + if (speed >= ARRAY_SIZE(speed_grade)) + return 0; /* illegal value */ + + return speed_grade[speed]; +} + +/** + * genwqe_stop_traps() - Stop traps + * + * Before reading out the analysis data, we need to stop the traps. + */ +void genwqe_stop_traps(struct genwqe_dev *cd) +{ + __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull); +} + +/** + * genwqe_start_traps() - Start traps + * + * After having read the data, we can/must enable the traps again. + */ +void genwqe_start_traps(struct genwqe_dev *cd) +{ + __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull); + + if (genwqe_need_err_masking(cd)) + __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull); +} diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h new file mode 100644 index 00000000000000..46e916b36c7089 --- /dev/null +++ b/drivers/misc/genwqe/genwqe_driver.h @@ -0,0 +1,77 @@ +#ifndef __GENWQE_DRIVER_H__ +#define __GENWQE_DRIVER_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp + * Author: Joerg-Stephan Vogt + * Author: Michael Jung + * Author: Michael Ruettger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRV_VERS_STRING "2.0.0" + +/* + * Static minor number assignment, until we decide/implement + * something dynamic. + */ +#define GENWQE_MAX_MINOR 128 /* up to 128 possible genwqe devices */ + +/** + * ddcb_requ_alloc() - Allocate a new DDCB execution request + * + * This data structure contains the user visible fields of the DDCB + * to be executed. + * + * Return: ptr to genwqe_ddcb_cmd data structure + */ +struct genwqe_ddcb_cmd *ddcb_requ_alloc(void); + +/** + * ddcb_requ_free() - Free DDCB execution request. + * @req: ptr to genwqe_ddcb_cmd data structure. + */ +void ddcb_requ_free(struct genwqe_ddcb_cmd *req); + +u32 genwqe_crc32(u8 *buff, size_t len, u32 init); + +static inline void genwqe_hexdump(struct pci_dev *pci_dev, + const void *buff, unsigned int size) +{ + char prefix[32]; + + scnprintf(prefix, sizeof(prefix), "%s %s: ", + GENWQE_DEVNAME, pci_name(pci_dev)); + + print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff, + size, true); +} + +#endif /* __GENWQE_DRIVER_H__ */ diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index a2edb2ee092165..49c7a23f02fc55 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c @@ -224,7 +224,7 @@ static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd) } #ifdef CONFIG_IDE -int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, +static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev, unsigned int cmd, unsigned long arg) { @@ -334,9 +334,10 @@ static void execute_location(void *dst) static void execute_user_location(void *dst) { + /* Intentionally crossing kernel/user memory boundary. */ void (*func)(void) = dst; - if (copy_to_user(dst, do_nothing, EXEC_SIZE)) + if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) return; func(); } @@ -408,6 +409,8 @@ static void lkdtm_do_action(enum ctype which) case CT_SPINLOCKUP: /* Must be called twice to trigger. */ spin_lock(&lock_me_up); + /* Let sparse know we intended to exit holding the lock. 
*/ + __release(&lock_me_up); break; case CT_HUNG_TASK: set_current_state(TASK_UNINTERRUPTIBLE); diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index d22c6864508b2d..2fad8443282913 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c @@ -177,7 +177,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, unsigned long timeout; int i; - /* Only Posible if we are in timeout */ + /* Only possible if we are in timeout */ if (!cl || cl != &dev->iamthif_cl) { dev_dbg(&dev->pdev->dev, "bad file ext.\n"); return -ETIMEDOUT; @@ -249,7 +249,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, cb->response_buffer.size); dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); - /* length is being turncated to PAGE_SIZE, however, + /* length is being truncated to PAGE_SIZE, however, * the buf_idx may point beyond */ length = min_t(size_t, length, (cb->buf_idx - *offset)); @@ -316,6 +316,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) mei_hdr.host_addr = dev->iamthif_cl.host_client_id; mei_hdr.me_addr = dev->iamthif_cl.me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = 0; dev->iamthif_msg_buf_index += mei_hdr.length; ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); if (ret) @@ -477,6 +478,7 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = 0; if (*slots >= msg_slots) { mei_hdr.length = len; diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 87c96e4669e2c5..1ee2b9492a8252 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -154,7 +154,7 @@ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) return 0; } /** - * mei_io_cb_alloc_resp_buf - allocate respose buffer + * mei_io_cb_alloc_resp_buf - allocate response buffer * * @cb: io callback structure * @length: size of the buffer @@ -207,7 +207,7 @@ int mei_cl_flush_queues(struct mei_cl *cl) /** - * mei_cl_init - initializes intialize cl. + * mei_cl_init - initializes cl. 
* * @cl: host client to be initialized * @dev: mei device @@ -263,10 +263,10 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) return NULL; } -/** mei_cl_link: allocte host id in the host map +/** mei_cl_link: allocate host id in the host map * * @cl - host client - * @id - fixed host id or -1 for genereting one + * @id - fixed host id or -1 for generic one * * returns 0 on success * -EINVAL on incorrect values @@ -282,19 +282,19 @@ int mei_cl_link(struct mei_cl *cl, int id) dev = cl->dev; - /* If Id is not asigned get one*/ + /* If Id is not assigned get one*/ if (id == MEI_HOST_CLIENT_ID_ANY) id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); if (id >= MEI_CLIENTS_MAX) { - dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; + dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX); return -EMFILE; } open_handle_count = dev->open_handle_count + dev->iamthif_open_count; if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { - dev_err(&dev->pdev->dev, "open_handle_count exceded %d", + dev_err(&dev->pdev->dev, "open_handle_count exceeded %d", MEI_MAX_OPEN_HANDLE_COUNT); return -EMFILE; } @@ -344,8 +344,6 @@ int mei_cl_unlink(struct mei_cl *cl) cl->state = MEI_FILE_INITIALIZING; - list_del_init(&cl->link); - return 0; } @@ -372,13 +370,14 @@ void mei_host_client_init(struct work_struct *work) } dev->dev_state = MEI_DEV_ENABLED; + dev->reset_count = 0; mutex_unlock(&dev->device_lock); } /** - * mei_cl_disconnect - disconnect host clinet form the me one + * mei_cl_disconnect - disconnect host client from the me one * * @cl: host client * @@ -457,7 +456,7 @@ int mei_cl_disconnect(struct mei_cl *cl) * * @cl: private data of the file object * - * returns ture if other client is connected, 0 - otherwise. + * returns true if other client is connected, false - otherwise. */ bool mei_cl_is_other_connecting(struct mei_cl *cl) { @@ -481,7 +480,7 @@ bool mei_cl_is_other_connecting(struct mei_cl *cl) } /** - * mei_cl_connect - connect host clinet to the me one + * mei_cl_connect - connect host client to the me one * * @cl: host client * @@ -729,6 +728,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = cb->internal; if (*slots >= msg_slots) { mei_hdr.length = len; @@ -775,7 +775,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, * @cl: host client * @cl: write callback with filled data * - * returns numbe of bytes sent on success, <0 on failure. + * returns number of bytes sent on success, <0 on failure. 
*/ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) { @@ -828,6 +828,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = cb->internal; rets = mei_write_message(dev, &mei_hdr, buf->data); diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index e3870f22d238bb..a3ae154444b2e8 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -43,7 +43,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, mutex_lock(&dev->device_lock); - /* if the driver is not enabled the list won't b consitent */ + /* if the driver is not enabled the list won't be consistent */ if (dev->dev_state != MEI_DEV_ENABLED) goto out; @@ -101,7 +101,7 @@ static const struct file_operations mei_dbgfs_fops_devstate = { /** * mei_dbgfs_deregister - Remove the debugfs files and directories - * @mei - pointer to mei device private dat + * @mei - pointer to mei device private data */ void mei_dbgfs_deregister(struct mei_device *dev) { diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 9b3a0fb7f26586..28cd74c073b99e 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -28,9 +28,9 @@ * * @dev: the device structure * - * returns none. + * returns 0 on success -ENOMEM on allocation failure */ -static void mei_hbm_me_cl_allocate(struct mei_device *dev) +static int mei_hbm_me_cl_allocate(struct mei_device *dev) { struct mei_me_client *clients; int b; @@ -44,7 +44,7 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev) dev->me_clients_num++; if (dev->me_clients_num == 0) - return; + return 0; kfree(dev->me_clients); dev->me_clients = NULL; @@ -56,12 +56,10 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev) sizeof(struct mei_me_client), GFP_KERNEL); if (!clients) { dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); - dev->dev_state = MEI_DEV_RESETTING; - mei_reset(dev, 1); - return; + return -ENOMEM; } dev->me_clients = clients; - return; + return 0; } /** @@ -85,12 +83,12 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) } /** - * same_disconn_addr - tells if they have the same address + * mei_hbm_cl_addr_equal - tells if they have the same address * - * @file: private data of the file object. - * @disconn: disconnection request. + * @cl: - client + * @buf: buffer with cl header * - * returns true if addres are same + * returns true if addresses are the same */ static inline bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) @@ -128,6 +126,17 @@ static bool is_treat_specially_client(struct mei_cl *cl, return false; } +/** + * mei_hbm_idle - set hbm to idle state + * + * @dev: the device structure + */ +void mei_hbm_idle(struct mei_device *dev) +{ + dev->init_clients_timer = 0; + dev->hbm_state = MEI_HBM_IDLE; +} + int mei_hbm_start_wait(struct mei_device *dev) { int ret; @@ -137,7 +146,7 @@ int mei_hbm_start_wait(struct mei_device *dev) mutex_unlock(&dev->device_lock); ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, dev->hbm_state == MEI_HBM_IDLE || - dev->hbm_state > MEI_HBM_START, + dev->hbm_state >= MEI_HBM_STARTED, mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); mutex_lock(&dev->device_lock); @@ -153,12 +162,15 @@ int mei_hbm_start_wait(struct mei_device *dev) * mei_hbm_start_req - sends start request message. 
* * @dev: the device structure + * + * returns 0 on success and < 0 on failure */ int mei_hbm_start_req(struct mei_device *dev) { struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; struct hbm_host_version_request *start_req; const size_t len = sizeof(struct hbm_host_version_request); + int ret; mei_hbm_hdr(mei_hdr, len); @@ -170,12 +182,13 @@ int mei_hbm_start_req(struct mei_device *dev) start_req->host_version.minor_version = HBM_MINOR_VERSION; dev->hbm_state = MEI_HBM_IDLE; - if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { - dev_err(&dev->pdev->dev, "version message write failed\n"); - dev->dev_state = MEI_DEV_RESETTING; - mei_reset(dev, 1); - return -EIO; + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n", + ret); + return ret; } + dev->hbm_state = MEI_HBM_START; dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; return 0; @@ -186,13 +199,15 @@ int mei_hbm_start_req(struct mei_device *dev) * * @dev: the device structure * - * returns none. + * returns 0 on success and < 0 on failure */ -static void mei_hbm_enum_clients_req(struct mei_device *dev) +static int mei_hbm_enum_clients_req(struct mei_device *dev) { struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; struct hbm_host_enum_request *enum_req; const size_t len = sizeof(struct hbm_host_enum_request); + int ret; + /* enumerate clients */ mei_hbm_hdr(mei_hdr, len); @@ -200,14 +215,15 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev) memset(enum_req, 0, len); enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; - if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { - dev->dev_state = MEI_DEV_RESETTING; - dev_err(&dev->pdev->dev, "enumeration request write failed.\n"); - mei_reset(dev, 1); + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n", + ret); + return ret; } dev->hbm_state = MEI_HBM_ENUM_CLIENTS; dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; - return; + return 0; } /** @@ -215,7 +231,7 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev) * * @dev: the device structure * - * returns none. + * returns 0 on success and < 0 on failure */ static int mei_hbm_prop_req(struct mei_device *dev) @@ -226,7 +242,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) const size_t len = sizeof(struct hbm_props_request); unsigned long next_client_index; unsigned long client_num; - + int ret; client_num = dev->me_client_presentation_num; @@ -253,12 +269,11 @@ static int mei_hbm_prop_req(struct mei_device *dev) prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; prop_req->address = next_client_index; - if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { - dev->dev_state = MEI_DEV_RESETTING; - dev_err(&dev->pdev->dev, "properties request write failed\n"); - mei_reset(dev, 1); - - return -EIO; + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n", + ret); + return ret; } dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; @@ -268,7 +283,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) } /** - * mei_hbm_stop_req_prepare - perpare stop request message + * mei_hbm_stop_req_prepare - prepare stop request message * * @dev - mei device * @mei_hdr - mei message header @@ -289,7 +304,7 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev, } /** - * mei_hbm_cl_flow_control_req - sends flow control requst. 
+ * mei_hbm_cl_flow_control_req - sends flow control request. * * @dev: the device structure * @cl: client info @@ -451,7 +466,7 @@ int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) } /** - * mei_hbm_cl_connect_res - connect resposne from the ME + * mei_hbm_cl_connect_res - connect response from the ME * * @dev: the device structure * @rs: connect response bus message @@ -505,8 +520,8 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, /** - * mei_hbm_fw_disconnect_req - disconnect request initiated by me - * host sends disoconnect response + * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware + * host sends disconnect response * * @dev: the device structure. * @disconnect_req: disconnect request bus message from the me @@ -559,8 +574,10 @@ bool mei_hbm_version_is_supported(struct mei_device *dev) * * @dev: the device structure * @mei_hdr: header of bus message + * + * returns 0 on success and < 0 on failure */ -void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) { struct mei_bus_message *mei_msg; struct mei_me_client *me_client; @@ -577,8 +594,20 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) mei_read_slots(dev, dev->rd_msg_buf, hdr->length); mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; + /* ignore spurious message and prevent reset nesting + * hbm is put to idle during system reset + */ + if (dev->hbm_state == MEI_HBM_IDLE) { + dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n"); + return 0; + } + switch (mei_msg->hbm_cmd) { case HOST_START_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n"); + + dev->init_clients_timer = 0; + version_res = (struct hbm_host_version_response *)mei_msg; dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", @@ -597,73 +626,89 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) } if (!mei_hbm_version_is_supported(dev)) { - dev_warn(&dev->pdev->dev, "hbm version mismatch: stopping the driver.\n"); + dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); - dev->hbm_state = MEI_HBM_STOP; + dev->hbm_state = MEI_HBM_STOPPED; mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, dev->wr_msg.data); - mei_write_message(dev, &dev->wr_msg.hdr, - dev->wr_msg.data); + if (mei_write_message(dev, &dev->wr_msg.hdr, + dev->wr_msg.data)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); + return -EIO; + } + break; + } - return; + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_START) { + dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; } - if (dev->dev_state == MEI_DEV_INIT_CLIENTS && - dev->hbm_state == MEI_HBM_START) { - dev->init_clients_timer = 0; - mei_hbm_enum_clients_req(dev); - } else { - dev_err(&dev->pdev->dev, "reset: wrong host start response\n"); - mei_reset(dev, 1); - return; + dev->hbm_state = MEI_HBM_STARTED; + + if (mei_hbm_enum_clients_req(dev)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n"); + return -EIO; } wake_up_interruptible(&dev->wait_recvd_msg); - dev_dbg(&dev->pdev->dev, "host start response message received.\n"); break; case CLIENT_CONNECT_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n"); + connect_res = (struct hbm_client_connect_response *) mei_msg; 
mei_hbm_cl_connect_res(dev, connect_res); - dev_dbg(&dev->pdev->dev, "client connect response message received.\n"); wake_up(&dev->wait_recvd_msg); break; case CLIENT_DISCONNECT_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n"); + disconnect_res = (struct hbm_client_connect_response *) mei_msg; mei_hbm_cl_disconnect_res(dev, disconnect_res); - dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n"); wake_up(&dev->wait_recvd_msg); break; case MEI_FLOW_CONTROL_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n"); + flow_control = (struct hbm_flow_control *) mei_msg; mei_hbm_cl_flow_control_res(dev, flow_control); - dev_dbg(&dev->pdev->dev, "client flow control response message received.\n"); break; case HOST_CLIENT_PROPERTIES_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->me_clients == NULL) { + dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n"); + return -EPROTO; + } + props_res = (struct hbm_props_response *)mei_msg; me_client = &dev->me_clients[dev->me_client_presentation_num]; - if (props_res->status || !dev->me_clients) { - dev_err(&dev->pdev->dev, "reset: properties response hbm wrong status.\n"); - mei_reset(dev, 1); - return; + if (props_res->status) { + dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n", + props_res->status); + return -EPROTO; } if (me_client->client_id != props_res->address) { - dev_err(&dev->pdev->dev, "reset: host properties response address mismatch\n"); - mei_reset(dev, 1); - return; + dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n", + me_client->client_id, props_res->address); + return -EPROTO; } if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { - dev_err(&dev->pdev->dev, "reset: unexpected properties response\n"); - mei_reset(dev, 1); - - return; + dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; } me_client->props = props_res->client_properties; @@ -671,49 +716,70 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev->me_client_presentation_num++; /* request property for the next client */ - mei_hbm_prop_req(dev); + if (mei_hbm_prop_req(dev)) + return -EIO; break; case HOST_ENUM_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n"); + + dev->init_clients_timer = 0; + enum_res = (struct hbm_host_enum_response *) mei_msg; BUILD_BUG_ON(sizeof(dev->me_clients_map) < sizeof(enum_res->valid_addresses)); memcpy(dev->me_clients_map, enum_res->valid_addresses, sizeof(enum_res->valid_addresses)); - if (dev->dev_state == MEI_DEV_INIT_CLIENTS && - dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { - dev->init_clients_timer = 0; - mei_hbm_me_cl_allocate(dev); - dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; - - /* first property reqeust */ - mei_hbm_prop_req(dev); - } else { - dev_err(&dev->pdev->dev, "reset: unexpected enumeration response hbm.\n"); - mei_reset(dev, 1); - return; + + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { + dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + if (mei_hbm_me_cl_allocate(dev)) { + dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n"); + 
return -ENOMEM; } + + dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; + + /* first property request */ + if (mei_hbm_prop_req(dev)) + return -EIO; + break; case HOST_STOP_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n"); + + dev->init_clients_timer = 0; - if (dev->hbm_state != MEI_HBM_STOP) - dev_err(&dev->pdev->dev, "unexpected stop response hbm.\n"); - dev->dev_state = MEI_DEV_DISABLED; - dev_info(&dev->pdev->dev, "reset: FW stop response.\n"); - mei_reset(dev, 1); + if (dev->hbm_state != MEI_HBM_STOPPED) { + dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dev->dev_state = MEI_DEV_POWER_DOWN; + dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n"); + /* force the reset */ + return -EPROTO; break; case CLIENT_DISCONNECT_REQ_CMD: - /* search for client */ + dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n"); + disconnect_req = (struct hbm_client_connect_request *)mei_msg; mei_hbm_fw_disconnect_req(dev, disconnect_req); break; case ME_STOP_REQ_CMD: + dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); - dev->hbm_state = MEI_HBM_STOP; + dev->hbm_state = MEI_HBM_STOPPED; mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, dev->wr_ext_msg.data); break; @@ -722,5 +788,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; } + return 0; } diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 4ae2e56e404f8d..5f92188a5cd732 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -32,13 +32,13 @@ struct mei_cl; enum mei_hbm_state { MEI_HBM_IDLE = 0, MEI_HBM_START, + MEI_HBM_STARTED, MEI_HBM_ENUM_CLIENTS, MEI_HBM_CLIENT_PROPERTIES, - MEI_HBM_STARTED, - MEI_HBM_STOP, + MEI_HBM_STOPPED, }; -void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) { @@ -49,6 +49,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) hdr->reserved = 0; } +void mei_hbm_idle(struct mei_device *dev); int mei_hbm_start_req(struct mei_device *dev); int mei_hbm_start_wait(struct mei_device *dev); int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 3412adcdaeb08e..6f656c053b144a 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -185,7 +185,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) mei_me_reg_write(hw, H_CSR, hcsr); - if (dev->dev_state == MEI_DEV_POWER_DOWN) + if (intr_enable == false) mei_me_hw_reset_release(dev); return 0; @@ -469,7 +469,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) struct mei_device *dev = (struct mei_device *) dev_id; struct mei_cl_cb complete_list; s32 slots; - int rets; + int rets = 0; dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); /* initialize our complete list */ @@ -482,15 +482,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) mei_clear_interrupts(dev); /* check if ME wants a reset */ - if (!mei_hw_is_ready(dev) && - dev->dev_state != MEI_DEV_RESETTING && - dev->dev_state != MEI_DEV_INITIALIZING && - dev->dev_state != MEI_DEV_POWER_DOWN && - dev->dev_state != MEI_DEV_POWER_UP) { - dev_dbg(&dev->pdev->dev, "FW not ready.\n"); - mei_reset(dev, 1); - mutex_unlock(&dev->device_lock); - return 
IRQ_HANDLED; + if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { + dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; } /* check if we need to start the dev */ @@ -500,15 +495,12 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) dev->recvd_hw_ready = true; wake_up_interruptible(&dev->wait_hw_ready); - - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; } else { + dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); mei_me_hw_reset_release(dev); - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; } + goto end; } /* check slots available for reading */ slots = mei_count_full_read_slots(dev); @@ -516,21 +508,23 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* we have urgent data to send so break the read */ if (dev->wr_ext_msg.hdr.length) break; - dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots); - dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n"); + dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots); rets = mei_irq_read_handler(dev, &complete_list, &slots); - if (rets) + if (rets && dev->dev_state != MEI_DEV_RESETTING) { + schedule_work(&dev->reset_work); goto end; + } } + rets = mei_irq_write_handler(dev, &complete_list); -end: - dev_dbg(&dev->pdev->dev, "end of bottom half function.\n"); - dev->hbuf_is_ready = mei_hbuf_is_ready(dev); - mutex_unlock(&dev->device_lock); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); mei_irq_compl_handler(dev, &complete_list); +end: + dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); + mutex_unlock(&dev->device_lock); return IRQ_HANDLED; } static const struct mei_hw_ops mei_me_hw_ops = { diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index cb2f556b42528a..dd44e33ad2b6a3 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -111,7 +111,8 @@ struct mei_msg_hdr { u32 me_addr:8; u32 host_addr:8; u32 length:9; - u32 reserved:6; + u32 reserved:5; + u32 internal:1; u32 msg_complete:1; } __packed; diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index f7f3abbe12b6e2..cdd31c2a2a2bf0 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -43,41 +43,119 @@ const char *mei_dev_state_str(int state) #undef MEI_DEV_STATE } -void mei_device_init(struct mei_device *dev) -{ - /* setup our list array */ - INIT_LIST_HEAD(&dev->file_list); - INIT_LIST_HEAD(&dev->device_list); - mutex_init(&dev->device_lock); - init_waitqueue_head(&dev->wait_hw_ready); - init_waitqueue_head(&dev->wait_recvd_msg); - init_waitqueue_head(&dev->wait_stop_wd); - dev->dev_state = MEI_DEV_INITIALIZING; - mei_io_list_init(&dev->read_list); - mei_io_list_init(&dev->write_list); - mei_io_list_init(&dev->write_waiting_list); - mei_io_list_init(&dev->ctrl_wr_list); - mei_io_list_init(&dev->ctrl_rd_list); +/** + * mei_cancel_work. Cancel mei background jobs + * + * @dev: the device structure + * + * returns 0 on success or < 0 if the reset hasn't succeeded + */ +void mei_cancel_work(struct mei_device *dev) +{ + cancel_work_sync(&dev->init_work); + cancel_work_sync(&dev->reset_work); - INIT_DELAYED_WORK(&dev->timer_work, mei_timer); - INIT_WORK(&dev->init_work, mei_host_client_init); + cancel_delayed_work(&dev->timer_work); +} +EXPORT_SYMBOL_GPL(mei_cancel_work); - INIT_LIST_HEAD(&dev->wd_cl.link); - INIT_LIST_HEAD(&dev->iamthif_cl.link); - mei_io_list_init(&dev->amthif_cmd_list); - mei_io_list_init(&dev->amthif_rd_complete_list); +/** + * mei_reset - resets host and fw. 
+ * + * @dev: the device structure + */ +int mei_reset(struct mei_device *dev) +{ + enum mei_dev_state state = dev->dev_state; + bool interrupts_enabled; + int ret; - bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); - dev->open_handle_count = 0; + if (state != MEI_DEV_INITIALIZING && + state != MEI_DEV_DISABLED && + state != MEI_DEV_POWER_DOWN && + state != MEI_DEV_POWER_UP) + dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", + mei_dev_state_str(state)); - /* - * Reserving the first client ID - * 0: Reserved for MEI Bus Message communications + /* we're already in reset, cancel the init timer + * if the reset was called due the hbm protocol error + * we need to call it before hw start + * so the hbm watchdog won't kick in */ - bitmap_set(dev->host_clients_map, 0, 1); + mei_hbm_idle(dev); + + /* enter reset flow */ + interrupts_enabled = state != MEI_DEV_POWER_DOWN; + dev->dev_state = MEI_DEV_RESETTING; + + dev->reset_count++; + if (dev->reset_count > MEI_MAX_CONSEC_RESET) { + dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); + dev->dev_state = MEI_DEV_DISABLED; + return -ENODEV; + } + + ret = mei_hw_reset(dev, interrupts_enabled); + /* fall through and remove the sw state even if hw reset has failed */ + + /* no need to clean up software state in case of power up */ + if (state != MEI_DEV_INITIALIZING && + state != MEI_DEV_POWER_UP) { + + /* remove all waiting requests */ + mei_cl_all_write_clear(dev); + + mei_cl_all_disconnect(dev); + + /* wake up all readers and writers so they can be interrupted */ + mei_cl_all_wakeup(dev); + + /* remove entry if already in list */ + dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); + mei_cl_unlink(&dev->wd_cl); + mei_cl_unlink(&dev->iamthif_cl); + mei_amthif_reset_params(dev); + memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); + } + + + dev->me_clients_num = 0; + dev->rd_msg_hdr = 0; + dev->wd_pending = false; + + if (ret) { + dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); + dev->dev_state = MEI_DEV_DISABLED; + return ret; + } + + if (state == MEI_DEV_POWER_DOWN) { + dev_dbg(&dev->pdev->dev, "powering down: end of reset\n"); + dev->dev_state = MEI_DEV_DISABLED; + return 0; + } + + ret = mei_hw_start(dev); + if (ret) { + dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); + dev->dev_state = MEI_DEV_DISABLED; + return ret; + } + + dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); + + dev->dev_state = MEI_DEV_INIT_CLIENTS; + ret = mei_hbm_start_req(dev); + if (ret) { + dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); + dev->dev_state = MEI_DEV_DISABLED; + return ret; + } + + return 0; } -EXPORT_SYMBOL_GPL(mei_device_init); +EXPORT_SYMBOL_GPL(mei_reset); /** * mei_start - initializes host and fw to start work. 
@@ -90,14 +168,21 @@ int mei_start(struct mei_device *dev) { mutex_lock(&dev->device_lock); - /* acknowledge interrupt and stop interupts */ + /* acknowledge interrupt and stop interrupts */ mei_clear_interrupts(dev); mei_hw_config(dev); dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); - mei_reset(dev, 1); + dev->dev_state = MEI_DEV_INITIALIZING; + dev->reset_count = 0; + mei_reset(dev); + + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "reset failed"); + goto err; + } if (mei_hbm_start_wait(dev)) { dev_err(&dev->pdev->dev, "HBM haven't started"); @@ -132,101 +217,64 @@ int mei_start(struct mei_device *dev) EXPORT_SYMBOL_GPL(mei_start); /** - * mei_reset - resets host and fw. + * mei_restart - restart device after suspend * * @dev: the device structure - * @interrupts_enabled: if interrupt should be enabled after reset. + * + * returns 0 on success or -ENODEV if the restart hasn't succeeded */ -void mei_reset(struct mei_device *dev, int interrupts_enabled) +int mei_restart(struct mei_device *dev) { - bool unexpected; - int ret; - - unexpected = (dev->dev_state != MEI_DEV_INITIALIZING && - dev->dev_state != MEI_DEV_DISABLED && - dev->dev_state != MEI_DEV_POWER_DOWN && - dev->dev_state != MEI_DEV_POWER_UP); + int err; - if (unexpected) - dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", - mei_dev_state_str(dev->dev_state)); - - ret = mei_hw_reset(dev, interrupts_enabled); - if (ret) { - dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n"); - interrupts_enabled = false; - dev->dev_state = MEI_DEV_DISABLED; - } - - dev->hbm_state = MEI_HBM_IDLE; + mutex_lock(&dev->device_lock); - if (dev->dev_state != MEI_DEV_INITIALIZING && - dev->dev_state != MEI_DEV_POWER_UP) { - if (dev->dev_state != MEI_DEV_DISABLED && - dev->dev_state != MEI_DEV_POWER_DOWN) - dev->dev_state = MEI_DEV_RESETTING; + mei_clear_interrupts(dev); - /* remove all waiting requests */ - mei_cl_all_write_clear(dev); + dev->dev_state = MEI_DEV_POWER_UP; + dev->reset_count = 0; - mei_cl_all_disconnect(dev); + err = mei_reset(dev); - /* wake up all readings so they can be interrupted */ - mei_cl_all_wakeup(dev); - - /* remove entry if already in list */ - dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); - mei_cl_unlink(&dev->wd_cl); - mei_cl_unlink(&dev->iamthif_cl); - mei_amthif_reset_params(dev); - memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); - } + mutex_unlock(&dev->device_lock); - /* we're already in reset, cancel the init timer */ - dev->init_clients_timer = 0; + if (err || dev->dev_state == MEI_DEV_DISABLED) + return -ENODEV; - dev->me_clients_num = 0; - dev->rd_msg_hdr = 0; - dev->wd_pending = false; + return 0; +} +EXPORT_SYMBOL_GPL(mei_restart); - if (!interrupts_enabled) { - dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n"); - return; - } - ret = mei_hw_start(dev); - if (ret) { - dev_err(&dev->pdev->dev, "hw_start failed disabling the device\n"); - dev->dev_state = MEI_DEV_DISABLED; - return; - } +static void mei_reset_work(struct work_struct *work) +{ + struct mei_device *dev = + container_of(work, struct mei_device, reset_work); - dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); - /* link is established * start sending messages. 
*/ + mutex_lock(&dev->device_lock); - dev->dev_state = MEI_DEV_INIT_CLIENTS; + mei_reset(dev); - mei_hbm_start_req(dev); + mutex_unlock(&dev->device_lock); + if (dev->dev_state == MEI_DEV_DISABLED) + dev_err(&dev->pdev->dev, "reset failed"); } -EXPORT_SYMBOL_GPL(mei_reset); void mei_stop(struct mei_device *dev) { dev_dbg(&dev->pdev->dev, "stopping the device.\n"); - flush_scheduled_work(); + mei_cancel_work(dev); - mutex_lock(&dev->device_lock); + mei_nfc_host_exit(dev); - cancel_delayed_work(&dev->timer_work); + mutex_lock(&dev->device_lock); mei_wd_stop(dev); - mei_nfc_host_exit(); - dev->dev_state = MEI_DEV_POWER_DOWN; - mei_reset(dev, 0); + mei_reset(dev); mutex_unlock(&dev->device_lock); @@ -236,3 +284,41 @@ EXPORT_SYMBOL_GPL(mei_stop); +void mei_device_init(struct mei_device *dev) +{ + /* setup our list array */ + INIT_LIST_HEAD(&dev->file_list); + INIT_LIST_HEAD(&dev->device_list); + mutex_init(&dev->device_lock); + init_waitqueue_head(&dev->wait_hw_ready); + init_waitqueue_head(&dev->wait_recvd_msg); + init_waitqueue_head(&dev->wait_stop_wd); + dev->dev_state = MEI_DEV_INITIALIZING; + dev->reset_count = 0; + + mei_io_list_init(&dev->read_list); + mei_io_list_init(&dev->write_list); + mei_io_list_init(&dev->write_waiting_list); + mei_io_list_init(&dev->ctrl_wr_list); + mei_io_list_init(&dev->ctrl_rd_list); + + INIT_DELAYED_WORK(&dev->timer_work, mei_timer); + INIT_WORK(&dev->init_work, mei_host_client_init); + INIT_WORK(&dev->reset_work, mei_reset_work); + + INIT_LIST_HEAD(&dev->wd_cl.link); + INIT_LIST_HEAD(&dev->iamthif_cl.link); + mei_io_list_init(&dev->amthif_cmd_list); + mei_io_list_init(&dev->amthif_rd_complete_list); + + bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); + dev->open_handle_count = 0; + + /* + * Reserving the first client ID + * 0: Reserved for MEI Bus Message communications + */ + bitmap_set(dev->host_clients_map, 0, 1); +} +EXPORT_SYMBOL_GPL(mei_device_init); + diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 7a95c07e59a6d6..f0fbb5179f80cf 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -31,7 +31,7 @@ /** - * mei_irq_compl_handler - dispatch complete handelers + * mei_irq_compl_handler - dispatch complete handlers * for the completed callbacks * * @dev - mei device @@ -301,13 +301,11 @@ int mei_irq_read_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list, s32 *slots) { struct mei_msg_hdr *mei_hdr; - struct mei_cl *cl_pos = NULL; - struct mei_cl *cl_next = NULL; - int ret = 0; + struct mei_cl *cl; + int ret; if (!dev->rd_msg_hdr) { dev->rd_msg_hdr = mei_read_hdr(dev); - dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); (*slots)--; dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); } @@ -315,61 +313,67 @@ int mei_irq_read_handler(struct mei_device *dev, dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_hdr->reserved || !dev->rd_msg_hdr) { - dev_dbg(&dev->pdev->dev, "corrupted message header.\n"); + dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n", + dev->rd_msg_hdr); ret = -EBADMSG; goto end; } - if (mei_hdr->host_addr || mei_hdr->me_addr) { - list_for_each_entry_safe(cl_pos, cl_next, - &dev->file_list, link) { - dev_dbg(&dev->pdev->dev, - "list_for_each_entry_safe read host" - " client = %d, ME client = %d\n", - cl_pos->host_client_id, - cl_pos->me_client_id); - if (mei_cl_hbm_equal(cl_pos, mei_hdr)) - break; - } - - if (&cl_pos->link == &dev->file_list) { - dev_dbg(&dev->pdev->dev, "corrupted message header\n"); - ret = -EBADMSG; - goto end; - } - } - if 
(((*slots) * sizeof(u32)) < mei_hdr->length) { - dev_err(&dev->pdev->dev, - "we can't read the message slots =%08x.\n", + if (mei_slots2data(*slots) < mei_hdr->length) { + dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", *slots); /* we can't read the message */ ret = -ERANGE; goto end; } - /* decide where to read the message too */ - if (!mei_hdr->host_addr) { - dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n"); - mei_hbm_dispatch(dev, mei_hdr); - dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n"); - } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && - (MEI_FILE_CONNECTED == dev->iamthif_cl.state) && - (dev->iamthif_state == MEI_IAMTHIF_READING)) { + /* HBM message */ + if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { + ret = mei_hbm_dispatch(dev, mei_hdr); + if (ret) { + dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n", + ret); + goto end; + } + goto reset_slots; + } + + /* find recipient cl */ + list_for_each_entry(cl, &dev->file_list, link) { + if (mei_cl_hbm_equal(cl, mei_hdr)) { + cl_dbg(dev, cl, "got a message\n"); + break; + } + } + + /* if no recipient cl was found we assume corrupted header */ + if (&cl->link == &dev->file_list) { + dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n", + dev->rd_msg_hdr); + ret = -EBADMSG; + goto end; + } - dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_msg.\n"); - dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); + if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && + MEI_FILE_CONNECTED == dev->iamthif_cl.state && + dev->iamthif_state == MEI_IAMTHIF_READING) { ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); - if (ret) + if (ret) { + dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n", + ret); goto end; + } } else { - dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n"); - dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); - if (ret) + if (ret) { + dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n", + ret); goto end; + } } +reset_slots: /* reset the number of slots and header */ *slots = mei_count_full_read_slots(dev); dev->rd_msg_hdr = 0; @@ -533,7 +537,6 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler); * * @work: pointer to the work_struct structure * - * NOTE: This function is called by timer interrupt work */ void mei_timer(struct work_struct *work) { @@ -548,24 +551,30 @@ void mei_timer(struct work_struct *work) mutex_lock(&dev->device_lock); - if (dev->dev_state != MEI_DEV_ENABLED) { - if (dev->dev_state == MEI_DEV_INIT_CLIENTS) { - if (dev->init_clients_timer) { - if (--dev->init_clients_timer == 0) { - dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n", - dev->hbm_state); - mei_reset(dev, 1); - } + + /* Catch interrupt stalls during HBM init handshake */ + if (dev->dev_state == MEI_DEV_INIT_CLIENTS && + dev->hbm_state != MEI_HBM_IDLE) { + + if (dev->init_clients_timer) { + if (--dev->init_clients_timer == 0) { + dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n", + dev->hbm_state); + mei_reset(dev); + goto out; } } - goto out; } + + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + /*** connect/disconnect timeouts ***/ list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { if (cl_pos->timer_count) { if (--cl_pos->timer_count == 0) { - dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n"); - mei_reset(dev, 1); + dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); + 
mei_reset(dev); goto out; } } @@ -573,8 +582,8 @@ void mei_timer(struct work_struct *work) if (dev->iamthif_stall_timer) { if (--dev->iamthif_stall_timer == 0) { - dev_err(&dev->pdev->dev, "reset: amthif hanged.\n"); - mei_reset(dev, 1); + dev_err(&dev->pdev->dev, "timer: amthif hanged.\n"); + mei_reset(dev); dev->iamthif_msg_buf_size = 0; dev->iamthif_msg_buf_index = 0; dev->iamthif_canceled = false; @@ -627,7 +636,8 @@ void mei_timer(struct work_struct *work) } } out: - schedule_delayed_work(&dev->timer_work, 2 * HZ); + if (dev->dev_state != MEI_DEV_DISABLED) + schedule_delayed_work(&dev->timer_work, 2 * HZ); mutex_unlock(&dev->device_lock); } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 9661a812f55045..5424f8ff3f7f7d 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -48,7 +48,7 @@ * * @inode: pointer to inode structure * @file: pointer to file structure - e + * * returns 0 on success, <0 on error */ static int mei_open(struct inode *inode, struct file *file) diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 406f68e05b4ed4..f7de95b4cdd907 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -60,12 +60,17 @@ extern const uuid_le mei_wd_guid; */ #define MEI_CLIENTS_MAX 256 +/* + * maximum number of consecutive resets + */ +#define MEI_MAX_CONSEC_RESET 3 + /* * Number of File descriptors/handles * that can be opened to the driver. * * Limit to 255: 256 Total Clients - * minus internal client for MEI Bus Messags + * minus internal client for MEI Bus Messages */ #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) @@ -178,9 +183,10 @@ struct mei_cl_cb { unsigned long buf_idx; unsigned long read_time; struct file *file_object; + u32 internal:1; }; -/* MEI client instance carried as file->pirvate_data*/ +/* MEI client instance carried as file->private_data*/ struct mei_cl { struct list_head link; struct mei_device *dev; @@ -326,6 +332,7 @@ struct mei_cl_device { /** * struct mei_device - MEI private device struct + * @reset_count - limits the number of consecutive resets * @hbm_state - state of host bus message protocol * @mem_addr - mem mapped base register address @@ -369,6 +376,7 @@ struct mei_device { /* * mei device states */ + unsigned long reset_count; enum mei_dev_state dev_state; enum mei_hbm_state hbm_state; u16 init_clients_timer; @@ -427,6 +435,7 @@ struct mei_device { bool iamthif_canceled; struct work_struct init_work; + struct work_struct reset_work; /* List of bus devices */ struct list_head device_list; @@ -456,13 +465,25 @@ static inline u32 mei_data2slots(size_t length) return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); } +/** + * mei_slots2data- get data in slots - bytes from slots + * @slots - number of available slots + * returns - number of bytes in slots + */ +static inline u32 mei_slots2data(int slots) +{ + return slots * 4; +} + /* * mei init function prototypes */ void mei_device_init(struct mei_device *dev); -void mei_reset(struct mei_device *dev, int interrupts); +int mei_reset(struct mei_device *dev); int mei_start(struct mei_device *dev); +int mei_restart(struct mei_device *dev); void mei_stop(struct mei_device *dev); +void mei_cancel_work(struct mei_device *dev); /* * MEI interrupt functions prototype @@ -510,7 +531,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); * NFC functions */ int mei_nfc_host_init(struct mei_device *dev); -void mei_nfc_host_exit(void); +void mei_nfc_host_exit(struct mei_device *dev); /* * NFC Client UUID @@ -626,9 
+647,9 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {} int mei_register(struct mei_device *dev); void mei_deregister(struct mei_device *dev); -#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d" +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" #define MEI_HDR_PRM(hdr) \ (hdr)->host_addr, (hdr)->me_addr, \ - (hdr)->length, (hdr)->msg_complete + (hdr)->length, (hdr)->internal, (hdr)->msg_complete #endif diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c index 994ca4aff1a37e..a58320c0c049c7 100644 --- a/drivers/misc/mei/nfc.c +++ b/drivers/misc/mei/nfc.c @@ -92,7 +92,7 @@ struct mei_nfc_hci_hdr { * @cl: NFC host client * @cl_info: NFC info host client * @init_work: perform connection to the info client - * @fw_ivn: NFC Intervace Version Number + * @fw_ivn: NFC Interface Version Number * @vendor_id: NFC manufacturer ID * @radio_type: NFC radio type */ @@ -163,7 +163,7 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) return 0; default: - dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", + dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", ndev->radio_type); return -EINVAL; @@ -175,14 +175,14 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) ndev->bus_name = "pn544"; return 0; default: - dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", + dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", ndev->radio_type); return -EINVAL; } default: - dev_err(&dev->pdev->dev, "Unknow vendor ID 0x%x\n", + dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n", ndev->vendor_id); return -EINVAL; @@ -428,7 +428,7 @@ static void mei_nfc_init(struct work_struct *work) mutex_unlock(&dev->device_lock); if (mei_nfc_if_version(ndev) < 0) { - dev_err(&dev->pdev->dev, "Could not get the NFC interfave version"); + dev_err(&dev->pdev->dev, "Could not get the NFC interface version"); goto err; } @@ -469,7 +469,9 @@ static void mei_nfc_init(struct work_struct *work) return; err: + mutex_lock(&dev->device_lock); mei_nfc_free(ndev); + mutex_unlock(&dev->device_lock); return; } @@ -481,7 +483,7 @@ int mei_nfc_host_init(struct mei_device *dev) struct mei_cl *cl_info, *cl = NULL; int i, ret; - /* already initialzed */ + /* already initialized */ if (ndev->cl_info) return 0; @@ -547,12 +549,16 @@ int mei_nfc_host_init(struct mei_device *dev) return ret; } -void mei_nfc_host_exit(void) +void mei_nfc_host_exit(struct mei_device *dev) { struct mei_nfc_dev *ndev = &nfc_dev; + cancel_work_sync(&ndev->init_work); + + mutex_lock(&dev->device_lock); if (ndev->cl && ndev->cl->device) mei_cl_remove_device(ndev->cl->device); mei_nfc_free(ndev); + mutex_unlock(&dev->device_lock); } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 2cab3c0a680536..ddadd08956f46b 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -144,6 +144,21 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "failed to get pci regions.\n"); goto disable_device; } + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + } + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto release_regions; + } + + /* allocates and initializes the mei dev structure */ dev = mei_me_dev_init(pdev); if (!dev) { @@ -197,8 +212,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) return 0; release_irq: + mei_cancel_work(dev); mei_disable_interrupts(dev); - flush_scheduled_work(); free_irq(pdev->irq, dev); disable_msi: pci_disable_msi(pdev); @@ -306,16 +321,14 @@ static int mei_me_pci_resume(struct device *device) return err; } - mutex_lock(&dev->device_lock); - dev->dev_state = MEI_DEV_POWER_UP; - mei_clear_interrupts(dev); - mei_reset(dev, 1); - mutex_unlock(&dev->device_lock); + err = mei_restart(dev); + if (err) + return err; /* Start timer if stopped in suspend */ schedule_delayed_work(&dev->timer_work, HZ); - return err; + return 0; } static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); #define MEI_ME_PM_OPS (&mei_me_pm_ops) diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index 9e354216c163d4..f70945ed96f6f8 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c @@ -115,6 +115,7 @@ int mei_wd_send(struct mei_device *dev) hdr.me_addr = dev->wd_cl.me_client_id; hdr.msg_complete = 1; hdr.reserved = 0; + hdr.internal = 0; if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) hdr.length = MEI_WD_START_MSG_SIZE; diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h index 3574cc375bb95f..1a6edce2ecde56 100644 --- a/drivers/misc/mic/host/mic_device.h +++ b/drivers/misc/mic/host/mic_device.h @@ -112,7 +112,7 @@ struct mic_device { struct work_struct shutdown_work; u8 state; u8 shutdown_status; - struct sysfs_dirent *state_sysfs; + struct kernfs_node *state_sysfs; struct completion reset_wait; void *log_buf_addr; int *log_buf_len; @@ -134,6 +134,8 @@ struct mic_device { * @send_intr: Send an interrupt for a particular doorbell on the card. * @ack_interrupt: Hardware specific operations to ack the h/w on * receipt of an interrupt. + * @intr_workarounds: Hardware specific workarounds needed after + * handling an interrupt. * @reset: Reset the remote processor. * @reset_fw_ready: Reset firmware ready field. * @is_fw_ready: Check if firmware is ready for OS download. 
@@ -149,6 +151,7 @@ struct mic_hw_ops { void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val); void (*send_intr)(struct mic_device *mdev, int doorbell); u32 (*ack_interrupt)(struct mic_device *mdev); + void (*intr_workarounds)(struct mic_device *mdev); void (*reset)(struct mic_device *mdev); void (*reset_fw_ready)(struct mic_device *mdev); bool (*is_fw_ready)(struct mic_device *mdev); diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c index ad838c7651c468..c04a021e20c776 100644 --- a/drivers/misc/mic/host/mic_main.c +++ b/drivers/misc/mic/host/mic_main.c @@ -115,7 +115,7 @@ static irqreturn_t mic_shutdown_db(int irq, void *data) struct mic_device *mdev = data; struct mic_bootparam *bootparam = mdev->dp; - mdev->ops->ack_interrupt(mdev); + mdev->ops->intr_workarounds(mdev); switch (bootparam->shutdown_status) { case MIC_HALTED: diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c index e04bb4fe68235a..752ff873f891bb 100644 --- a/drivers/misc/mic/host/mic_virtio.c +++ b/drivers/misc/mic/host/mic_virtio.c @@ -369,7 +369,7 @@ static irqreturn_t mic_virtio_intr_handler(int irq, void *data) struct mic_vdev *mvdev = data; struct mic_device *mdev = mvdev->mdev; - mdev->ops->ack_interrupt(mdev); + mdev->ops->intr_workarounds(mdev); schedule_work(&mvdev->virtio_bh_work); return IRQ_HANDLED; } diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c index 0dfa8a81436e80..5562fdd3ef4e56 100644 --- a/drivers/misc/mic/host/mic_x100.c +++ b/drivers/misc/mic/host/mic_x100.c @@ -174,35 +174,38 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell) } /** - * mic_ack_interrupt - Device specific interrupt handling. - * @mdev: pointer to mic_device instance + * mic_x100_ack_interrupt - Read the interrupt sources register and + * clear it. This function will be called in the MSI/INTx case. + * @mdev: Pointer to mic_device instance. * - * Returns: bitmask of doorbell events triggered. + * Returns: bitmask of interrupt sources triggered. */ static u32 mic_x100_ack_interrupt(struct mic_device *mdev) { - u32 reg = 0; - struct mic_mw *mw = &mdev->mmio; u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0; + u32 reg = mic_mmio_read(&mdev->mmio, sicr0); + mic_mmio_write(&mdev->mmio, reg, sicr0); + return reg; +} + +/** + * mic_x100_intr_workarounds - These hardware specific workarounds are + * to be invoked everytime an interrupt is handled. + * @mdev: Pointer to mic_device instance. + * + * Returns: none + */ +static void mic_x100_intr_workarounds(struct mic_device *mdev) +{ + struct mic_mw *mw = &mdev->mmio; /* Clear pending bit array. 
*/ if (MIC_A0_STEP == mdev->stepping) mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_MSIXPBACR); - if (mdev->irq_info.num_vectors <= 1) { - reg = mic_mmio_read(mw, sicr0); - - if (unlikely(!reg)) - goto done; - - mic_mmio_write(mw, reg, sicr0); - } - if (mdev->stepping >= MIC_B0_STEP) mdev->intr_ops->enable_interrupts(mdev); -done: - return reg; } /** @@ -553,6 +556,7 @@ struct mic_hw_ops mic_x100_ops = { .write_spad = mic_x100_write_spad, .send_intr = mic_x100_send_intr, .ack_interrupt = mic_x100_ack_interrupt, + .intr_workarounds = mic_x100_intr_workarounds, .reset = mic_x100_hw_reset, .reset_fw_ready = mic_x100_reset_fw_ready, .is_fw_ready = mic_x100_is_fw_ready, diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 652593fc486d69..128d5615c8040c 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -828,6 +828,7 @@ enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *ch) { enum xp_retval ret; + DEFINE_WAIT(wait); if (ch->flags & XPC_C_DISCONNECTING) { DBUG_ON(ch->reason == xpInterrupted); @@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) } atomic_inc(&ch->n_on_msg_allocate_wq); - ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); + prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); + ret = schedule_timeout(1); + finish_wait(&ch->msg_allocate_wq, &wait); atomic_dec(&ch->n_on_msg_allocate_wq); if (ch->flags & XPC_C_DISCONNECTING) { diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 8d64b681dd9326..3aed525e55b48b 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -812,7 +812,7 @@ static void st_tty_flush_buffer(struct tty_struct *tty) kfree_skb(st_gdata->tx_skb); st_gdata->tx_skb = NULL; - tty->ops->flush_buffer(tty); + tty_driver_flush_buffer(tty); return; } diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 96853a09788a9a..9d3dbb28734b9f 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -531,7 +531,6 @@ long st_kim_stop(void *kim_data) /* Flush any pending characters in the driver and discipline. */ tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); - tty->ops->flush_buffer(tty); } /* send uninstall notification to UIM */ diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index c98b03b993537a..d35cda06b5e866 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -165,7 +165,7 @@ static void vmci_guest_cid_update(u32 sub_id, * true if required hypercalls (or fallback hypercalls) are * supported by the host, false otherwise. */ -static bool vmci_check_host_caps(struct pci_dev *pdev) +static int vmci_check_host_caps(struct pci_dev *pdev) { bool result; struct vmci_resource_query_msg *msg; @@ -176,7 +176,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev) check_msg = kmalloc(msg_size, GFP_KERNEL); if (!check_msg) { dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); - return false; + return -ENOMEM; } check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, @@ -196,7 +196,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev) __func__, result ? "PASSED" : "FAILED"); /* We need the vector. There are no fallbacks. */ - return result; + return result ? 
0 : -ENXIO; } /* @@ -564,12 +564,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, dev_warn(&pdev->dev, "VMCI device unable to register notification bitmap with PPN 0x%x\n", (u32) bitmap_ppn); + error = -ENXIO; goto err_remove_vmci_dev_g; } } /* Check host capabilities. */ - if (!vmci_check_host_caps(pdev)) + error = vmci_check_host_caps(pdev); + if (error) goto err_remove_bitmap; /* Enable device. */ diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index d210d131fef255..0f55589a56b815 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) return -ENOMEM; } info->map.cached = - ioremap_cached(info->map.phys, info->map.size); + ioremap_cache(info->map.phys, info->map.size); if (!info->map.cached) printk(KERN_WARNING "Failed to ioremap cached %s\n", info->map.name); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 187b1b7772ef1b..4ced59436558e6 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) port = &(SLAVE_AD_INFO(slave).port); - // if slave is null, the whole port is not initialized + /* if slave is null, the whole port is not initialized */ if (!port->slave) { pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } + __get_state_machine_lock(port); + port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1); pr_debug("Port %d changed speed\n", port->actor_port_number); - // there is no need to reselect a new aggregator, just signal the - // state machines to reinitialize + /* there is no need to reselect a new aggregator, just signal the + * state machines to reinitialize + */ port->sm_vars |= AD_PORT_BEGIN; + + __release_state_machine_lock(port); } /** @@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) port = &(SLAVE_AD_INFO(slave).port); - // if slave is null, the whole port is not initialized + /* if slave is null, the whole port is not initialized */ if (!port->slave) { pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } + __get_state_machine_lock(port); + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port); pr_debug("Port %d changed duplex\n", port->actor_port_number); - // there is no need to reselect a new aggregator, just signal the - // state machines to reinitialize + /* there is no need to reselect a new aggregator, just signal the + * state machines to reinitialize + */ port->sm_vars |= AD_PORT_BEGIN; + + __release_state_machine_lock(port); } /** @@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) port = &(SLAVE_AD_INFO(slave).port); - // if slave is null, the whole port is not initialized + /* if slave is null, the whole port is not initialized */ if (!port->slave) { pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } - // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) - // on link up we are forcing recheck on the duplex and speed since some of he 
adaptors(ce1000.lan) report + __get_state_machine_lock(port); + /* on link down we are zeroing duplex and speed since + * some of the adaptors(ce1000.lan) report full duplex/speed + * instead of N/A(duplex) / 0(speed). + * + * on link up we are forcing recheck on the duplex and speed since + * some of he adaptors(ce1000.lan) report. + */ if (link == BOND_LINK_UP) { port->is_enabled = true; port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; @@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) port->actor_oper_port_key = (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS); } - //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); - // there is no need to reselect a new aggregator, just signal the - // state machines to reinitialize + pr_debug("Port %d changed link status to %s", + port->actor_port_number, + (link == BOND_LINK_UP) ? "UP" : "DOWN"); + /* there is no need to reselect a new aggregator, just signal the + * state machines to reinitialize + */ port->sm_vars |= AD_PORT_BEGIN; + + __release_state_machine_lock(port); } /* diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 398e299ee1bded..6191b551a0e83b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1763,7 +1763,7 @@ static int __bond_release_one(struct net_device *bond_dev, } if (all) { - rcu_assign_pointer(bond->curr_active_slave, NULL); + RCU_INIT_POINTER(bond->curr_active_slave, NULL); } else if (oldcurrent == slave) { /* * Note that we hold RTNL over this sequence, so there @@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond, } -static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c index fb3dd4399cf36e..f615fdec0f1b2f 100644 --- a/drivers/net/ethernet/8390/hydra.c +++ b/drivers/net/ethernet/8390/hydra.c @@ -113,7 +113,7 @@ static const struct net_device_ops hydra_netdev_ops = { static int hydra_init(struct zorro_dev *z) { struct net_device *dev; - unsigned long board = ZTWO_VADDR(z->resource.start); + unsigned long board = (unsigned long)ZTWO_VADDR(z->resource.start); unsigned long ioaddr = board+HYDRA_NIC_BASE; const char name[] = "NE2000"; int start_page, stop_page; diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c index 85ec4c2d264558..ae2a12b7db62db 100644 --- a/drivers/net/ethernet/8390/zorro8390.c +++ b/drivers/net/ethernet/8390/zorro8390.c @@ -287,7 +287,7 @@ static const struct net_device_ops zorro8390_netdev_ops = { }; static int zorro8390_init(struct net_device *dev, unsigned long board, - const char *name, unsigned long ioaddr) + const char *name, void __iomem *ioaddr) { int i; int err; @@ -354,7 +354,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board, start_page = NESM_START_PG; stop_page = NESM_STOP_PG; - dev->base_addr = ioaddr; + dev->base_addr = (unsigned long)ioaddr; dev->irq = IRQ_AMIGA_PORTS; /* Install the Interrupt handler */ diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 0866e7627433c1..56139184b8019c 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -57,6 +57,7 @@ #include #include +#include #include 
#include #include @@ -678,6 +679,7 @@ static int a2065_init_one(struct zorro_dev *z, unsigned long base_addr = board + A2065_LANCE; unsigned long mem_start = board + A2065_RAM; struct resource *r1, *r2; + u32 serial; int err; r1 = request_mem_region(base_addr, sizeof(struct lance_regs), @@ -702,6 +704,7 @@ static int a2065_init_one(struct zorro_dev *z, r1->name = dev->name; r2->name = dev->name; + serial = be32_to_cpu(z->rom.er_SerialNumber); dev->dev_addr[0] = 0x00; if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */ dev->dev_addr[1] = 0x80; @@ -710,11 +713,11 @@ static int a2065_init_one(struct zorro_dev *z, dev->dev_addr[1] = 0x00; dev->dev_addr[2] = 0x9f; } - dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; - dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; - dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; - dev->base_addr = ZTWO_VADDR(base_addr); - dev->mem_start = ZTWO_VADDR(mem_start); + dev->dev_addr[3] = (serial >> 16) & 0xff; + dev->dev_addr[4] = (serial >> 8) & 0xff; + dev->dev_addr[5] = serial & 0xff; + dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr); + dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start); dev->mem_end = dev->mem_start + A2065_RAM_SIZE; priv->ll = (volatile struct lance_regs *)dev->base_addr; diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index c178eb4c81668a..b08101b31b8bc5 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -51,6 +51,7 @@ #include #include +#include #include #include #include @@ -718,6 +719,7 @@ static int ariadne_init_one(struct zorro_dev *z, struct resource *r1, *r2; struct net_device *dev; struct ariadne_private *priv; + u32 serial; int err; r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); @@ -741,14 +743,15 @@ static int ariadne_init_one(struct zorro_dev *z, r1->name = dev->name; r2->name = dev->name; + serial = be32_to_cpu(z->rom.er_SerialNumber); dev->dev_addr[0] = 0x00; dev->dev_addr[1] = 0x60; dev->dev_addr[2] = 0x30; - dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; - dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; - dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; - dev->base_addr = ZTWO_VADDR(base_addr); - dev->mem_start = ZTWO_VADDR(mem_start); + dev->dev_addr[3] = (serial >> 16) & 0xff; + dev->dev_addr[4] = (serial >> 8) & 0xff; + dev->dev_addr[5] = serial & 0xff; + dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr); + dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start); dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE; dev->netdev_ops = &ariadne_netdev_ops; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index b2ffad1304d221..248baf6273fb76 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) /* Make sure pointer to data buffer is set */ wmb(); + skb_tx_timestamp(skb); + *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); /* Increment index to point to the next BD */ @@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) arc_reg_set(priv, R_STATUS, TXPL_MASK); - skb_tx_timestamp(skb); - return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index a36a760ada28af..29801750f239b2 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ 
b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) * Mask some pcie error bits */ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); - data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); - pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); + if (pos) { + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); + data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); + } /* clear error status */ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_NFED | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a1f66e2c9a8694..ec6119089b82b8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -520,10 +520,12 @@ struct bnx2x_fastpath { #define BNX2X_FP_STATE_IDLE 0 #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ -#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ -#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ +#define BNX2X_FP_STATE_DISABLED (1 << 2) +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ +#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) -#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) /* protect state */ spinlock_t lock; @@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) { bool rc = true; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); if (fp->state & BNX2X_FP_LOCKED) { WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); fp->state |= BNX2X_FP_STATE_NAPI_YIELD; @@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) /* we don't care if someone yielded */ fp->state = BNX2X_FP_STATE_NAPI; } - spin_unlock(&fp->lock); + spin_unlock_bh(&fp->lock); return rc; } @@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) { bool rc = false; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); WARN_ON(fp->state & (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; - spin_unlock(&fp->lock); + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); return rc; } @@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; spin_unlock_bh(&fp->lock); return rc; } @@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) /* true if a socket is polling, even if it did not get the lock */ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { - WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); + WARN_ON(!(fp->state & BNX2X_FP_OWNED)); return fp->state & BNX2X_FP_USER_PEND; } + +/* false if fp is currently owned */ +static inline bool 
bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + int rc = true; + + spin_lock_bh(&fp->lock); + if (fp->state & BNX2X_FP_OWNED) + rc = false; + fp->state |= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); + + return rc; +} #else static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) { @@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { return false; } +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + return true; +} #endif /* CONFIG_NET_RX_BUSY_POLL */ /* Use 2500 as a mini-jumbo MTU for FCoE */ @@ -1250,7 +1274,10 @@ struct bnx2x_slowpath { * Therefore, if they would have been defined in the same union, * data can get corrupted. */ - struct afex_vif_list_ramrod_data func_afex_rdata; + union { + struct afex_vif_list_ramrod_data viflist_data; + struct function_update_data func_update; + } func_afex_rdata; /* used by dmae command executer */ struct dmae_command dmae[MAX_DMAE_C]; @@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp); #define MCPR_SCRATCH_BASE(bp) \ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) +#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ec96130533cc54..bf811565ee245a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, struct sk_buff *skb = tx_buf->skb; u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; int nbd; + u16 split_bd_len = 0; /* prefetch skb end pointer to speedup dev_kfree_skb() */ prefetch(&skb->end); @@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", txdata->txq_index, idx, tx_buf, skb); - /* unmap first bd */ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; - dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), - BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR @@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - /* ...and the TSO split header bd since they have no mapping */ + /* TSO headers+data bds share a common mapping. 
See bnx2x_tx_split() */ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + split_bd_len = BD_UNMAP_LEN(tx_data_bd); --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); } + /* unmap first bd */ + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd) + split_bd_len, + DMA_TO_DEVICE); + /* now free frags */ while (nbd > 0) { @@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } void bnx2x_netif_start(struct bnx2x *bp) @@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) bnx2x_napi_disable_cnic(bp); } -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index da8fcaa7449547..41f3ca5ad972b3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); /* select_queue callback */ -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 20dcc02431cac4..11fc79585491f4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, bnx2x_warpcore_enable_AN_KR2(phy, params, vars); } else { + /* Enable Auto-Detect to support 1G over CL37 as well */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); + + /* Force cl48 sync_status LOW to avoid getting stuck in CL73 + * parallel-detect loop when CL73 and CL37 are enabled. + */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); + bnx2x_set_aer_mmd(params, phy); + bnx2x_disable_kr2(params, vars, phy); } @@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, *edc_mode = EDC_MODE_ACTIVE_DAC; else check_limiting_mode = 1; - } else if (copper_module_type & - SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { + } else { + *edc_mode = EDC_MODE_PASSIVE_DAC; + /* Even in case PASSIVE_DAC indication is not set, + * treat it as a passive DAC cable, since some cables + * don't have this indication. 
+ */ + if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { DP(NETIF_MSG_LINK, "Passive Copper cable detected\n"); - *edc_mode = - EDC_MODE_PASSIVE_DAC; - } else { - DP(NETIF_MSG_LINK, - "Unknown copper-cable-type 0x%x !!!\n", - copper_module_type); - return -EINVAL; + } else { + DP(NETIF_MSG_LINK, + "Unknown copper-cable-type\n"); + } } break; } @@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, (1<<11)); if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (phy->req_line_speed == SPEED_1000)) { + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { an_1000_val |= (1<<8); autoneg_val |= (1<<9 | 1<<12); if (phy->req_duplex == DUPLEX_FULL) @@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 0x09, &an_1000_val); - /* Set 100 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - - /* Set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) + /* Advertise 10/100 link speed */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, "Advertising 10M\n"); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + an_10_100_val |= (1<<7); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + an_10_100_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } } /* Only 10/100 are allowed to work in FORCE mode */ @@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, old_status, status); + /* Do not touch the link in case physical link down */ + if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) + return 1; + /* a. Update shmem->link_status accordingly * b. 
Update link_vars->link_up */ @@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, */ not_kr2_device = (((base_page & 0x8000) == 0) || (((base_page & 0x8000) && - ((next_page & 0xe0) == 0x2)))); + ((next_page & 0xe0) == 0x20)))); /* In case KR2 is already disabled, check if we need to re-enable it */ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 814d0eca9b334e..0067b975873f15 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } } - /* adjust igu_sb_cnt to MF for E1x */ - if (CHIP_IS_E1x(bp) && IS_MF(bp)) - bp->igu_sb_cnt /= E1HVN_MAX; + /* adjust igu_sb_cnt to MF for E1H */ + if (CHIP_IS_E1H(bp) && IS_MF(bp)) + bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); /* port info */ bnx2x_get_port_hwinfo(bp); @@ -12942,25 +12942,26 @@ static void __bnx2x_remove(struct pci_dev *pdev, pci_set_power_state(pdev, PCI_D3hot); } - if (bp->regview) - iounmap(bp->regview); + if (remove_netdev) { + if (bp->regview) + iounmap(bp->regview); - /* for vf doorbells are part of the regview and were unmapped along with - * it. FW is only loaded by PF. - */ - if (IS_PF(bp)) { - if (bp->doorbells) - iounmap(bp->doorbells); + /* For vfs, doorbells are part of the regview and were unmapped + * along with it. FW is only loaded by PF. + */ + if (IS_PF(bp)) { + if (bp->doorbells) + iounmap(bp->doorbells); - bnx2x_release_firmware(bp); - } - bnx2x_free_mem_bp(bp); + bnx2x_release_firmware(bp); + } + bnx2x_free_mem_bp(bp); - if (remove_netdev) free_netdev(dev); - if (atomic_read(&pdev->enable_cnt) == 1) - pci_release_regions(pdev); + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + } pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 3efbb35267c853..14ffb6e56e593d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -7179,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 #define MDIO_WC_REG_XGXS_STATUS3 0x8129 #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 32c92abf50949f..18438a504d5730 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + unsigned long flags; int read_lock; int rc = 0; @@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, spin_lock_bh(&exeq->lock); list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { - if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == - *vlan_mac_flags) { + flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { rc = exeq->remove(bp, exeq->owner, exeq_pos); if (rc) { 
BNX2X_ERR("Failed to remove command\n"); @@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, return read_lock; list_for_each_entry(pos, &o->head, link) { - if (pos->vlan_mac_flags == *vlan_mac_flags) { + flags = pos->vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { p.user_req.vlan_mac_flags = pos->vlan_mac_flags; memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); rc = bnx2x_config_vlan_mac(bp, &p); @@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; /* Do nothing if only driver cleanup was requested */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", + p->ramrod_flags); return 0; + } r->set_pending(r); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 658f4e33abf928..6a53c15c85a338 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -266,6 +266,13 @@ enum { BNX2X_DONT_CONSUME_CAM_CREDIT, BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, }; +/* When looking for matching filters, some flags are not interesting */ +#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ + 1 << BNX2X_ETH_MAC | \ + 1 << BNX2X_ISCSI_ETH_MAC | \ + 1 << BNX2X_NETQ_ETH_MAC) +#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ + ((flags) & BNX2X_VLAN_MAC_CMP_MASK) struct bnx2x_vlan_mac_ramrod_params { /* Object to run the command from */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2e46c28fc6019a..e7845e5be1c76f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) /* next state */ vfop->state = BNX2X_VFOP_RXMODE_DONE; + /* record the accept flags in vfdb so hypervisor can modify them + * if necessary + */ + bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) = + ramrod->rx_accept_flags; vfop->rc = bnx2x_config_rx_mode(bp, ramrod); bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); op_err: @@ -1224,39 +1229,43 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) return; } +static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, + struct bnx2x_rx_mode_ramrod_params *ramrod, + struct bnx2x_virtf *vf, + unsigned long accept_flags) +{ + struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); + + memset(ramrod, 0, sizeof(*ramrod)); + ramrod->cid = vfq->cid; + ramrod->cl_id = vfq_cl_id(vf, vfq); + ramrod->rx_mode_obj = &bp->rx_mode_obj; + ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); + ramrod->rx_accept_flags = accept_flags; + ramrod->tx_accept_flags = accept_flags; + ramrod->pstate = &vf->filter_state; + ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + set_bit(RAMROD_RX, &ramrod->ramrod_flags); + set_bit(RAMROD_TX, &ramrod->ramrod_flags); + + ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); + ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); +} + int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd, int qid, unsigned long accept_flags) { - struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); if (vfop) { struct bnx2x_rx_mode_ramrod_params *ramrod = 
&vf->op_params.rx_mode; - memset(ramrod, 0, sizeof(*ramrod)); - - /* Prepare ramrod parameters */ - ramrod->cid = vfq->cid; - ramrod->cl_id = vfq_cl_id(vf, vfq); - ramrod->rx_mode_obj = &bp->rx_mode_obj; - ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); - - ramrod->rx_accept_flags = accept_flags; - ramrod->tx_accept_flags = accept_flags; - ramrod->pstate = &vf->filter_state; - ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; - - set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); - set_bit(RAMROD_RX, &ramrod->ramrod_flags); - set_bit(RAMROD_TX, &ramrod->ramrod_flags); - - ramrod->rdata = - bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); - ramrod->rdata_mapping = - bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); + bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, bnx2x_vfop_rxmode, cmd->done); @@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp) bnx2x_iov_static_resc(bp, vf); } - /* prepare msix vectors in VF configuration space */ + /* prepare msix vectors in VF configuration space - the value in the + * PCI configuration space should be the index of the last entry, + * namely one less than the actual size of the table + */ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, - num_vf_queues); + num_vf_queues - 1); DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", - vf_idx, num_vf_queues); + vf_idx, num_vf_queues - 1); } bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); @@ -3436,10 +3448,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) { + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + struct bnx2x_queue_update_params *update_params; + struct pf_vf_bulletin_content *bulletin = NULL; + struct bnx2x_rx_mode_ramrod_params rx_ramrod; struct bnx2x *bp = netdev_priv(dev); - int rc, q_logical_state; + struct bnx2x_vlan_mac_obj *vlan_obj; + unsigned long vlan_mac_flags = 0; + unsigned long ramrod_flags = 0; struct bnx2x_virtf *vf = NULL; - struct pf_vf_bulletin_content *bulletin = NULL; + unsigned long accept_flags; + int rc; /* sanity and init */ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); @@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) /* update PF's copy of the VF's bulletin. No point in posting the vlan * to the VF since it doesn't have anything to do with it. But it useful * to store it here in case the VF is not up yet and we can only - * configure the vlan later when it does. + * configure the vlan later when it does. Treat vlan id 0 as remove the + * Host tag. */ - bulletin->valid_bitmap |= 1 << VLAN_VALID; + if (vlan > 0) + bulletin->valid_bitmap |= 1 << VLAN_VALID; + else + bulletin->valid_bitmap &= ~(1 << VLAN_VALID); bulletin->vlan = vlan; /* is vf initialized and queue set up? 
*/ - q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); - if (vf->state == VF_ENABLED && - q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { - /* configure the vlan in device on this vf's queue */ - unsigned long ramrod_flags = 0; - unsigned long vlan_mac_flags = 0; - struct bnx2x_vlan_mac_obj *vlan_obj = - &bnx2x_leading_vfq(vf, vlan_obj); - struct bnx2x_vlan_mac_ramrod_params ramrod_param; - struct bnx2x_queue_state_params q_params = {NULL}; - struct bnx2x_queue_update_params *update_params; + if (vf->state != VF_ENABLED || + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + return rc; - rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); - if (rc) - return rc; - memset(&ramrod_param, 0, sizeof(ramrod_param)); + /* configure the vlan in device on this vf's queue */ + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; - /* must lock vfpf channel to protect against vf flows */ - bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); - /* remove existing vlans */ - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, - &ramrod_flags); - if (rc) { - BNX2X_ERR("failed to delete vlans\n"); - rc = -EINVAL; - goto out; - } + /* remove existing vlans */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc) { + BNX2X_ERR("failed to delete vlans\n"); + rc = -EINVAL; + goto out; + } + + /* need to remove/add the VF's accept_any_vlan bit */ + accept_flags = bnx2x_leading_vfq(vf, accept_flags); + if (vlan) + clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + else + set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, + accept_flags); + bnx2x_leading_vfq(vf, accept_flags) = accept_flags; + bnx2x_config_rx_mode(bp, &rx_ramrod); + + /* configure the new vlan to device */ + memset(&ramrod_param, 0, sizeof(ramrod_param)); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = vlan_obj; + ramrod_param.ramrod_flags = ramrod_flags; + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod_param.user_req.vlan_mac_flags); + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + rc = -EINVAL; + goto out; + } - /* send queue update ramrod to configure default vlan and silent - * vlan removal + /* send queue update ramrod to configure default vlan and silent + * vlan removal + */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &update_params->update_flags); + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). 
*/ - __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). + */ + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &update_params->update_flags); + update_params->def_vlan = vlan; + update_params->silent_removal_value = + vlan & VLAN_VID_MASK; + update_params->silent_removal_mask = VLAN_VID_MASK; + } - if (vlan == 0) { - /* if vlan is 0 then we want to leave the VF traffic - * untagged, and leave the incoming traffic untouched - * (i.e. do not remove any vlan tags). - */ - __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - } else { - /* configure the new vlan to device */ - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - ramrod_param.vlan_mac_obj = vlan_obj; - ramrod_param.ramrod_flags = ramrod_flags; - ramrod_param.user_req.u.vlan.vlan = vlan; - ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; - rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc) { - BNX2X_ERR("failed to configure vlan\n"); - rc = -EINVAL; - goto out; - } - - /* configure default vlan to vf queue and set silent - * vlan removal (the vf remains unaware of this vlan). - */ - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - update_params->def_vlan = vlan; - } + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure default VLAN\n"); + goto out; + } - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure default VLAN\n"); - goto out; - } - /* clear the flag indicating that this VF needs its vlan - * (will only be set if the HV configured the Vlan before vf was - * up and we were called because the VF came up later - */ + /* clear the flag indicating that this VF needs its vlan + * (will only be set if the HV configured the Vlan before vf was + * up and we were called because the VF came up later + */ out: - vf->cfg_flags &= ~VF_CFG_VLAN; - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); - } + vf->cfg_flags &= ~VF_CFG_VLAN; + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 1ff6a9366629ed..8c213fa52174f9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -74,6 +74,7 @@ struct bnx2x_vf_queue { /* VLANs object */ struct bnx2x_vlan_mac_obj vlan_obj; atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + unsigned long accept_flags; /* last accept flags configured */ /* Queue Slow-path State object */ struct bnx2x_queue_sp_obj sp_obj; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 
efa8a151d78907..0756d7dabdd59a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) return -EINVAL; } - BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); + DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg); *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; @@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { unsigned long accept = 0; + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); /* covert VF-PF if mask to bnx2x accept flags */ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) @@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); /* A packet arriving the vf's mac should be accepted - * with any vlan + * with any vlan, unless a vlan has already been + * configured. */ - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); /* set rx-mode */ rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, @@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, goto response; } } + /* if vlan was set by hypervisor we don't allow guest to config vlan */ + if (bulletin->valid_bitmap & 1 << VLAN_VALID) { + int i; + + /* search for vlan filters */ + for (i = 0; i < filters->n_mac_vlan_filters; i++) { + if (filters->filters[i].flags & + VFPF_Q_FILTER_VLAN_TAG_VALID) { + BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", + vf->abs_vfid); + vf->op_rc = -EPERM; + goto response; + } + } + } /* verify vf_qid */ if (filters->vf_qid > vf_rxq_count(vf)) @@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; /* flags handled individually for backward/forward compatability */ + vf_op_params->rss_flags = 0; + vf_op_params->ramrod_flags = 0; + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index f3dd93b4aeaac1..15a66e4b1f57a1 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) { u32 base = (u32) mapping & 0xffffffff; - return (base > 0xffffdcc0) && (base + len + 8 < base); + return base + len + 8 < base; } /* Test for TSO DMA buffers that cross into regions which are within MSS bytes diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 6c9308850453bf..56e0415f8cdff3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -228,6 +228,25 @@ struct tp_params { uint32_t dack_re; /* DACK timer resolution */ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ + + u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ + u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + + /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. 
This is a + * subset of the set of fields which may be present in the Compressed + * Filter Tuple portion of filters and TCP TCB connections. The + * fields which are present are controlled by the TP_VLAN_PRI_MAP. + * Since a variable number of fields may or may not be present, their + * shifted field positions within the Compressed Filter Tuple may + * vary, or not even be present if the field isn't selected in + * TP_VLAN_PRI_MAP. Since some of these fields are needed in various + * places we store their offsets here, or a -1 if the field isn't + * present. + */ + int vlan_shift; + int vnic_shift; + int port_shift; + int protocol_shift; }; struct vpd_params { @@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); +int t4_init_tp_params(struct adapter *adap); +int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d6b12e035a7d9f..fff02ed1295e5e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) if (stid >= 0) { t->stid_tab[stid].data = data; stid += t->stid_base; - t->stids_in_use++; + /* IPv6 requires max of 520 bits or 16 cells in TCAM + * This is equivalent to 4 TIDs. With CLIP enabled it + * needs 2 TIDs. + */ + if (family == PF_INET) + t->stids_in_use++; + else + t->stids_in_use += 4; } spin_unlock_bh(&t->stid_lock); return stid; @@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) } if (stid >= 0) { t->stid_tab[stid].data = data; - stid += t->stid_base; + stid -= t->nstids; + stid += t->sftid_base; t->stids_in_use++; } spin_unlock_bh(&t->stid_lock); @@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid); */ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) { - stid -= t->stid_base; + /* Is it a server filter TID? 
*/ + if (t->nsftids && (stid >= t->sftid_base)) { + stid -= t->sftid_base; + stid += t->nstids; + } else { + stid -= t->stid_base; + } + spin_lock_bh(&t->stid_lock); if (family == PF_INET) __clear_bit(stid, t->stid_bmap); else bitmap_release_region(t->stid_bmap, stid, 2); t->stid_tab[stid].data = NULL; - t->stids_in_use--; + if (family == PF_INET) + t->stids_in_use--; + else + t->stids_in_use -= 4; spin_unlock_bh(&t->stid_lock); } EXPORT_SYMBOL(cxgb4_free_stid); @@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t) size_t size; unsigned int stid_bmap_size; unsigned int natids = t->natids; + struct adapter *adap = container_of(t, struct adapter, tids); stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); size = t->ntids * sizeof(*t->tid_tab) + @@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t) t->afree = t->atid_tab; } bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); + /* Reserve stid 0 for T4/T5 adapters */ + if (!t->stid_base && + (is_t4(adap->params.chip) || is_t5(adap->params.chip))) + __set_bit(0, t->stid_bmap); + return 0; } @@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> (adap->fn * 4)); - lli.filt_mode = adap->filter_mode; + lli.filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) lli.tx_modq[i] = i; @@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, adap = netdev2adap(dev); /* Adjust stid to correct filter index */ - stid -= adap->tids.nstids; + stid -= adap->tids.sftid_base; stid += adap->tids.nftids; /* Check to make sure the filter requested is writable ... @@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, f->fs.val.lip[i] = val[i]; f->fs.mask.lip[i] = ~0; } - if (adap->filter_mode & F_PORT) { + if (adap->params.tp.vlan_pri_map & F_PORT) { f->fs.val.iport = port; f->fs.mask.iport = mask; } } + if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { + f->fs.val.proto = IPPROTO_TCP; + f->fs.mask.proto = ~0; + } + f->fs.dirsteer = 1; f->fs.iq = queue; /* Mark filter as locked */ @@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, adap = netdev2adap(dev); /* Adjust stid to correct filter index */ - stid -= adap->tids.nstids; + stid -= adap->tids.sftid_base; stid += adap->tids.nftids; f = &adap->tids.ftid_tab[stid]; @@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap) enum dev_state state; u32 params[7], val[7]; struct fw_caps_config_cmd caps_cmd; - int reset = 1, j; + int reset = 1; /* * Contact FW, advertising Master capability (and potentially forcing @@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap) /* * These are finalized by FW initialization, load their values now. 
*/ - v = t4_read_reg(adap, TP_TIMER_RESOLUTION); - adap->params.tp.tre = TIMERRESOLUTION_GET(v); - adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); t4_read_mtu_tbl(adap, adap->params.mtus, NULL); t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); - /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ - for (j = 0; j < NCHAN; j++) - adap->params.tp.tx_modq[j] = j; - - t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, - &adap->filter_mode, 1, - TP_VLAN_PRI_MAP); - + t4_init_tp_params(adap); adap->flags |= FW_OK; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 6f21f2451c3052..4dd0a82533e442 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) { - stid -= t->stid_base; + /* Is it a server filter TID? */ + if (t->nsftids && (stid >= t->sftid_base)) { + stid -= t->sftid_base; + stid += t->nstids; + } else { + stid -= t->stid_base; + } + return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 29878098101eb0..81e8402a74b41c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -45,6 +45,7 @@ #include "l2t.h" #include "t4_msg.h" #include "t4fw_api.h" +#include "t4_regs.h" #define VLAN_NONE 0xfff @@ -411,6 +412,40 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, } EXPORT_SYMBOL(cxgb4_l2t_get); +u64 cxgb4_select_ntuple(struct net_device *dev, + const struct l2t_entry *l2t) +{ + struct adapter *adap = netdev2adap(dev); + struct tp_params *tp = &adap->params.tp; + u64 ntuple = 0; + + /* Initialize each of the fields which we care about which are present + * in the Compressed Filter Tuple. + */ + if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) + ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; + + if (tp->port_shift >= 0) + ntuple |= (u64)l2t->lport << tp->port_shift; + + if (tp->protocol_shift >= 0) + ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; + + if (tp->vnic_shift >= 0) { + u32 viid = cxgb4_port_viid(dev); + u32 vf = FW_VIID_VIN_GET(viid); + u32 pf = FW_VIID_PFN_GET(viid); + u32 vld = FW_VIID_VIVLD_GET(viid); + + ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | + V_FT_VNID_ID_PF(pf) | + V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; + } + + return ntuple; +} +EXPORT_SYMBOL(cxgb4_select_ntuple); + /* * Called when address resolution fails for an L2T entry to handle packets * on the arpq head. 
If a packet specifies a failure handler it is invoked, diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index 108c0f1fce1c4e..85eb5c71358d80 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, const struct net_device *physdev, unsigned int priority); - +u64 cxgb4_select_ntuple(struct net_device *dev, + const struct l2t_entry *l2t); void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index cc380c36e1a868..cc3511a5cd0c0f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap) #undef READ_FL_BUF if (fl_small_pg != PAGE_SIZE || - (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || + (fl_large_pg != 0 && (fl_large_pg < fl_small_pg || (fl_large_pg & (fl_large_pg-1)) != 0))) { dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", fl_small_pg, fl_large_pg); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 74a6fce5a15a69..e1413eacdbd20a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter) return 0; } +/** + * t4_init_tp_params - initialize adap->params.tp + * @adap: the adapter + * + * Initialize various fields of the adapter's TP Parameters structure. + */ +int t4_init_tp_params(struct adapter *adap) +{ + int chan; + u32 v; + + v = t4_read_reg(adap, TP_TIMER_RESOLUTION); + adap->params.tp.tre = TIMERRESOLUTION_GET(v); + adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); + + /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ + for (chan = 0; chan < NCHAN; chan++) + adap->params.tp.tx_modq[chan] = chan; + + /* Cache the adapter's Compressed Filter Mode and global Incress + * Configuration. + */ + t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, + &adap->params.tp.vlan_pri_map, 1, + TP_VLAN_PRI_MAP); + t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, + &adap->params.tp.ingress_config, 1, + TP_INGRESS_CONFIG); + + /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field + * shift positions of several elements of the Compressed Filter Tuple + * for this adapter which we need frequently ... + */ + adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); + adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); + adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); + adap->params.tp.protocol_shift = t4_filter_field_shift(adap, + F_PROTOCOL); + + /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID + * represents the presense of an Outer VLAN instead of a VNIC ID. + */ + if ((adap->params.tp.ingress_config & F_VNIC) == 0) + adap->params.tp.vnic_shift = -1; + + return 0; +} + +/** + * t4_filter_field_shift - calculate filter field shift + * @adap: the adapter + * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) + * + * Return the shift position of a filter field within the Compressed + * Filter Tuple. 
The filter field is specified via its selection bit + * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN. + */ +int t4_filter_field_shift(const struct adapter *adap, int filter_sel) +{ + unsigned int filter_mode = adap->params.tp.vlan_pri_map; + unsigned int sel; + int field_shift; + + if ((filter_mode & filter_sel) == 0) + return -1; + + for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { + switch (filter_mode & sel) { + case F_FCOE: + field_shift += W_FT_FCOE; + break; + case F_PORT: + field_shift += W_FT_PORT; + break; + case F_VNIC_ID: + field_shift += W_FT_VNIC_ID; + break; + case F_VLAN: + field_shift += W_FT_VLAN; + break; + case F_TOS: + field_shift += W_FT_TOS; + break; + case F_PROTOCOL: + field_shift += W_FT_PROTOCOL; + break; + case F_ETHERTYPE: + field_shift += W_FT_ETHERTYPE; + break; + case F_MACMATCH: + field_shift += W_FT_MACMATCH; + break; + case F_MPSHITTYPE: + field_shift += W_FT_MPSHITTYPE; + break; + case F_FRAGMENTATION: + field_shift += W_FT_FRAGMENTATION; + break; + } + } + return field_shift; +} + int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) { u8 addr[6]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 0a8205d69d2c29..4082522d81408b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1171,10 +1171,50 @@ #define A_TP_TX_SCHED_PCMD 0x25 +#define S_VNIC 11 +#define V_VNIC(x) ((x) << S_VNIC) +#define F_VNIC V_VNIC(1U) + +#define S_FRAGMENTATION 9 +#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) +#define F_FRAGMENTATION V_FRAGMENTATION(1U) + +#define S_MPSHITTYPE 8 +#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) +#define F_MPSHITTYPE V_MPSHITTYPE(1U) + +#define S_MACMATCH 7 +#define V_MACMATCH(x) ((x) << S_MACMATCH) +#define F_MACMATCH V_MACMATCH(1U) + +#define S_ETHERTYPE 6 +#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) +#define F_ETHERTYPE V_ETHERTYPE(1U) + +#define S_PROTOCOL 5 +#define V_PROTOCOL(x) ((x) << S_PROTOCOL) +#define F_PROTOCOL V_PROTOCOL(1U) + +#define S_TOS 4 +#define V_TOS(x) ((x) << S_TOS) +#define F_TOS V_TOS(1U) + +#define S_VLAN 3 +#define V_VLAN(x) ((x) << S_VLAN) +#define F_VLAN V_VLAN(1U) + +#define S_VNIC_ID 2 +#define V_VNIC_ID(x) ((x) << S_VNIC_ID) +#define F_VNIC_ID V_VNIC_ID(1U) + #define S_PORT 1 #define V_PORT(x) ((x) << S_PORT) #define F_PORT V_PORT(1U) +#define S_FCOE 0 +#define V_FCOE(x) ((x) << S_FCOE) +#define F_FCOE V_FCOE(1U) + #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 @@ -1213,4 +1253,37 @@ #define V_CHIPID(x) ((x) << S_CHIPID) #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) +/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the + * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP + * selects for a particular field being present. These fields, when present + * in the Compressed Filter Tuple, have the following widths in bits. + */ +#define W_FT_FCOE 1 +#define W_FT_PORT 3 +#define W_FT_VNIC_ID 17 +#define W_FT_VLAN 17 +#define W_FT_TOS 8 +#define W_FT_PROTOCOL 8 +#define W_FT_ETHERTYPE 16 +#define W_FT_MACMATCH 9 +#define W_FT_MPSHITTYPE 3 +#define W_FT_FRAGMENTATION 1 + +/* Some of the Compressed Filter Tuple fields have internal structure. These + * bit shifts/masks describe those structures. 
All shifts are relative to the + * base position of the fields within the Compressed Filter Tuple + */ +#define S_FT_VLAN_VLD 16 +#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) +#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) + +#define S_FT_VNID_ID_VF 0 +#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF) + +#define S_FT_VNID_ID_PF 7 +#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF) + +#define S_FT_VNID_ID_VLD 16 +#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD) + #endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 5878df619b531a..4ccaf9af6fc90c 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev) #define BE3_MAX_RSS_QS 16 #define BE3_MAX_TX_QS 16 #define BE3_MAX_EVT_QS 16 +#define BE3_SRIOV_MAX_EVT_QS 8 #define MAX_RX_QS 32 #define MAX_EVT_QS 32 @@ -480,7 +481,7 @@ struct be_adapter { struct list_head entry; u32 flash_status; - struct completion flash_compl; + struct completion et_cmd_compl; struct be_resources res; /* resources available for the func */ u16 num_vfs; /* Number of VFs provisioned by PF */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index e0e8bc1ef14c47..94c35c8d799d9f 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter, subsystem = resp_hdr->subsystem; } + if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && + subsystem == CMD_SUBSYSTEM_LOWLEVEL) { + complete(&adapter->et_cmd_compl); + return 0; + } + if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || (opcode == OPCODE_COMMON_WRITE_OBJECT)) && (subsystem == CMD_SUBSYSTEM_COMMON)) { adapter->flash_status = compl_status; - complete(&adapter->flash_compl); + complete(&adapter->et_cmd_compl); } if (compl_status == MCC_STATUS_SUCCESS) { @@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 0x3ea83c02, 0x4a110304}; int status; + if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) + return 0; + if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); - if (!wait_for_completion_timeout(&adapter->flash_compl, + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(60000))) status = -1; else @@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); - if (!wait_for_completion_timeout(&adapter->flash_compl, - msecs_to_jiffies(40000))) + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, + msecs_to_jiffies(40000))) status = -1; else status = adapter->flash_status; @@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, { struct be_mcc_wrb *wrb; struct be_cmd_req_loopback_test *req; + struct be_cmd_resp_loopback_test *resp; int status; spin_lock_bh(&adapter->mcc_lock); @@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); - req->hdr.timeout = cpu_to_le32(4); + req->hdr.timeout = cpu_to_le32(15); req->pattern = cpu_to_le64(pattern); req->src_port = 
cpu_to_le32(port_num); req->dest_port = cpu_to_le32(port_num); @@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, req->num_pkts = cpu_to_le32(num_pkts); req->loopback_type = cpu_to_le32(loopback_type); - status = be_mcc_notify_wait(adapter); - if (!status) { - struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); - status = le32_to_cpu(resp->status); - } + be_mcc_notify(adapter); + + spin_unlock_bh(&adapter->mcc_lock); + wait_for_completion(&adapter->et_cmd_compl); + resp = embedded_payload(wrb); + status = le32_to_cpu(resp->status); + + return status; err: spin_unlock_bh(&adapter->mcc_lock); return status; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0fde69d5cb6afd..a37039d353c57e 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1776,6 +1776,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; struct be_queue_info *rxq = &rxo->q; struct page *pagep = NULL; + struct device *dev = &adapter->pdev->dev; struct be_eth_rx_d *rxd; u64 page_dmaaddr = 0, frag_dmaaddr; u32 posted, page_offset = 0; @@ -1788,9 +1789,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) rx_stats(rxo)->rx_post_fail++; break; } - page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep, - 0, adapter->big_page_size, + page_dmaaddr = dma_map_page(dev, pagep, 0, + adapter->big_page_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, page_dmaaddr)) { + put_page(pagep); + pagep = NULL; + rx_stats(rxo)->rx_post_fail++; + break; + } page_info->page_offset = 0; } else { get_page(pagep); @@ -2744,13 +2751,16 @@ static int be_rx_qs_create(struct be_adapter *adapter) if (!BEx_chip(adapter)) adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6; + } else { + /* Disable RSS, if only default RX Q is created */ + adapter->rss_flags = RSS_ENABLE_NONE; + } - rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, - 128); - if (rc) { - adapter->rss_flags = 0; - return rc; - } + rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, + 128); + if (rc) { + adapter->rss_flags = RSS_ENABLE_NONE; + return rc; } /* First time posting */ @@ -3124,11 +3134,11 @@ static void BEx_get_resources(struct be_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; bool use_sriov = false; + int max_vfs; - if (BE3_chip(adapter) && sriov_want(adapter)) { - int max_vfs; + max_vfs = pci_sriov_get_totalvfs(pdev); - max_vfs = pci_sriov_get_totalvfs(pdev); + if (BE3_chip(adapter) && sriov_want(adapter)) { res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; use_sriov = res->max_vfs; } @@ -3159,7 +3169,11 @@ static void BEx_get_resources(struct be_adapter *adapter, BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; res->max_rx_qs = res->max_rss_qs + 1; - res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; + if (be_physfn(adapter)) + res->max_evt_qs = (max_vfs > 0) ? 
+ BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; + else + res->max_evt_qs = 1; res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) @@ -4205,7 +4219,7 @@ static int be_ctrl_init(struct be_adapter *adapter) spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); - init_completion(&adapter->flash_compl); + init_completion(&adapter->et_cmd_compl); pci_save_state(adapter->pdev); return 0; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e7c8b749c5a53f..50bb71c663e20a 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* If this was the last BD in the ring, start at the beginning again. */ bdp = fec_enet_get_nextdesc(bdp, fep); + skb_tx_timestamp(skb); + fep->cur_tx = bdp; if (fep->cur_tx == fep->dirty_tx) @@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Trigger transmission start */ writel(0, fep->hwp + FEC_X_DES_ACTIVE); - skb_tx_timestamp(skb); - return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 895450e9bb3cfe..ff2d806eaef71b 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) e1000_release_phy_80003es2lan(hw); /* Disable IBIST slave mode (far-end loopback) */ - e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &kum_reg_data); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (ret_val) + return ret_val; kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, kum_reg_data); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8d3945ab733484..6d14eea1791846 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7015,13 +7015,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); -#ifdef CONFIG_PM static const struct dev_pm_ops e1000_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, e1000_idle) }; -#endif /* PCI Device API Driver */ static struct pci_driver e1000_driver = { @@ -7029,11 +7027,9 @@ static struct pci_driver e1000_driver = { .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = e1000_remove, -#ifdef CONFIG_PM .driver = { .pm = &e1000_pm_ops, }, -#endif .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler }; diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index da2be59505c062..20e71f4ca4261f 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, * it across the board. */ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); - if (ret_val) + if (ret_val) { /* If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. 
*/ - udelay(usec_interval); + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; if (phy_status & BMSR_LSTATUS) break; if (usec_interval >= 1000) - mdelay(usec_interval / 1000); + msleep(usec_interval / 1000); else udelay(usec_interval); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cc06854296a379..5bcc870f8367f8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } -#ifdef IXGBE_FCOE -static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { + struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; +#ifdef IXGBE_FCOE struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; int txq; +#endif + + if (fwd_adapter) + return skb->queue_mapping + fwd_adapter->tx_base_queue; + +#ifdef IXGBE_FCOE /* * only execute the code below if protocol is FCoE @@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) txq -= f->indices; return txq + f->offset; +#else + return __netdev_pick_tx(dev, skb); +#endif } -#endif netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) @@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } -static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, - struct net_device *dev, - void *priv) -{ - struct ixgbe_fwd_adapter *fwd_adapter = priv; - unsigned int queue; - struct ixgbe_ring *tx_ring; - - queue = skb->queue_mapping + fwd_adapter->tx_base_queue; - tx_ring = fwd_adapter->real_adapter->tx_ring[queue]; - - return __ixgbe_xmit_frame(skb, dev, tx_ring); -} - static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, -#ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, -#endif .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, }; /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d6f0c0d8cf11dd..72084f70adbba8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) { struct ixgbe_adapter *adapter = pci_get_drvdata(dev); int err; +#ifdef CONFIG_PCI_IOV u32 current_flags = adapter->flags; +#endif err = ixgbe_disable_sriov(adapter); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a6c1f76d8e044..ec94a20d709952 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev) } static u16 -ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) +ltq_etop_select_queue(struct net_device *dev, struct sk_buff 
*skb, + void *accel_priv) { /* we are currently only using the first queue */ return 0; diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 7354960b583bc8..c4eeb69a5beee6 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus) if (time_is_before_jiffies(end)) ++timedout; } else { + /* wait_event_timeout does not guarantee a delay of at + * least one whole jiffie, so timeout must be no less + * than two. + */ + if (timeout < 2) + timeout = 2; wait_event_timeout(dev->smi_busy_wait, orion_mdio_smi_is_done(dev), timeout); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f54ebd5a170245..a7fcd593b2dbb3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk } } -u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f3758de59c05f5..d5758adceaa2f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); -u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 346a4e025c3410..04b3ec1352f1eb 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -52,7 +52,6 @@ #include #include -#include #include #include #include diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 7692dfd4f262db..cc68657f05368d 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter, u32 seq_number; u8 vhdr_len = 0; - if (unlikely(ring > adapter->max_rds_rings)) + if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = netxen_get_lro_sts_refhandle(sts_data0); - if (unlikely(index > rds_ring->num_desc)) + if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 631ea0ac1cd89c..f2a7c7166e2408 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -487,6 +487,7 @@ struct qlcnic_hardware_context { struct qlcnic_mailbox *mailbox; u8 extend_lb_time; u8 phys_port_id[ETH_ALEN]; + u8 lb_mode; }; struct qlcnic_adapter_stats { @@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring { dma_addr_t 
phys_addr; dma_addr_t hw_cons_phys_addr; struct netdev_queue *txq; + /* Lock to protect Tx descriptors cleanup */ + spinlock_t tx_clean_lock; } ____cacheline_internodealigned_in_smp; /* @@ -808,6 +811,7 @@ struct qlcnic_mac_list_s { #define QLCNIC_ILB_MODE 0x1 #define QLCNIC_ELB_MODE 0x2 +#define QLCNIC_LB_MODE_MASK 0x3 #define QLCNIC_LINKEVENT 0x1 #define QLCNIC_LB_RESPONSE 0x2 @@ -1093,7 +1097,6 @@ struct qlcnic_adapter { struct qlcnic_filter_hash rx_fhash; struct list_head vf_mc_list; - spinlock_t tx_clean_lock; spinlock_t mac_learn_lock; /* spinlock for catching rcv filters for eswitch traffic */ spinlock_t rx_mac_learn_lock; @@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); +void qlcnic_update_stats(struct qlcnic_adapter *); /* Adapter hardware abstraction */ struct qlcnic_hardware_ops { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 6055d397a29edf..f776f99f79155e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1684,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) } } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); - /* Make sure carrier is off and queue is stopped during loopback */ - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - } - ret = qlcnic_do_lb_test(adapter, mode); qlcnic_83xx_clear_lb_mode(adapter, mode); @@ -2121,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, ahw->link_autoneg = MSB(MSW(data[3])); ahw->module_type = MSB(LSW(data[3])); ahw->has_link_events = 1; + ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK; qlcnic_advert_link_change(adapter, link_status); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index e3be2760665cd9..6b08194aa0d490 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) -static inline int qlcnic_82xx_statistics(void) +static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) { - return ARRAY_SIZE(qlcnic_device_gstrings_stats) + - ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } -static inline int qlcnic_83xx_statistics(void) +static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) { - return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + - ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); + ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) { - if (qlcnic_82xx_check(adapter)) - return qlcnic_82xx_statistics(); - else if (qlcnic_83xx_check(adapter)) - return qlcnic_83xx_statistics(); - else - return -1; + int len = -1; + + if (qlcnic_82xx_check(adapter)) { + len = 
qlcnic_82xx_statistics(adapter); + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + len += ARRAY_SIZE(qlcnic_device_gstrings_stats); + } else if (qlcnic_83xx_check(adapter)) { + len = qlcnic_83xx_statistics(adapter); + } + + return len; } #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 @@ -920,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev) static int qlcnic_get_sset_count(struct net_device *dev, int sset) { - int len; struct qlcnic_adapter *adapter = netdev_priv(dev); switch (sset) { case ETH_SS_TEST: return QLCNIC_TEST_LEN; case ETH_SS_STATS: - len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; - if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || - qlcnic_83xx_check(adapter)) - return len; - return qlcnic_82xx_statistics(); + return qlcnic_dev_statistics_len(adapter); default: return -EOPNOTSUPP; } @@ -1267,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) return data; } -static void qlcnic_update_stats(struct qlcnic_adapter *adapter) +void qlcnic_update_stats(struct qlcnic_adapter *adapter) { struct qlcnic_host_tx_ring *tx_ring; int ring; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index e9c21e5d0ca95c..c4262c23ed7c77 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, struct qlcnic_skb_frag *buffrag; int i, j; + spin_lock(&tx_ring->tx_clean_lock); + cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; @@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, } cmd_buf++; } + + spin_unlock(&tx_ring->tx_clean_lock); } void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index eda6c691d89704..ad1531ae3aa8f8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -689,6 +689,10 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) adapter->ahw->linkup = 0; netif_carrier_off(netdev); } else if (!adapter->ahw->linkup && linkup) { + /* Do not advertise Link up if the port is in loopback mode */ + if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) + return; + netdev_info(netdev, "NIC Link is up\n"); adapter->ahw->linkup = 1; netif_carrier_on(netdev); @@ -778,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, struct net_device *netdev = adapter->netdev; struct qlcnic_skb_frag *frag; - if (!spin_trylock(&adapter->tx_clean_lock)) + if (!spin_trylock(&tx_ring->tx_clean_lock)) return 1; sw_consumer = tx_ring->sw_consumer; @@ -807,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, break; } + tx_ring->sw_consumer = sw_consumer; + if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; smp_mb(); if (netif_tx_queue_stopped(tx_ring->txq) && netif_carrier_ok(netdev)) { @@ -834,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); + + spin_unlock(&tx_ring->tx_clean_lock); return done; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 
2c8cac0c6a55a7..550791b8fbae98 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) if (qlcnic_sriov_vf_check(adapter)) qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); smp_mb(); - spin_lock(&adapter->tx_clean_lock); netif_carrier_off(netdev); adapter->ahw->linkup = 0; netif_tx_disable(netdev); @@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) for (ring = 0; ring < adapter->drv_tx_rings; ring++) qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); - spin_unlock(&adapter->tx_clean_lock); } /* Usage: During suspend and firmware recovery module */ @@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, } memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); tx_ring->cmd_buf_arr = cmd_buf_arr; + spin_lock_init(&tx_ring->tx_clean_lock); } if (qlcnic_83xx_check(adapter) || @@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) rwlock_init(&adapter->ahw->crb_lock); mutex_init(&adapter->ahw->mem_lock); - spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); qlcnic_register_dcb(adapter); @@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_update_stats(adapter); + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; stats->tx_packets = adapter->stats.xmitfinished; stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 686f460b15022b..024f8161d2fe1c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, num_vfs = sriov->num_vfs; max = num_vfs + 1; info->bit_offsets = 0xffff; - info->max_tx_ques = res->num_tx_queues / max; info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; @@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, info->max_tx_mac_filters = temp; info->min_tx_bw = 0; info->max_tx_bw = MAX_BW; + info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; } else { id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) @@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, info->max_tx_bw = vp->max_tx_bw; info->max_rx_ucast_mac_filters = num_vf_macs; info->max_tx_mac_filters = num_vf_macs; + info->max_tx_ques = QLCNIC_SINGLE_RING; } info->max_rx_ip_addr = res->num_destip / max; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 449f506d2e8ff3..f705aeeba767ee 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4765,6 +4765,8 @@ static int qlge_probe(struct pci_dev *pdev, NETIF_F_RXCSUM; ndev->features = ndev->hw_features; ndev->vlan_features = ndev->hw_features; + /* vlan gets same features (except vlan filter) */ + ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; if (test_bit(QL_DMA64, &qdev->flags)) ndev->features |= 
NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8a7a23a84ac5c3..797b56a0efc4a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - if (netif_msg_hw(priv)) { - if (priv->dma_cap.time_stamp) { - pr_debug("IEEE 1588-2002 Time Stamp supported\n"); - priv->adv_ts = 0; - } - if (priv->dma_cap.atime_stamp && priv->extend_desc) { - pr_debug - ("IEEE 1588-2008 Advanced Time Stamp supported\n"); - priv->adv_ts = 1; - } - } + priv->adv_ts = 0; + if (priv->dma_cap.atime_stamp && priv->extend_desc) + priv->adv_ts = 1; + + if (netif_msg_hw(priv) && priv->dma_cap.time_stamp) + pr_debug("IEEE 1588-2002 Time Stamp supported\n"); + + if (netif_msg_hw(priv) && priv->adv_ts) + pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); priv->hw->ptp = &stmmac_ptp; priv->hwts_tx_en = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b8b0eeed0f92bb..7680581ebe12fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) priv->hw->ptp->config_addend(priv->ioaddr, addend); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; } @@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 5120d9ce1dd4cd..5330fd298705e0 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); + else if (phy->speed == 10) + mac_control |= BIT(18); /* In Band mode */ *link = true; } else { @@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev) while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, - dev_name(priv->dev), priv)) { + dev_name(&pdev->dev), priv)) { dev_err(priv->dev, "error attaching irq\n"); goto clean_ale_ret; } diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 628b736e5ae776..0e9fb3301b1136 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) } /* Return subqueue id on this core (one per core). 
*/ -static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { return smp_processor_id(); } diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index cce6c4bc556a97..ef312bc6b8658d 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work) goto out_unlock; napi_disable(&rp->napi); + netif_tx_disable(dev); spin_lock_bh(&rp->lock); /* clear all descriptors */ diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 3169252613faae..5d78c1d08abd60 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case HDLCDRVCTL_CALIBRATE: if(!capable(CAP_SYS_RAWIO)) return -EPERM; + if (bi.data.calibrate > INT_MAX / s->par.bitrate) + return -EINVAL; s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; return 0; diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 1971411574db1c..61dd2447e1bb4e 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; case SIOCYAMGCFG: + memset(&yi, 0, sizeof(yi)); yi.cfg.mask = 0xffffffff; yi.cfg.iobase = yp->iobase; yi.cfg.irq = yp->irq; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f8135725bcf678..71baeb3ed905cf 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, struct sk_buff *skb; net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; - if (!net) { - netdev_err(net, "got receive callback but net device" - " not initialized yet\n"); + if (!net || net->reg_state != NETREG_REGISTERED) { packet->status = NVSP_STAT_FAIL; return 0; } @@ -435,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev, SET_ETHTOOL_OPS(net, &ethtool_ops); SET_NETDEV_DEV(net, &dev->device); - ret = register_netdev(net); - if (ret != 0) { - pr_err("Unable to register netdev.\n"); - free_netdev(net); - goto out; - } - /* Notify the netvsc driver of the new device */ device_info.ring_size = ring_size; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - unregister_netdev(net); free_netdev(net); hv_set_drvdata(dev, NULL); return ret; @@ -456,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev, netif_carrier_on(net); -out: + ret = register_netdev(net); + if (ret != 0) { + pr_err("Unable to register netdev.\n"); + rndis_filter_device_remove(dev); + free_netdev(net); + } + return ret; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index acf93798dc6759..bc8faaec33f5af 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, if (vlan->fwd_priv) { skb->dev = vlan->lowerdev; - ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); + ret = dev_queue_xmit_accel(skb, vlan->fwd_priv); } else { ret = macvlan_queue_xmit(skb, dev); } @@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = { .cache_update = eth_header_cache_update, }; +static struct rtnl_link_ops macvlan_link_ops; + static int
macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev) goto hash_add; } - if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { + if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD && + dev->rtnl_link_ops == &macvlan_link_ops) { vlan->fwd_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); @@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev) */ if (IS_ERR_OR_NULL(vlan->fwd_priv)) { vlan->fwd_priv = NULL; - } else { - dev->features &= ~NETIF_F_LLTX; + } else return 0; - } } err = -EBUSY; @@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, netdev_features_t features) { struct macvlan_dev *vlan = netdev_priv(dev); + netdev_features_t mask; + + features |= NETIF_F_ALL_FOR_ALL; + features &= (vlan->set_features | ~MACVLAN_FEATURES); + mask = features; + + features = netdev_increment_features(vlan->lowerdev->features, + features, + mask); + features |= NETIF_F_LLTX; - return features & (vlan->set_features | ~MACVLAN_FEATURES); + return features; } static const struct ethtool_ops macvlan_ethtool_ops = { @@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused, break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { - vlan->dev->features = dev->features & MACVLAN_FEATURES; vlan->dev->gso_max_size = dev->gso_max_size; - netdev_features_change(vlan->dev); + netdev_update_features(vlan->dev); } break; case NETDEV_UNREGISTER: diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 36c6994436b7ce..98434b84f0415e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev) int err = 0; atomic_set(&phydev->irq_disable, 0); - if (request_irq(phydev->irq, phy_interrupt, - IRQF_SHARED, - "phy_interrupt", - phydev) < 0) { + if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", + phydev) < 0) { pr_warn("%s: Can't get IRQ %d (PHY)\n", phydev->bus->name, phydev->irq); phydev->irq = PHY_POLL; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 736050d6b4516b..b75ae5bde6734c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7c8343a4f91823..ecec8029c5e84c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -348,7 +348,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, * different rxq no. here. If we could not get rxhash, then we would * hope the rxq no. may help here. */ -static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct tun_struct *tun = netdev_priv(dev); struct tun_flow_entry *e; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 85e4a01670f06e..47b0f732b0b10d 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM module will be called cdc_mbim. 
config USB_NET_DM9601 - tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" + tristate "Davicom DM96xx based USB 10/100 ethernet devices" depends on USB_USBNET select CRC32 help - This option adds support for Davicom DM9601 based USB 1.1 - 10/100 Ethernet adapters. + This option adds support for Davicom DM9601/DM9620/DM9621A + based USB 10/100 Ethernet adapters. config USB_NET_SR9700 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index c6867f926cffc1..e8021987773097 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -1,5 +1,5 @@ /* - * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices + * Davicom DM96xx USB 10/100Mbps ethernet devices * * Peter Korsgaard * @@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &dm9601_ethtool_ops; dev->net->hard_header_len += DM_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; - dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; + + /* dm9620/21a require room for 4 byte padding, even in dm9601 + * mode, so we need +1 to be able to receive full size + * ethernet frames. + */ + dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1; dev->mii.dev = dev->net; dev->mii.mdio_read = dm9601_mdio_read; @@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { - int len; + int len, pad; /* format: b1: packet length low @@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, b3..n: packet data */ - len = skb->len; + len = skb->len + DM_TX_OVERHEAD; - if (skb_headroom(skb) < DM_TX_OVERHEAD) { + /* workaround for dm962x errata with tx fifo getting out of + * sync if a USB bulk transfer retry happens right after a + * packet with odd / maxpacket length by adding up to 3 bytes + * padding. 
+ */ + while ((len & 1) || !(len % dev->maxpacket)) + len++; + + len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */ + pad = len - skb->len; + + if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) { struct sk_buff *skb2; - skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); + skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) @@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, __skb_push(skb, DM_TX_OVERHEAD); - /* usbnet adds padding if length is a multiple of packet size - if so, adjust length value in header */ - if ((skb->len % dev->maxpacket) == 0) - len++; + if (pad) { + memset(skb->data + skb->len, 0, pad); + __skb_put(skb, pad); + } skb->data[0] = len; skb->data[1] = len >> 8; @@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev) } static const struct driver_info dm9601_info = { - .description = "Davicom DM9601 USB Ethernet", + .description = "Davicom DM96xx USB 10/100 Ethernet", .flags = FLAG_ETHER | FLAG_LINK_INTR, .bind = dm9601_bind, .rx_fixup = dm9601_rx_fixup, @@ -594,6 +610,22 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, + { + USB_DEVICE(0x0a46, 0x9622), /* DM9622 USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, + { + USB_DEVICE(0x0a46, 0x0269), /* DM962OA USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, + { + USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; @@ -612,5 +644,5 @@ static struct usb_driver dm9601_driver = { module_usb_driver(dm9601_driver); MODULE_AUTHOR("Peter Korsgaard "); -MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); +MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 86292e6aaf4955..1a482344b3f507 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -185,7 +185,6 @@ enum rx_ctrl_state{ #define BM_REQUEST_TYPE (0xa1) #define B_NOTIFICATION (0x20) #define W_VALUE (0x0) -#define W_INDEX (0x2) #define W_LENGTH (0x2) #define B_OVERRUN (0x1<<6) @@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb) struct uart_icount *icount; struct hso_serial_state_notification *serial_state_notification; struct usb_device *usb; + int if_num; /* Sanity checks */ if (!serial) @@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb) handle_usb_error(status, __func__, serial->parent); return; } + + /* tiocmget is only supported on HSO_PORT_MODEM */ tiocmget = serial->tiocmget; if (!tiocmget) return; + BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); + usb = serial->parent->usb; + if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; + + /* wIndex should be the USB interface number of the port to which the + * notification applies, which should always be the Modem port. 
+ */ serial_state_notification = &tiocmget->serial_state_notification; if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || serial_state_notification->bNotification != B_NOTIFICATION || le16_to_cpu(serial_state_notification->wValue) != W_VALUE || - le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || + le16_to_cpu(serial_state_notification->wIndex) != if_num || le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { dev_warn(&usb->dev, "hso received invalid serial state notification\n"); diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 03832d3780aa61..f54637828574f6 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -117,7 +117,6 @@ enum { struct mcs7830_data { u8 multi_filter[8]; u8 config; - u8 link_counter; }; static const char driver_name[] = "MOSCHIP usb-ethernet driver"; @@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb) { u8 *buf = urb->transfer_buffer; bool link, link_changed; - struct mcs7830_data *data = mcs7830_get_data(dev); if (urb->actual_length < 16) return; - link = !(buf[1] & 0x20); + link = !(buf[1] == 0x20); link_changed = netif_carrier_ok(dev->net) != link; if (link_changed) { - data->link_counter++; - /* - track link state 20 times to guard against erroneous - link state changes reported sometimes by the chip - */ - if (data->link_counter > 20) { - data->link_counter = 0; - usbnet_link_change(dev, link, 0); - netdev_dbg(dev->net, "Link Status is: %d\n", link); - } - } else - data->link_counter = 0; + usbnet_link_change(dev, link, 0); + netdev_dbg(dev->net, "Link Status is: %d\n", link); + } } static const struct driver_info moschip_info = { diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 8494bb53ebdc9f..aba04f56176008 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1245,7 +1245,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) return -ENOMEM; urb->num_sgs = num_sgs; - sg_init_table(urb->sg, urb->num_sgs); + sg_init_table(urb->sg, urb->num_sgs + 1); sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb)); total_len += skb_headlen(skb); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d208f860498106..5d776447d9c33e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev) if (err) return err; - if (netif_running(vi->dev)) + if (netif_running(vi->dev)) { + for (i = 0; i < vi->curr_queue_pairs; i++) + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_enable(&vi->rq[i]); + } netif_device_attach(vi->dev); - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - mutex_lock(&vi->config_lock); vi->config_enable = true; mutex_unlock(&vi->config_lock); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 249e01c5600c90..ed384fee76ac9f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, /* update header length based on lower device */ dev->hard_header_len = lowerdev->hard_header_len + (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); - } + } else if (use_ipv6) + vxlan->flags |= VXLAN_F_IPV6; if (data[IFLA_VXLAN_TOS]) vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 8d78253c26cee6..a366d6b4626f5f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) mask2 |= ATH9K_INT_CST; if (isr2 & AR_ISR_S2_TSFOOR) mask2 |= ATH9K_INT_TSFOOR; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { + REG_WRITE(ah, AR_ISR_S2, isr2); + isr &= ~AR_ISR_BCNMISC; + } } - isr = REG_READ(ah, AR_ISR_RAC); + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) + isr = REG_READ(ah, AR_ISR_RAC); + if (isr == 0xffffffff) { *masked = 0; return false; @@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) *masked |= ATH9K_INT_TX; - s0_s = REG_READ(ah, AR_ISR_S0_S); + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { + s0_s = REG_READ(ah, AR_ISR_S0_S); + s1_s = REG_READ(ah, AR_ISR_S1_S); + } else { + s0_s = REG_READ(ah, AR_ISR_S0); + REG_WRITE(ah, AR_ISR_S0, s0_s); + s1_s = REG_READ(ah, AR_ISR_S1); + REG_WRITE(ah, AR_ISR_S1, s1_s); + + isr &= ~(AR_ISR_TXOK | + AR_ISR_TXDESC | + AR_ISR_TXERR | + AR_ISR_TXEOL); + } + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); - - s1_s = REG_READ(ah, AR_ISR_S1_S); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); } @@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) *masked |= mask2; } - if (AR_SREV_9100(ah)) - return true; - - if (isr & AR_ISR_GENTMR) { + if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) { u32 s5_s; - s5_s = REG_READ(ah, AR_ISR_S5_S); + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { + s5_s = REG_READ(ah, AR_ISR_S5_S); + } else { + s5_s = REG_READ(ah, AR_ISR_S5); + } + ah->intr_gen_timer_trigger = MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); @@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) if ((s5_s & AR_ISR_S5_TIM_TIMER) && !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) *masked |= ATH9K_INT_TIM_TIMER; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { + REG_WRITE(ah, AR_ISR_S5, s5_s); + isr &= ~AR_ISR_GENTMR; + } } + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { + REG_WRITE(ah, AR_ISR, isr); + REG_READ(ah, AR_ISR); + } + + if (AR_SREV_9100(ah)) + return true; + if (sync_cause) { ath9k_debug_sync_cause(common, sync_cause); fatal_int = diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9a2657fdd9ccd4..608d739d137822 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif) struct ath9k_vif_iter_data *iter_data = data; int i; - for (i = 0; i < ETH_ALEN; i++) - iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); + if (iter_data->hw_macaddr != NULL) { + for (i = 0; i < ETH_ALEN; i++) + iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); + } else { + iter_data->hw_macaddr = mac; + } } -static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, +static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif) { struct ath_common *common = 
ath9k_hw_common(priv->ah); struct ath9k_vif_iter_data iter_data; /* - * Use the hardware MAC address as reference, the hardware uses it - * together with the BSSID mask when matching addresses. + * Pick the MAC address of the first interface as the new hardware + * MAC address. The hardware will use it together with the BSSID mask + * when matching addresses. */ - iter_data.hw_macaddr = common->macaddr; + iter_data.hw_macaddr = NULL; memset(&iter_data.mask, 0xff, ETH_ALEN); if (vif) @@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, ath9k_htc_bssid_iter, &iter_data); memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); + + if (iter_data.hw_macaddr) + memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN); + ath_hw_setbssidmask(common); } @@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, goto out; } - ath9k_htc_set_bssid_mask(priv, vif); + ath9k_htc_set_mac_bssid_mask(priv, vif); priv->vif_slot |= (1 << avp->index); priv->nvifs++; @@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, ath9k_htc_set_opmode(priv); - ath9k_htc_set_bssid_mask(priv, vif); + ath9k_htc_set_mac_bssid_mask(priv, vif); /* * Stop ANI only if there are no associated station interfaces. diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 74f452c7b1667c..21aa09e0e825df 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(ah); /* - * Use the hardware MAC address as reference, the hardware uses it - * together with the BSSID mask when matching addresses. + * Pick the MAC address of the first interface as the new hardware + * MAC address. The hardware will use it together with the BSSID mask + * when matching addresses. 
*/ memset(iter_data, 0, sizeof(*iter_data)); memset(&iter_data->mask, 0xff, ETH_ALEN); diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 86605027c41d6b..e6272546395a98 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c72438bb2fafd2..a1b32ee9594a6b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, (hwsim_flags & HWSIM_TX_STAT_ACK)) { if (skb->len >= 16) { hdr = (struct ieee80211_hdr *) skb->data; - mac80211_hwsim_monitor_ack(txi->rate_driver_data[0], + mac80211_hwsim_monitor_ack(data2->channel, hdr->addr2); } txi->flags |= IEEE80211_TX_STAT_ACK; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 78e8a6666cc6ed..8bb8988c435cf0 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) } static u16 -mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb) +mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { skb->priority = cfg80211_classify8021d(skb); return mwifiex_1d_to_wmm_queue[skb->priority]; diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 0f494444bcd1d9..5a53195d016b61 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) }; int index = rtlpci->rx_ring[rx_queue_idx].idx; + if (rtlpci->driver_is_goingto_unload) + return; /*RX NORMAL PKT */ while (count--) { /*rx descriptor */ @@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) */ 
set_hal_stop(rtlhal); + rtlpci->driver_is_goingto_unload = true; rtlpriv->cfg->ops->disable_interrupt(hw); cancel_work_sync(&rtlpriv->works.lps_change_work); @@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) ppsc->rfchange_inprogress = true; spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); - rtlpci->driver_is_goingto_unload = true; rtlpriv->cfg->ops->hw_disable(hw); /* some things are not needed if firmware not available */ if (!rtlpriv->max_fw_size) diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 08ae01b41c832d..c47794b9d42f9b 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -101,6 +101,13 @@ struct xenvif_rx_meta { #define MAX_PENDING_REQS 256 +/* It's possible for an skb to have a maximal number of frags + * but still be less than MAX_BUFFER_OFFSET in size. Thus the + * worst-case number of copy operations is MAX_SKB_FRAGS per + * ring slot. + */ +#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) + struct xenvif { /* Unique identifier for this interface. */ domid_t domid; @@ -143,13 +150,13 @@ struct xenvif { */ RING_IDX rx_req_cons_peek; - /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each - * head/fragment page uses 2 copy operations because it - * straddles two buffers in the frontend. - */ - struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; - struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; + /* This array is allocated separately as it is large */ + struct gnttab_copy *grant_copy_op; + /* We create one meta structure per ring request we consume, so + * the maximum number is the same as the ring size. + */ + struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; u8 fe_dev_addr[6]; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 870f1fa583702e..fff8cddfed816d 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, SET_NETDEV_DEV(dev, parent); vif = netdev_priv(dev); + + vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * + MAX_GRANT_COPY_OPS); + if (vif->grant_copy_op == NULL) { + pr_warn("Could not allocate grant copy space for %s\n", name); + free_netdev(dev); + return ERR_PTR(-ENOMEM); + } + vif->domid = domid; vif->handle = handle; vif->can_sg = 1; @@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif) unregister_netdev(vif->dev); + vfree(vif->grant_copy_op); free_netdev(vif->dev); module_put(THIS_MODULE); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 27bbe58dcbe7bf..78425554a53757 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif) if (!npo.copy_prod) return; - BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); + BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { @@ -1209,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, goto out; if (!skb_partial_csum_set(skb, off, - offsetof(struct tcphdr, check))) + offsetof(struct tcphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) tcp_hdr(skb)->check = @@ -1227,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, goto out;
if (!skb_partial_csum_set(skb, off, - offsetof(struct udphdr, check))) + offsetof(struct udphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) udp_hdr(skb)->check = @@ -1350,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, goto out; if (!skb_partial_csum_set(skb, off, - offsetof(struct tcphdr, check))) + offsetof(struct tcphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) tcp_hdr(skb)->check = @@ -1368,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, goto out; if (!skb_partial_csum_set(skb, off, - offsetof(struct udphdr, check))) + offsetof(struct udphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) udp_hdr(skb)->check = diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index de6f8990246fef..c6973f101a3e4a 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -20,7 +20,7 @@ config OF_SELFTEST depends on OF_IRQ help This option builds in test cases for the device tree infrastructure - that are executed one at boot time, and the results dumped to the + that are executed once at boot time, and the results dumped to the console. If unsure, say N here, but this option is safe to enable. diff --git a/drivers/of/address.c b/drivers/of/address.c index 4b9317bdb81ce8..d3dd41c840f1cd 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range, (unsigned long long)cp, (unsigned long long)s, (unsigned long long)da); - /* - * If the number of address cells is larger than 2 we assume the - * mapping doesn't specify a physical address. Rather, the address - * specifies an identifier that must match exactly. - */ - if (na > 2 && memcmp(range, addr, na * 4) != 0) - return OF_BAD_ADDR; - if (da < cp || da >= (cp + s)) return OF_BAD_ADDR; return da - cp; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 2fa024b97c4350..758b4f8b30b7d2 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -922,8 +922,16 @@ void __init unflatten_device_tree(void) */ void __init unflatten_and_copy_device_tree(void) { - int size = __be32_to_cpu(initial_boot_params->totalsize); - void *dt = early_init_dt_alloc_memory_arch(size, + int size; + void *dt; + + if (!initial_boot_params) { + pr_warn("No valid device tree found, continuing without\n"); + return; + } + + size = __be32_to_cpu(initial_boot_params->totalsize); + dt = early_init_dt_alloc_memory_arch(size, __alignof__(struct boot_param_header)); if (dt) { diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 786b0b47fae468..27212402c53247 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) if (of_get_property(ipar, "interrupt-controller", NULL) != NULL) { pr_debug(" -> got it !\n"); - of_node_put(old); return 0; } @@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) * Successfully parsed an interrrupt-map translation; copy new * interrupt specifier into the out_irq structure */ - of_node_put(out_irq->np); - out_irq->np = of_node_get(newpar); + out_irq->np = newpar; match_array = imap - newaddrsize - newintsize; for (i = 0; i < newintsize; i++) @@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) } fail: of_node_put(ipar); - of_node_put(out_irq->np); of_node_put(newpar); return -EINVAL; diff --git a/drivers/parport/parport_mfc3.c 
b/drivers/parport/parport_mfc3.c index 7578d79b368831..2f650f68af14e4 100644 --- a/drivers/parport/parport_mfc3.c +++ b/drivers/parport/parport_mfc3.c @@ -300,7 +300,7 @@ static int __init parport_mfc3_init(void) if (!request_mem_region(piabase, sizeof(struct pia), "PIA")) continue; - pp = (struct pia *)ZTWO_VADDR(piabase); + pp = ZTWO_VADDR(piabase); pp->crb = 0; pp->pddrb = 255; /* all data pins output */ pp->crb = PIA_DDR|32|8; diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 96376152622962..76ee7750bc5ec2 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2600,8 +2600,6 @@ enum parport_pc_pci_cards { syba_2p_epp, syba_1p_ecp, titan_010l, - titan_1284p1, - titan_1284p2, avlab_1p, avlab_2p, oxsemi_952, @@ -2660,8 +2658,6 @@ static struct parport_pc_pci { /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } }, /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } }, /* titan_010l */ { 1, { { 3, -1 }, } }, - /* titan_1284p1 */ { 1, { { 0, 1 }, } }, - /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } }, /* avlab_1p */ { 1, { { 0, 1}, } }, /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} }, /* The Oxford Semi cards are unusual: 954 doesn't support ECP, @@ -2677,8 +2673,8 @@ static struct parport_pc_pci { /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, - /* netmos_9805 */ { 1, { { 0, -1 }, } }, - /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, + /* netmos_9805 */ { 1, { { 0, 1 }, } }, + /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } }, /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, @@ -2722,8 +2718,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L, PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l }, - { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 }, - { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 }, /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ /* AFAVLAB_TK9902 */ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, @@ -2827,16 +2821,12 @@ static int parport_pc_pci_probe(struct pci_dev *dev, if (irq == IRQ_NONE) { printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n", - parport_pc_pci_tbl[i + last_sio].vendor, - parport_pc_pci_tbl[i + last_sio].device, - io_lo, io_hi); + id->vendor, id->device, io_lo, io_hi); irq = PARPORT_IRQ_NONE; } else { printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n", - parport_pc_pci_tbl[i + last_sio].vendor, - parport_pc_pci_tbl[i + last_sio].device, - io_lo, io_hi, irq); + id->vendor, id->device, io_lo, io_hi, irq); } data->ports[count] = parport_pc_probe_port(io_lo, io_hi, irq, @@ -2866,8 +2856,6 @@ static void parport_pc_pci_remove(struct pci_dev *dev) struct pci_parport_data *data = pci_get_drvdata(dev); int i; - pci_set_drvdata(dev, NULL); - if (data) { for (i = data->num - 1; i >= 0; i--) parport_pc_unregister_port(data->ports[i]); diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 1b8bdb7e9bf40a..ff53314100f684 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c @@ -596,13 +596,11 @@ static int parport_serial_pci_probe(struct pci_dev *dev, err = pci_enable_device (dev); if (err) { - pci_set_drvdata (dev, NULL); kfree (priv); return err; } if (parport_register (dev, id)) { 
- pci_set_drvdata (dev, NULL); kfree (priv); return -ENODEV; } @@ -611,7 +609,6 @@ static int parport_serial_pci_probe(struct pci_dev *dev, int i; for (i = 0; i < priv->num_par; i++) parport_pc_unregister_port (priv->port[i]); - pci_set_drvdata (dev, NULL); kfree (priv); return -ENODEV; } @@ -624,8 +621,6 @@ static void parport_serial_pci_remove(struct pci_dev *dev) struct parport_serial_private *priv = pci_get_drvdata (dev); int i; - pci_set_drvdata(dev, NULL); - // Serial ports if (priv->serial) pciserial_remove_ports(priv->serial); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 1cf605f6767357..e86439283a5d19 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) { - acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); + if (status != AE_NOT_FOUND) + acpi_handle_warn(handle, + "can't evaluate _ADR (%#x)\n", status); return AE_OK; } @@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot) slot->flags &= (~SLOT_ENABLED); } +static bool acpiphp_no_hotplug(acpi_handle handle) +{ + struct acpi_device *adev = NULL; + + acpi_bus_get_device(handle, &adev); + return adev && adev->flags.no_hotplug; +} + +static bool slot_no_hotplug(struct acpiphp_slot *slot) +{ + struct acpiphp_func *func; + + list_for_each_entry(func, &slot->funcs, sibling) + if (acpiphp_no_hotplug(func_to_handle(func))) + return true; + + return false; +} /** * get_slot_status - get ACPI slot status @@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev) unsigned long long sta; status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); - alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL; + alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) + || acpiphp_no_hotplug(handle); } if (!alive) { u32 v; @@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) struct pci_dev *dev, *tmp; mutex_lock(&slot->crit_sect); - /* wake up all functions */ - if (get_slot_status(slot) == ACPI_STA_ALL) { + if (slot_no_hotplug(slot)) { + ; /* do nothing */ + } else if (get_slot_status(slot) == ACPI_STA_ALL) { /* remove stale devices if any */ list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 577074efbe62f9..f7ebdba14bde9b 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) static void pci_acpi_setup(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - acpi_handle handle = ACPI_HANDLE(dev); - struct acpi_device *adev; + struct acpi_device *adev = ACPI_COMPANION(dev); - if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) + if (!adev) + return; + + pci_acpi_add_pm_notifier(adev, pci_dev); + if (!adev->wakeup.flags.valid) return; device_set_wakeup_capable(dev, true); acpi_pci_sleep_wake(pci_dev, false); - - pci_acpi_add_pm_notifier(adev, pci_dev); if (adev->wakeup.flags.run_wake) device_set_run_wake(dev, true); } static void pci_acpi_cleanup(struct device *dev) { - acpi_handle handle = ACPI_HANDLE(dev); - struct acpi_device *adev; + struct acpi_device *adev = ACPI_COMPANION(dev); + + if (!adev) + return; - if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { + pci_acpi_remove_pm_notifier(adev); + 
if (adev->wakeup.flags.valid) { device_set_wakeup_capable(dev, false); device_set_run_wake(dev, false); - pci_acpi_remove_pm_notifier(adev); } } diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c index ed3b522601b3a4..971991bab975a2 100644 --- a/drivers/pcmcia/bfin_cf_pcmcia.c +++ b/drivers/pcmcia/bfin_cf_pcmcia.c @@ -303,7 +303,7 @@ static int bfin_cf_remove(struct platform_device *pdev) static struct platform_driver bfin_cf_driver = { .driver = { - .name = (char *)driver_name, + .name = driver_name, .owner = THIS_MODULE, }, .probe = bfin_cf_probe, diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c index 1b206eac5f93be..5ea64d0f61abf8 100644 --- a/drivers/pcmcia/electra_cf.c +++ b/drivers/pcmcia/electra_cf.c @@ -359,7 +359,7 @@ MODULE_DEVICE_TABLE(of, electra_cf_match); static struct platform_driver electra_cf_driver = { .driver = { - .name = (char *)driver_name, + .name = driver_name, .owner = THIS_MODULE, .of_match_table = electra_cf_match, }, diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 330ef2d065670e..b901c472d7f3e2 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -21,6 +21,12 @@ config PHY_EXYNOS_MIPI_VIDEO Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P and EXYNOS SoCs. +config PHY_MVEBU_SATA + def_bool y + depends on ARCH_KIRKWOOD || ARCH_DOVE + depends on OF + select GENERIC_PHY + config OMAP_USB2 tristate "OMAP USB2 PHY Driver" depends on ARCH_OMAP2PLUS @@ -51,4 +57,10 @@ config PHY_EXYNOS_DP_VIDEO help Support for Display Port PHY found on Samsung EXYNOS SoCs. +config BCM_KONA_USB2_PHY + tristate "Broadcom Kona USB2 PHY Driver" + depends on GENERIC_PHY + help + Enable this to support the Broadcom Kona USB 2.0 PHY. + endmenu diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index d0caae9cfb830f..b57c25371cca48 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -3,7 +3,9 @@ # obj-$(CONFIG_GENERIC_PHY) += phy-core.o +obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o +obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/phy-bcm-kona-usb2.c new file mode 100644 index 00000000000000..efc5c1a13a5d0c --- /dev/null +++ b/drivers/phy/phy-bcm-kona-usb2.c @@ -0,0 +1,158 @@ +/* + * phy-bcm-kona-usb2.c - Broadcom Kona USB2 Phy Driver + * + * Copyright (C) 2013 Linaro Limited + * Matt Porter + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define OTGCTL (0) +#define OTGCTL_OTGSTAT2 BIT(31) +#define OTGCTL_OTGSTAT1 BIT(30) +#define OTGCTL_PRST_N_SW BIT(11) +#define OTGCTL_HRESET_N BIT(10) +#define OTGCTL_UTMI_LINE_STATE1 BIT(9) +#define OTGCTL_UTMI_LINE_STATE0 BIT(8) + +#define P1CTL (8) +#define P1CTL_SOFT_RESET BIT(1) +#define P1CTL_NON_DRIVING BIT(0) + +struct bcm_kona_usb { + void __iomem *regs; +}; + +static void bcm_kona_usb_phy_power(struct bcm_kona_usb *phy, int on) +{ + u32 val; + + val = readl(phy->regs + OTGCTL); + if (on) { + /* Configure and power PHY */ + val &= ~(OTGCTL_OTGSTAT2 | OTGCTL_OTGSTAT1 | + OTGCTL_UTMI_LINE_STATE1 | OTGCTL_UTMI_LINE_STATE0); + val |= OTGCTL_PRST_N_SW | OTGCTL_HRESET_N; + } else { + val &= ~(OTGCTL_PRST_N_SW | OTGCTL_HRESET_N); + } + writel(val, phy->regs + OTGCTL); +} + +static int bcm_kona_usb_phy_init(struct phy *gphy) +{ + struct bcm_kona_usb *phy = phy_get_drvdata(gphy); + u32 val; + + /* Soft reset PHY */ + val = readl(phy->regs + P1CTL); + val &= ~P1CTL_NON_DRIVING; + val |= P1CTL_SOFT_RESET; + writel(val, phy->regs + P1CTL); + writel(val & ~P1CTL_SOFT_RESET, phy->regs + P1CTL); + /* Reset needs to be asserted for 2ms */ + mdelay(2); + writel(val | P1CTL_SOFT_RESET, phy->regs + P1CTL); + + return 0; +} + +static int bcm_kona_usb_phy_power_on(struct phy *gphy) +{ + struct bcm_kona_usb *phy = phy_get_drvdata(gphy); + + bcm_kona_usb_phy_power(phy, 1); + + return 0; +} + +static int bcm_kona_usb_phy_power_off(struct phy *gphy) +{ + struct bcm_kona_usb *phy = phy_get_drvdata(gphy); + + bcm_kona_usb_phy_power(phy, 0); + + return 0; +} + +static struct phy_ops ops = { + .init = bcm_kona_usb_phy_init, + .power_on = bcm_kona_usb_phy_power_on, + .power_off = bcm_kona_usb_phy_power_off, + .owner = THIS_MODULE, +}; + +static int bcm_kona_usb2_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bcm_kona_usb *phy; + struct resource *res; + struct phy *gphy; + struct phy_provider *phy_provider; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + phy->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(phy->regs)) + return PTR_ERR(phy->regs); + + platform_set_drvdata(pdev, phy); + + gphy = devm_phy_create(dev, &ops, NULL); + if (IS_ERR(gphy)) + return PTR_ERR(gphy); + + /* The Kona PHY supports an 8-bit wide UTMI interface */ + phy_set_bus_width(gphy, 8); + + phy_set_drvdata(gphy, phy); + + phy_provider = devm_of_phy_provider_register(dev, + of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + + return 0; +} + +static const struct of_device_id bcm_kona_usb2_dt_ids[] = { + { .compatible = "brcm,kona-usb2-phy" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, bcm_kona_usb2_dt_ids); + +static struct platform_driver bcm_kona_usb2_driver = { + .probe = bcm_kona_usb2_probe, + .driver = { + .name = "bcm-kona-usb2", + .owner = THIS_MODULE, + .of_match_table = bcm_kona_usb2_dt_ids, + }, +}; + +module_platform_driver(bcm_kona_usb2_driver); + +MODULE_ALIAS("platform:bcm-kona-usb2"); +MODULE_AUTHOR("Matt Porter "); +MODULE_DESCRIPTION("BCM Kona USB 2.0 PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 58e0e973902873..645c867c12573e 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -94,19 +94,31 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node) int 
phy_pm_runtime_get(struct phy *phy) { + int ret; + if (!pm_runtime_enabled(&phy->dev)) return -ENOTSUPP; - return pm_runtime_get(&phy->dev); + ret = pm_runtime_get(&phy->dev); + if (ret < 0 && ret != -EINPROGRESS) + pm_runtime_put_noidle(&phy->dev); + + return ret; } EXPORT_SYMBOL_GPL(phy_pm_runtime_get); int phy_pm_runtime_get_sync(struct phy *phy) { + int ret; + if (!pm_runtime_enabled(&phy->dev)) return -ENOTSUPP; - return pm_runtime_get_sync(&phy->dev); + ret = pm_runtime_get_sync(&phy->dev); + if (ret < 0) + pm_runtime_put_sync(&phy->dev); + + return ret; } EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync); @@ -155,13 +167,14 @@ int phy_init(struct phy *phy) return ret; mutex_lock(&phy->mutex); - if (phy->init_count++ == 0 && phy->ops->init) { + if (phy->init_count == 0 && phy->ops->init) { ret = phy->ops->init(phy); if (ret < 0) { dev_err(&phy->dev, "phy init failed --> %d\n", ret); goto out; } } + ++phy->init_count; out: mutex_unlock(&phy->mutex); @@ -179,13 +192,14 @@ int phy_exit(struct phy *phy) return ret; mutex_lock(&phy->mutex); - if (--phy->init_count == 0 && phy->ops->exit) { + if (phy->init_count == 1 && phy->ops->exit) { ret = phy->ops->exit(phy); if (ret < 0) { dev_err(&phy->dev, "phy exit failed --> %d\n", ret); goto out; } } + --phy->init_count; out: mutex_unlock(&phy->mutex); @@ -196,23 +210,27 @@ EXPORT_SYMBOL_GPL(phy_exit); int phy_power_on(struct phy *phy) { - int ret = -ENOTSUPP; + int ret; ret = phy_pm_runtime_get_sync(phy); if (ret < 0 && ret != -ENOTSUPP) return ret; mutex_lock(&phy->mutex); - if (phy->power_count++ == 0 && phy->ops->power_on) { + if (phy->power_count == 0 && phy->ops->power_on) { ret = phy->ops->power_on(phy); if (ret < 0) { dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); goto out; } } + ++phy->power_count; + mutex_unlock(&phy->mutex); + return 0; out: mutex_unlock(&phy->mutex); + phy_pm_runtime_put_sync(phy); return ret; } @@ -220,22 +238,22 @@ EXPORT_SYMBOL_GPL(phy_power_on); int phy_power_off(struct phy *phy) { - int ret = -ENOTSUPP; + int ret; mutex_lock(&phy->mutex); - if (--phy->power_count == 0 && phy->ops->power_off) { + if (phy->power_count == 1 && phy->ops->power_off) { ret = phy->ops->power_off(phy); if (ret < 0) { dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret); - goto out; + mutex_unlock(&phy->mutex); + return ret; } } - -out: + --phy->power_count; mutex_unlock(&phy->mutex); phy_pm_runtime_put(phy); - return ret; + return 0; } EXPORT_SYMBOL_GPL(phy_power_off); @@ -360,7 +378,7 @@ EXPORT_SYMBOL_GPL(of_phy_simple_xlate); struct phy *phy_get(struct device *dev, const char *string) { int index = 0; - struct phy *phy = NULL; + struct phy *phy; if (string == NULL) { dev_WARN(dev, "missing string\n"); diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c new file mode 100644 index 00000000000000..d43786f6243742 --- /dev/null +++ b/drivers/phy/phy-mvebu-sata.c @@ -0,0 +1,137 @@ +/* + * phy-mvebu-sata.c: SATA Phy driver for the Marvell mvebu SoCs. + * + * Copyright (C) 2013 Andrew Lunn + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include + +struct priv { + struct clk *clk; + void __iomem *base; +}; + +#define SATA_PHY_MODE_2 0x0330 +#define MODE_2_FORCE_PU_TX BIT(0) +#define MODE_2_FORCE_PU_RX BIT(1) +#define MODE_2_PU_PLL BIT(2) +#define MODE_2_PU_IVREF BIT(3) +#define SATA_IF_CTRL 0x0050 +#define CTRL_PHY_SHUTDOWN BIT(9) + +static int phy_mvebu_sata_power_on(struct phy *phy) +{ + struct priv *priv = phy_get_drvdata(phy); + u32 reg; + + clk_prepare_enable(priv->clk); + + /* Enable PLL and IVREF */ + reg = readl(priv->base + SATA_PHY_MODE_2); + reg |= (MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX | + MODE_2_PU_PLL | MODE_2_PU_IVREF); + writel(reg , priv->base + SATA_PHY_MODE_2); + + /* Enable PHY */ + reg = readl(priv->base + SATA_IF_CTRL); + reg &= ~CTRL_PHY_SHUTDOWN; + writel(reg, priv->base + SATA_IF_CTRL); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int phy_mvebu_sata_power_off(struct phy *phy) +{ + struct priv *priv = phy_get_drvdata(phy); + u32 reg; + + clk_prepare_enable(priv->clk); + + /* Disable PLL and IVREF */ + reg = readl(priv->base + SATA_PHY_MODE_2); + reg &= ~(MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX | + MODE_2_PU_PLL | MODE_2_PU_IVREF); + writel(reg, priv->base + SATA_PHY_MODE_2); + + /* Disable PHY */ + reg = readl(priv->base + SATA_IF_CTRL); + reg |= CTRL_PHY_SHUTDOWN; + writel(reg, priv->base + SATA_IF_CTRL); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static struct phy_ops phy_mvebu_sata_ops = { + .power_on = phy_mvebu_sata_power_on, + .power_off = phy_mvebu_sata_power_off, + .owner = THIS_MODULE, +}; + +static int phy_mvebu_sata_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct resource *res; + struct priv *priv; + struct phy *phy; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(&pdev->dev, "sata"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + phy_provider = devm_of_phy_provider_register(&pdev->dev, + of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + + phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + phy_set_drvdata(phy, priv); + + /* The boot loader may have left it on. Turn it off. */ + phy_mvebu_sata_power_off(phy); + + return 0; +} + +static const struct of_device_id phy_mvebu_sata_of_match[] = { + { .compatible = "marvell,mvebu-sata-phy" }, + { }, +}; +MODULE_DEVICE_TABLE(of, phy_mvebu_sata_of_match); + +static struct platform_driver phy_mvebu_sata_driver = { + .probe = phy_mvebu_sata_probe, + .driver = { + .name = "phy-mvebu-sata", + .owner = THIS_MODULE, + .of_match_table = phy_mvebu_sata_of_match, + } +}; +module_platform_driver(phy_mvebu_sata_driver); + +MODULE_AUTHOR("Andrew Lunn "); +MODULE_DESCRIPTION("Marvell MVEBU SATA PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 5e2054afe840e9..85ad58c6da1723 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -196,6 +196,7 @@ config BATTERY_MAX17040 config BATTERY_MAX17042 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" depends on I2C + select REGMAP_I2C help MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries in handheld and portable equipment.
The MAX17042 is configured diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 00e66729636017..557af943b2f53d 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy) dev_set_drvdata(dev, psy); psy->dev = dev; + rc = dev_set_name(dev, "%s", psy->name); + if (rc) + goto dev_set_name_failed; + INIT_WORK(&psy->changed_work, power_supply_changed_work); rc = power_supply_check_supplies(psy); @@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy) if (rc) goto wakeup_init_failed; - rc = kobject_set_name(&dev->kobj, "%s", psy->name); - if (rc) - goto kobject_set_name_failed; - rc = device_add(dev); if (rc) goto device_add_failed; @@ -553,11 +553,11 @@ int power_supply_register(struct device *parent, struct power_supply *psy) register_cooler_failed: psy_unregister_thermal(psy); register_thermal_failed: -wakeup_init_failed: device_del(dev); -kobject_set_name_failed: device_add_failed: +wakeup_init_failed: check_supplies_failed: +dev_set_name_failed: put_device(dev); success: return rc; diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 2a786c504460f5..3c6768378a9460 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd, return 0; } +static const struct x86_cpu_id energy_unit_quirk_ids[] = { + { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ + {} +}; + static int rapl_check_unit(struct rapl_package *rp, int cpu) { u64 msr_val; @@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu) * time unit: 1/time_unit_divisor Seconds */ value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; - rp->energy_unit_divisor = 1 << value; - + /* some CPUs have different way to calculate energy unit */ + if (x86_match_cpu(energy_unit_quirk_ids)) + rp->energy_unit_divisor = 1000000 / (1 << value); + else + rp->energy_unit_divisor = 1 << value; value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; rp->power_unit_divisor = 1 << value; @@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id) static const struct x86_cpu_id rapl_ids[] = { { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ + { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ /* TODO: Add more CPU IDs after testing */ diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index f14876256a4a5d..a2325bc5e497e7 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -34,11 +34,11 @@ #include #include #include -#include #include #include #include #include +#include /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include @@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +/* + * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes. + */ +static bool alarm_disable_quirk; + +static int __init set_alarm_disable_quirk(const struct dmi_system_id *id) +{ + alarm_disable_quirk = true; + pr_info("rtc-cmos: BIOS has alarm-disable quirk. 
"); + pr_info("RTC alarms disabled\n"); + return 0; +} + +static const struct dmi_system_id rtc_quirks[] __initconst = { + /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */ + { + .callback = set_alarm_disable_quirk, + .ident = "IBM Truman", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "4852570"), + }, + }, + /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */ + { + .callback = set_alarm_disable_quirk, + .ident = "Gigabyte GA-990XA-UD3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"), + }, + }, + /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */ + { + .callback = set_alarm_disable_quirk, + .ident = "Toshiba Satellite L300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"), + }, + }, + {} +}; + static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled) if (!is_valid_irq(cmos->irq)) return -EINVAL; + if (alarm_disable_quirk) + return 0; + spin_lock_irqsave(&rtc_lock, flags); if (enabled) @@ -1157,6 +1205,8 @@ static int __init cmos_init(void) platform_driver_registered = true; } + dmi_check_system(rtc_quirks); + if (retval == 0) return 0; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index f302efa937ef04..1eef0f586950b6 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3386,7 +3386,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev) if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { /* - * safe offline allready running + * safe offline already running * could only be called by normal offline so safe_offline flag * needs to be removed to run normal offline and kill all I/O */ diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 6fbe09686d18fc..fea76aed9eea41 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -183,7 +183,6 @@ extern unsigned long sclp_console_full; extern u8 sclp_fac84; extern unsigned long long sclp_rzm; extern unsigned long long sclp_rnmax; -extern __initdata int sclp_early_read_info_sccb_valid; /* useful inlines */ diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index eaa21d542c5cb1..cb3c4e05a38503 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -455,8 +455,6 @@ static int __init sclp_detect_standby_memory(void) if (OLDMEM_BASE) /* No standby memory in kdump mode */ return 0; - if (!sclp_early_read_info_sccb_valid) - return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; rc = -ENOMEM; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 1465e9563101f0..82f2c389b4d1f5 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -35,11 +35,12 @@ struct read_info_sccb { u8 _reserved5[4096 - 112]; /* 112-4095 */ } __packed __aligned(PAGE_SIZE); -static __initdata struct read_info_sccb early_read_info_sccb; -static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE); +static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata; +static unsigned int sclp_con_has_vt220 __initdata; +static unsigned int sclp_con_has_linemode __initdata; static unsigned long sclp_hsa_size; +static struct sclp_ipl_info sclp_ipl_info; -__initdata int sclp_early_read_info_sccb_valid; u64 sclp_facilities; 
u8 sclp_fac84; unsigned long long sclp_rzm; @@ -63,15 +64,12 @@ static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) return rc; } -static void __init sclp_read_info_early(void) +static int __init sclp_read_info_early(struct read_info_sccb *sccb) { - int rc; - int i; - struct read_info_sccb *sccb; + int rc, i; sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, SCLP_CMDW_READ_SCP_INFO}; - sccb = &early_read_info_sccb; for (i = 0; i < ARRAY_SIZE(commands); i++) { do { memset(sccb, 0, sizeof(*sccb)); @@ -83,24 +81,19 @@ static void __init sclp_read_info_early(void) if (rc) break; - if (sccb->header.response_code == 0x10) { - sclp_early_read_info_sccb_valid = 1; - break; - } + if (sccb->header.response_code == 0x10) + return 0; if (sccb->header.response_code != 0x1f0) break; } + return -EIO; } -static void __init sclp_facilities_detect(void) +static void __init sclp_facilities_detect(struct read_info_sccb *sccb) { - struct read_info_sccb *sccb; - - sclp_read_info_early(); - if (!sclp_early_read_info_sccb_valid) + if (sclp_read_info_early(sccb)) return; - sccb = &early_read_info_sccb; sclp_facilities = sccb->facilities; sclp_fac84 = sccb->fac84; if (sccb->fac85 & 0x02) @@ -108,30 +101,22 @@ static void __init sclp_facilities_detect(void) sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; sclp_rzm <<= 20; + + /* Save IPL information */ + sclp_ipl_info.is_valid = 1; + if (sccb->flags & 0x2) + sclp_ipl_info.has_dump = 1; + memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN); } bool __init sclp_has_linemode(void) { - struct init_sccb *sccb = (void *) &sccb_early; - - if (sccb->header.response_code != 0x20) - return 0; - if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - return 1; + return !!sclp_con_has_linemode; } bool __init sclp_has_vt220(void) { - struct init_sccb *sccb = (void *) &sccb_early; - - if (sccb->header.response_code != 0x20) - return 0; - if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) - return 1; - return 0; + return !!sclp_con_has_vt220; } unsigned long long sclp_get_rnmax(void) @@ -146,19 +131,12 @@ unsigned long long sclp_get_rzm(void) /* * This function will be called after sclp_facilities_detect(), which gets - * called from early.c code. Therefore the sccb should have valid contents. + * called from early.c code. The sclp_facilities_detect() function retrieves + * and saves the IPL information. 
*/ void __init sclp_get_ipl_info(struct sclp_ipl_info *info) { - struct read_info_sccb *sccb; - - if (!sclp_early_read_info_sccb_valid) - return; - sccb = &early_read_info_sccb; - info->is_valid = 1; - if (sccb->flags & 0x2) - info->has_dump = 1; - memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); + *info = sclp_ipl_info; } static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb) @@ -189,11 +167,10 @@ static void __init sccb_init_eq_size(struct sdias_sccb *sccb) sccb->evbuf.dbs = 1; } -static int __init sclp_set_event_mask(unsigned long receive_mask, +static int __init sclp_set_event_mask(struct init_sccb *sccb, + unsigned long receive_mask, unsigned long send_mask) { - struct init_sccb *sccb = (void *) &sccb_early; - memset(sccb, 0, sizeof(*sccb)); sccb->header.length = sizeof(*sccb); sccb->mask_length = sizeof(sccb_mask_t); @@ -202,10 +179,8 @@ static int __init sclp_set_event_mask(unsigned long receive_mask, return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); } -static long __init sclp_hsa_size_init(void) +static long __init sclp_hsa_size_init(struct sdias_sccb *sccb) { - struct sdias_sccb *sccb = (void *) &sccb_early; - sccb_init_eq_size(sccb); if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) return -EIO; @@ -214,10 +189,8 @@ static long __init sclp_hsa_size_init(void) return 0; } -static long __init sclp_hsa_copy_wait(void) +static long __init sclp_hsa_copy_wait(struct sccb_header *sccb) { - struct sccb_header *sccb = (void *) &sccb_early; - memset(sccb, 0, PAGE_SIZE); sccb->length = PAGE_SIZE; if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb)) @@ -230,34 +203,62 @@ unsigned long sclp_get_hsa_size(void) return sclp_hsa_size; } -static void __init sclp_hsa_size_detect(void) +static void __init sclp_hsa_size_detect(void *sccb) { long size; /* First try synchronous interface (LPAR) */ - if (sclp_set_event_mask(0, 0x40000010)) + if (sclp_set_event_mask(sccb, 0, 0x40000010)) return; - size = sclp_hsa_size_init(); + size = sclp_hsa_size_init(sccb); if (size < 0) return; if (size != 0) goto out; /* Then try asynchronous interface (z/VM) */ - if (sclp_set_event_mask(0x00000010, 0x40000010)) + if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010)) return; - size = sclp_hsa_size_init(); + size = sclp_hsa_size_init(sccb); if (size < 0) return; - size = sclp_hsa_copy_wait(); + size = sclp_hsa_copy_wait(sccb); if (size < 0) return; out: sclp_hsa_size = size; } +static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb) +{ + if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + return 1; +} + +static void __init sclp_console_detect(struct init_sccb *sccb) +{ + if (sccb->header.response_code != 0x20) + return; + + if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + sclp_con_has_vt220 = 1; + + if (sclp_con_check_linemode(sccb)) + sclp_con_has_linemode = 1; +} + void __init sclp_early_detect(void) { - sclp_facilities_detect(); - sclp_hsa_size_detect(); - sclp_set_event_mask(0, 0); + void *sccb = &sccb_early; + + sclp_facilities_detect(sccb); + sclp_hsa_size_detect(sccb); + + /* Turn off SCLP event notifications. Also save remote masks in the + * sccb. These are sufficient to detect sclp console capabilities. 
+ */ + sclp_set_event_mask(sccb, 0, 0); + sclp_console_detect(sccb); } diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 3f4ca4e09a4ccc..e91b89dc6d1f61 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -125,10 +125,7 @@ static void tty3270_resize_work(struct work_struct *work); */ static void tty3270_set_timer(struct tty3270 *tp, int expires) { - if (expires == 0) - del_timer(&tp->timer); - else - mod_timer(&tp->timer, jiffies + expires); + mod_timer(&tp->timer, jiffies + expires); } /* @@ -744,7 +741,6 @@ tty3270_free_view(struct tty3270 *tp) { int pages; - del_timer_sync(&tp->timer); kbd_free(tp->kbd); raw3270_request_free(tp->kreset); raw3270_request_free(tp->read); @@ -877,6 +873,7 @@ tty3270_free(struct raw3270_view *view) { struct tty3270 *tp = container_of(view, struct tty3270, view); + del_timer_sync(&tp->timer); tty3270_free_screen(tp->screen, tp->view.rows); tty3270_free_view(tp); } @@ -942,7 +939,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) return rc; } - tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); + tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols); if (IS_ERR(tp->screen)) { rc = PTR_ERR(tp->screen); raw3270_put_view(&tp->view); diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index a9fe3de2dec17f..b3f791b2c1f899 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -260,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf) parm = strsep(&buf, " "); - if (strcmp("free", parm) == 0) + if (strcmp("free", parm) == 0) { rc = blacklist_parse_parameters(buf, free, 0); - else if (strcmp("add", parm) == 0) + css_schedule_eval_all_unreg(0); + } else if (strcmp("add", parm) == 0) rc = blacklist_parse_parameters(buf, add, 0); else if (strcmp("purge", parm) == 0) return ccw_purge_blacklisted(); else return -EINVAL; - css_schedule_reprobe(); return rc; } diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 959135a0184794..fd3367a1dc7a0b 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -128,14 +128,14 @@ static ssize_t ccwgroup_online_store(struct device *dev, const char *buf, size_t count) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); unsigned long value; int ret; - if (!dev->driver) - return -EINVAL; - if (!try_module_get(gdrv->driver.owner)) - return -EINVAL; + device_lock(dev); + if (!dev->driver) { + ret = -EINVAL; + goto out; + } ret = kstrtoul(buf, 0, &value); if (ret) @@ -148,7 +148,7 @@ static ssize_t ccwgroup_online_store(struct device *dev, else ret = -EINVAL; out: - module_put(gdrv->driver.owner); + device_unlock(dev); return (ret == 0) ? 
count : ret; } diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 13299f902676e6..f6b9188c5af581 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -55,6 +55,7 @@ int chsc_error_from_response(int response) case 0x0004: return -EOPNOTSUPP; case 0x000b: + case 0x0107: /* "Channel busy" for the op 0x003d */ return -EBUSY; case 0x0100: case 0x0102: @@ -237,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid) for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); } -static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) -{ - struct schib schib; - /* - * We don't know the device yet, but since a path - * may be available now to the device we'll have - * to do recognition again. - * Since we don't have any idea about which chpid - * that beast may be on we'll have to do a stsch - * on all devices, grr... - */ - if (stsch_err(schid, &schib)) - /* We're through */ - return -ENXIO; - - /* Put it on the slow path. */ - css_schedule_eval(schid); - return 0; -} - static int __s390_process_res_acc(struct subchannel *sch, void *data) { spin_lock_irq(sch->lock); @@ -287,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link) * The more information we have (info), the less scanning * will we have to do. */ - for_each_subchannel_staged(__s390_process_res_acc, - s390_process_res_acc_new_sch, link); + for_each_subchannel_staged(__s390_process_res_acc, NULL, link); + css_schedule_reprobe(); } static int @@ -663,19 +644,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) return 0; } -static int -__s390_vary_chpid_on(struct subchannel_id schid, void *data) -{ - struct schib schib; - - if (stsch_err(schid, &schib)) - /* We're through */ - return -ENXIO; - /* Put it on the slow path. */ - css_schedule_eval(schid); - return 0; -} - /** * chsc_chp_vary - propagate channel-path vary operation to subchannels * @chpid: channl-path ID @@ -694,7 +662,8 @@ int chsc_chp_vary(struct chp_id chpid, int on) /* Try to update the channel path description. */ chp_update_desc(chp); for_each_subchannel_staged(s390_subchannel_vary_chpid_on, - __s390_vary_chpid_on, &chpid); + NULL, &chpid); + css_schedule_reprobe(); } else for_each_subchannel_staged(s390_subchannel_vary_chpid_off, NULL, &chpid); @@ -1234,3 +1203,35 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token) return ret; } EXPORT_SYMBOL_GPL(chsc_scm_info); + +/** + * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info. + * @schid: id of the subchannel on which PNSO is performed + * @brinfo_area: request and response block for the operation + * @resume_token: resume token for multiblock response + * @cnc: Boolean change-notification control + * + * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) + * + * Returns 0 on success. 
+ */ +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc) +{ + memset(brinfo_area, 0, sizeof(*brinfo_area)); + brinfo_area->request.length = 0x0030; + brinfo_area->request.code = 0x003d; /* network-subchannel operation */ + brinfo_area->m = schid.m; + brinfo_area->ssid = schid.ssid; + brinfo_area->sch = schid.sch_no; + brinfo_area->cssid = schid.cssid; + brinfo_area->oc = 0; /* Store-network-bridging-information list */ + brinfo_area->resume_token = resume_token; + brinfo_area->n = (cnc != 0); + if (chsc(brinfo_area)) + return -EIO; + return chsc_error_from_response(brinfo_area->response.code); +} +EXPORT_SYMBOL_GPL(chsc_pnso_brinfo); diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 23d072e70eb2f4..7e53a9c8b0b991 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -61,7 +61,9 @@ struct css_chsc_char { u32 : 20; u32 scssc : 1; /* bit 107 */ u32 scsscf : 1; /* bit 108 */ - u32 : 19; + u32:7; + u32 pnso:1; /* bit 116 */ + u32:11; }__attribute__((packed)); extern struct css_chsc_char css_chsc_characteristics; @@ -188,6 +190,53 @@ struct chsc_scm_info { int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); +struct chsc_brinfo_resume_token { + u64 t1; + u64 t2; +} __packed; + +struct chsc_brinfo_naihdr { + struct chsc_brinfo_resume_token resume_token; + u32:32; + u32 instance; + u32:24; + u8 naids; + u32 reserved[3]; +} __packed; + +struct chsc_pnso_area { + struct chsc_header request; + u8:2; + u8 m:1; + u8:5; + u8:2; + u8 ssid:2; + u8 fmt:4; + u16 sch; + u8:8; + u8 cssid; + u16:16; + u8 oc; + u32:24; + struct chsc_brinfo_resume_token resume_token; + u32 n:1; + u32:31; + u32 reserved[3]; + struct chsc_header response; + u32:32; + struct chsc_brinfo_naihdr naihdr; + union { + struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0]; + struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0]; + struct qdio_brinfo_entry_l2 l2[0]; + } entries; +} __packed; + +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc); + #ifdef CONFIG_SCM_BUS int scm_update_information(void); int scm_process_availability_information(void); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 8c2cb87bccc5d8..0268e5fd59b552 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -69,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data) struct cb_data *cb = data; int rc = 0; - idset_sch_del(cb->set, sch->schid); + if (cb->set) + idset_sch_del(cb->set, sch->schid); if (cb->fn_known_sch) rc = cb->fn_known_sch(sch, cb->data); return rc; @@ -115,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), cb.fn_known_sch = fn_known; cb.fn_unknown_sch = fn_unknown; + if (fn_known && !fn_unknown) { + /* Skip idset allocation in case of known-only loop. */ + cb.set = NULL; + return bus_for_each_dev(&css_bus_type, NULL, &cb, + call_fn_known_sch); + } + cb.set = idset_sch_new(); if (!cb.set) /* fall back to brute force scanning in case of oom */ @@ -553,6 +561,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) default: rc = 0; } + /* Allow scheduling here since the containing loop might + * take a while. 
*/ + cond_resched(); } return rc; } @@ -572,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused) spin_unlock_irqrestore(&slow_subchannel_lock, flags); } -static DECLARE_WORK(slow_path_work, css_slow_path_func); +static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func); struct workqueue_struct *cio_work_q; void css_schedule_eval(struct subchannel_id schid) @@ -582,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_sch_add(slow_subchannel_set, schid); atomic_set(&css_eval_scheduled, 1); - queue_work(cio_work_q, &slow_path_work); + queue_delayed_work(cio_work_q, &slow_path_work, 0); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -593,7 +604,7 @@ void css_schedule_eval_all(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_fill(slow_subchannel_set); atomic_set(&css_eval_scheduled, 1); - queue_work(cio_work_q, &slow_path_work); + queue_delayed_work(cio_work_q, &slow_path_work, 0); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -606,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data) return 0; } -static void css_schedule_eval_all_unreg(void) +void css_schedule_eval_all_unreg(unsigned long delay) { unsigned long flags; struct idset *unreg_set; @@ -624,7 +635,7 @@ static void css_schedule_eval_all_unreg(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_add_set(slow_subchannel_set, unreg_set); atomic_set(&css_eval_scheduled, 1); - queue_work(cio_work_q, &slow_path_work); + queue_delayed_work(cio_work_q, &slow_path_work, delay); spin_unlock_irqrestore(&slow_subchannel_lock, flags); idset_free(unreg_set); } @@ -637,7 +648,8 @@ void css_wait_for_slow_path(void) /* Schedule reprobing of all unregistered subchannels. */ void css_schedule_reprobe(void) { - css_schedule_eval_all_unreg(); + /* Schedule with a delay to allow merging of subsequent calls. */ + css_schedule_eval_all_unreg(1 * HZ); } EXPORT_SYMBOL_GPL(css_schedule_reprobe); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 29351321bad6c7..2c9107e2025143 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -133,6 +133,7 @@ extern struct channel_subsystem *channel_subsystems[]; /* Helper functions to build lists for the slow path. 
*/ void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval_all(void); +void css_schedule_eval_all_unreg(unsigned long delay); int css_complete_work(void); int sch_is_pseudo_sch(struct subchannel *); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e4a7ab2bb629f7..e9d783563cbb88 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -333,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev) if (ret != 0) return ret; } - cdev->online = 0; spin_lock_irq(cdev->ccwlock); sch = to_subchannel(cdev->dev.parent); + cdev->online = 0; /* Wait until a final state or DISCONNECTED is reached */ while (!dev_fsm_final_state(cdev) && cdev->private->state != DEV_STATE_DISCONNECTED) { @@ -446,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev) ret = cdev->drv->set_online(cdev); if (ret) goto rollback; + + spin_lock_irq(cdev->ccwlock); cdev->online = 1; + spin_unlock_irq(cdev->ccwlock); return 0; rollback: @@ -546,17 +549,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, if (!dev_fsm_final_state(cdev) && cdev->private->state != DEV_STATE_DISCONNECTED) { ret = -EAGAIN; - goto out_onoff; + goto out; } /* Prevent conflict between pending work and on-/offline processing.*/ if (work_pending(&cdev->private->todo_work)) { ret = -EAGAIN; - goto out_onoff; - } - - if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) { - ret = -EINVAL; - goto out_onoff; + goto out; } if (!strncmp(buf, "force\n", count)) { force = 1; @@ -568,6 +566,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, } if (ret) goto out; + + device_lock(dev); switch (i) { case 0: ret = online_store_handle_offline(cdev); @@ -578,10 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, default: ret = -EINVAL; } + device_unlock(dev); + out: - if (cdev->drv) - module_put(cdev->drv->driver.owner); -out_onoff: atomic_set(&cdev->private->onoff, 0); return (ret < 0) ? ret : count; } @@ -1745,8 +1744,7 @@ ccw_device_probe (struct device *dev) return 0; } -static int -ccw_device_remove (struct device *dev) +static int ccw_device_remove(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = cdev->drv; @@ -1754,9 +1752,10 @@ ccw_device_remove (struct device *dev) if (cdrv->remove) cdrv->remove(cdev); + + spin_lock_irq(cdev->ccwlock); if (cdev->online) { cdev->online = 0; - spin_lock_irq(cdev->ccwlock); ret = ccw_device_offline(cdev); spin_unlock_irq(cdev->ccwlock); if (ret == 0) @@ -1769,10 +1768,12 @@ ccw_device_remove (struct device *dev) cdev->private->dev_id.devno); /* Give up reference obtained in ccw_device_set_online(). */ put_device(&cdev->dev); + spin_lock_irq(cdev->ccwlock); } ccw_device_set_timeout(cdev, 0); cdev->drv = NULL; cdev->private->int_class = IRQIO_CIO; + spin_unlock_irq(cdev->ccwlock); return 0; } diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 3e602e8affa78c..c883a085c0591c 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1752,6 +1752,97 @@ int qdio_stop_irq(struct ccw_device *cdev, int nr) } EXPORT_SYMBOL(qdio_stop_irq); +/** + * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info. + * @schid: Subchannel ID. 
+ * @cnc: Boolean Change-Notification Control + * @response: Response code will be stored at this address + * @cb: Callback function will be executed for each element + * of the address list + * @priv: Pointer passed from the caller to qdio_pnso_brinfo() + * @type: Type of the address entry passed to the callback + * @entry: Entry containing the address of the specified type + * @priv: Pointer to pass to the callback function. + * + * Performs "Store-network-bridging-information list" operation and calls + * the callback function for every entry in the list. If "change- + * notification-control" is set, further changes in the address list + * will be reported via the IPA command. + */ +int qdio_pnso_brinfo(struct subchannel_id schid, + int cnc, u16 *response, + void (*cb)(void *priv, enum qdio_brinfo_entry_type type, + void *entry), + void *priv) +{ + struct chsc_pnso_area *rr; + int rc; + u32 prev_instance = 0; + int isfirstblock = 1; + int i, size, elems; + + rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL); + if (rr == NULL) + return -ENOMEM; + do { + /* on the first iteration, naihdr.resume_token will be zero */ + rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc); + if (rc != 0 && rc != -EBUSY) + goto out; + if (rr->response.code != 1) { + rc = -EIO; + continue; + } else + rc = 0; + + if (cb == NULL) + continue; + + size = rr->naihdr.naids; + elems = (rr->response.length - + sizeof(struct chsc_header) - + sizeof(struct chsc_brinfo_naihdr)) / + size; + + if (!isfirstblock && (rr->naihdr.instance != prev_instance)) { + /* Inform the caller that they need to scrap */ + /* the data that was already reported via cb */ + rc = -EAGAIN; + break; + } + isfirstblock = 0; + prev_instance = rr->naihdr.instance; + for (i = 0; i < elems; i++) + switch (size) { + case sizeof(struct qdio_brinfo_entry_l3_ipv6): + (*cb)(priv, l3_ipv6_addr, + &rr->entries.l3_ipv6[i]); + break; + case sizeof(struct qdio_brinfo_entry_l3_ipv4): + (*cb)(priv, l3_ipv4_addr, + &rr->entries.l3_ipv4[i]); + break; + case sizeof(struct qdio_brinfo_entry_l2): + (*cb)(priv, l2_addr_lnid, + &rr->entries.l2[i]); + break; + default: + WARN_ON_ONCE(1); + rc = -EIO; + goto out; + } + } while (rr->response.code == 0x0107 || /* channel busy */ + (rr->response.code == 1 && /* list stored */ + /* resume token is non-zero => list incomplete */ + (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2))); + (*response) = rr->response.code; + +out: + free_page((unsigned long)rr); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_pnso_brinfo); + static int __init init_QDIO(void) { int rc; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 02300dcfac91f8..ab3baa7f95082e 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -591,7 +591,13 @@ static int ap_init_queue(ap_qid_t qid) if (rc != -ENODEV && rc != -EBUSY) break; if (i < AP_MAX_RESET - 1) { - udelay(5); + /* Time we are waiting until we give up (0.7sec * 90). + * Since the actual request (in progress) will not be + * interrupted immediately for the reset command, + * we have to be patient. In the worst case we have to + * wait 60sec + reset time (some msec).
+ */ + schedule_timeout(AP_RESET_TIMEOUT); status = ap_test_queue(qid, &dummy, &dummy); } } @@ -992,6 +998,28 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf) static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); +static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) +{ + if (ap_configuration != NULL) { /* QCI not supported */ + if (test_facility(76)) { /* format 1 - 256 bit domain field */ + return snprintf(buf, PAGE_SIZE, + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", + ap_configuration->adm[0], ap_configuration->adm[1], + ap_configuration->adm[2], ap_configuration->adm[3], + ap_configuration->adm[4], ap_configuration->adm[5], + ap_configuration->adm[6], ap_configuration->adm[7]); + } else { /* format 0 - 16 bit domain field */ + return snprintf(buf, PAGE_SIZE, "%08x%08x\n", + ap_configuration->adm[0], ap_configuration->adm[1]); + } + } else { + return snprintf(buf, PAGE_SIZE, "not supported\n"); + } +} + +static BUS_ATTR(ap_control_domain_mask, 0444, + ap_control_domain_mask_show, NULL); + static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); @@ -1077,6 +1105,7 @@ static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store); static struct bus_attribute *const ap_bus_attrs[] = { &bus_attr_ap_domain, + &bus_attr_ap_control_domain_mask, &bus_attr_config_time, &bus_attr_poll_thread, &bus_attr_ap_interrupts, diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 685f6cc022f92e..6405ae24a7a696 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -33,7 +33,7 @@ #define AP_DEVICES 64 /* Number of AP devices. */ #define AP_DOMAINS 16 /* Number of AP domains. */ #define AP_MAX_RESET 90 /* Maximum number of resets. */ -#define AP_RESET_TIMEOUT (HZ/2) /* Time in ticks for reset timeouts. */ +#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */ #define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ #define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ @@ -125,6 +125,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) #define AP_FUNC_CRT4K 2 #define AP_FUNC_COPRO 3 #define AP_FUNC_ACCEL 4 +#define AP_FUNC_EP11 5 +#define AP_FUNC_APXA 6 /* * AP reset flag states diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 31cfaa556072c0..4b824b15194f6e 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -44,6 +44,8 @@ #include "zcrypt_debug.h" #include "zcrypt_api.h" +#include "zcrypt_msgtype6.h" + /* * Module description. 
*/ @@ -554,9 +556,9 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB) spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { if (!zdev->online || !zdev->ops->send_cprb || - (xcRB->user_defined != AUTOSELECT && - AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) - ) + (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) || + (xcRB->user_defined != AUTOSELECT && + AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) continue; zcrypt_device_get(zdev); get_device(&zdev->ap_dev->device); @@ -581,6 +583,90 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB) return -ENODEV; } +struct ep11_target_dev_list { + unsigned short targets_num; + struct ep11_target_dev *targets; +}; + +static bool is_desired_ep11dev(unsigned int dev_qid, + struct ep11_target_dev_list dev_list) +{ + int n; + + for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) { + if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) && + (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) { + return true; + } + } + return false; +} + +static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +{ + struct zcrypt_device *zdev; + bool autoselect = false; + int rc; + struct ep11_target_dev_list ep11_dev_list = { + .targets_num = 0x00, + .targets = NULL, + }; + + ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num; + + /* empty list indicates autoselect (all available targets) */ + if (ep11_dev_list.targets_num == 0) + autoselect = true; + else { + ep11_dev_list.targets = kcalloc((unsigned short) + xcrb->targets_num, + sizeof(struct ep11_target_dev), + GFP_KERNEL); + if (!ep11_dev_list.targets) + return -ENOMEM; + + if (copy_from_user(ep11_dev_list.targets, + (struct ep11_target_dev *)xcrb->targets, + xcrb->targets_num * + sizeof(struct ep11_target_dev))) + return -EFAULT; + } + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + /* check if device is eligible */ + if (!zdev->online || + zdev->ops->variant != MSGTYPE06_VARIANT_EP11) + continue; + + /* check if device is selected as valid target */ + if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && + !autoselect) + continue; + + zcrypt_device_get(zdev); + get_device(&zdev->ap_dev->device); + zdev->request_count++; + __zcrypt_decrease_preference(zdev); + if (try_module_get(zdev->ap_dev->drv->driver.owner)) { + spin_unlock_bh(&zcrypt_device_lock); + rc = zdev->ops->send_ep11_cprb(zdev, xcrb); + spin_lock_bh(&zcrypt_device_lock); + module_put(zdev->ap_dev->drv->driver.owner); + } else { + rc = -EAGAIN; + } + zdev->request_count--; + __zcrypt_increase_preference(zdev); + put_device(&zdev->ap_dev->device); + zcrypt_device_put(zdev); + spin_unlock_bh(&zcrypt_device_lock); + return rc; + } + spin_unlock_bh(&zcrypt_device_lock); + return -ENODEV; +} + static long zcrypt_rng(char *buffer) { struct zcrypt_device *zdev; @@ -784,6 +870,23 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; return rc; } + case ZSENDEP11CPRB: { + struct ep11_urb __user *uxcrb = (void __user *)arg; + struct ep11_urb xcrb; + if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) + return -EFAULT; + do { + rc = zcrypt_send_ep11_cprb(&xcrb); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_send_ep11_cprb(&xcrb); + } while (rc == -EAGAIN); + if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) + return -EFAULT; + return rc; + } case Z90STAT_STATUS_MASK: { char 
status[AP_DEVICES]; zcrypt_status_mask(status); diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 89632919c993ab..b3d496bfaa7ead 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -74,6 +74,7 @@ struct ica_z90_status { #define ZCRYPT_CEX2A 6 #define ZCRYPT_CEX3C 7 #define ZCRYPT_CEX3A 8 +#define ZCRYPT_CEX4 10 /** * Large random numbers are pulled in 4096 byte chunks from the crypto cards @@ -89,6 +90,7 @@ struct zcrypt_ops { long (*rsa_modexpo_crt)(struct zcrypt_device *, struct ica_rsa_modexpo_crt *); long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); + long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *); long (*rng)(struct zcrypt_device *, char *); struct list_head list; /* zcrypt ops list. */ struct module *owner; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index ce1226398ac996..569f8b1d86c053 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -30,7 +30,12 @@ #define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE #define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE -#define CEX4_CLEANUP_TIME (15*HZ) +/* Waiting time for requests to be processed. + * Currently there are some types of request which are not deterministic. + * But the maximum time limit managed by the stomper code is set to 60sec. + * Hence we have to wait at least that time period. + */ +#define CEX4_CLEANUP_TIME (61*HZ) static struct ap_device_id zcrypt_cex4_ids[] = { { AP_DEVICE(AP_DEVICE_TYPE_CEX4) }, @@ -101,6 +106,19 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev) zdev->speed_rating = CEX4C_SPEED_RATING; zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT); + } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) { + zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); + if (!zdev) + return -ENOMEM; + zdev->type_string = "CEX4P"; + zdev->user_space_type = ZCRYPT_CEX4; + zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; + zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; + zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; + zdev->short_crt = 0; + zdev->speed_rating = CEX4C_SPEED_RATING; + zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_EP11); } break; } diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 0079b661721139..7b23f43c7b080c 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -106,15 +106,15 @@ static inline int convert_error(struct zcrypt_device *zdev, // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A /* * To sent a message of the wrong type is a bug in the - * device driver. Warn about it, disable the device + * device driver. Send error msg, disable the device * and then repeat the request. */ - WARN_ON(1); atomic_set(&zcrypt_rescan_req, 1); zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - zdev->ap_dev->qid, - zdev->online, ehdr->reply_code); + zdev->ap_dev->qid, zdev->online, ehdr->reply_code); return -EAGAIN; case REP82_ERROR_TRANSPORT_FAIL: case REP82_ERROR_MACHINE_FAILURE: @@ -122,15 +122,17 @@ static inline int convert_error(struct zcrypt_device *zdev, /* If a card fails disable it and repeat the request. 
*/ atomic_set(&zcrypt_rescan_req, 1); zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - zdev->ap_dev->qid, - zdev->online, ehdr->reply_code); + zdev->ap_dev->qid, zdev->online, ehdr->reply_code); return -EAGAIN; default: zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - zdev->ap_dev->qid, - zdev->online, ehdr->reply_code); + zdev->ap_dev->qid, zdev->online, ehdr->reply_code); return -EAGAIN; /* repeat the request on a different device. */ } } diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 7c522f338bda16..334e282f255b7d 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -25,6 +25,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include #include #include @@ -332,6 +335,11 @@ static int convert_type80(struct zcrypt_device *zdev, if (t80h->len < sizeof(*t80h) + outputdatalength) { /* The result is too short, the CEX2A card may not do that.. */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, t80h->code); + return -EAGAIN; /* repeat the request on a different device. */ } if (zdev->user_space_type == ZCRYPT_CEX2A) @@ -359,6 +367,10 @@ static int convert_response(struct zcrypt_device *zdev, outputdata, outputdatalength); default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. */ } } diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 7d97fa5a26d0cc..dc542e0a3055a6 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -25,6 +25,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include #include #include @@ -50,6 +53,7 @@ struct response_type { }; #define PCIXCC_RESPONSE_TYPE_ICA 0 #define PCIXCC_RESPONSE_TYPE_XCRB 1 +#define PCIXCC_RESPONSE_TYPE_EP11 2 MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ @@ -358,6 +362,91 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, return 0; } +static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ep11_urb *xcRB) +{ + unsigned int lfmt; + + static struct type6_hdr static_type6_ep11_hdr = { + .type = 0x06, + .rqid = {0x00, 0x01}, + .function_code = {0x00, 0x00}, + .agent_id[0] = 0x58, /* {'X'} */ + .agent_id[1] = 0x43, /* {'C'} */ + .offset1 = 0x00000058, + }; + + struct { + struct type6_hdr hdr; + struct ep11_cprb cprbx; + unsigned char pld_tag; /* fixed value 0x30 */ + unsigned char pld_lenfmt; /* payload length format */ + } __packed * msg = ap_msg->message; + + struct pld_hdr { + unsigned char func_tag; /* fixed value 0x4 */ + unsigned char func_len; /* fixed value 0x4 */ + unsigned int func_val; /* function ID */ + unsigned char dom_tag; /* fixed value 0x4 */ + unsigned char dom_len; /* fixed value 0x4 */ + unsigned int dom_val; /* domain id */ + } __packed * payload_hdr; + + /* length checks */ + ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len; + if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE - + (sizeof(struct type6_hdr))) + return -EINVAL; + + if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE - + (sizeof(struct type86_fmt2_msg))) + return -EINVAL; + + /* prepare type6 header */ + msg->hdr = static_type6_ep11_hdr; + msg->hdr.ToCardLen1 = xcRB->req_len; + msg->hdr.FromCardLen1 = xcRB->resp_len; + + /* Import CPRB data from the ioctl input parameter */ + if (copy_from_user(&(msg->cprbx.cprb_len), + (char *)xcRB->req, xcRB->req_len)) { + return -EFAULT; + } + + /* + The target domain field within the cprb body/payload block will be + replaced by the usage domain for non-management commands only. + Therefore we check the first bit of the 'flags' parameter for + management command indication. + 0 - non management command + 1 - management command + */ + if (!((msg->cprbx.flags & 0x80) == 0x80)) { + msg->cprbx.target_id = (unsigned int) + AP_QID_QUEUE(zdev->ap_dev->qid); + + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ + switch (msg->pld_lenfmt & 0x03) { + case 1: + lfmt = 2; + break; + case 2: + lfmt = 3; + break; + default: + return -EINVAL; + } + } else { + lfmt = 1; /* length format #1 */ + } + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); + payload_hdr->dom_val = (unsigned int) + AP_QID_QUEUE(zdev->ap_dev->qid); + } + return 0; +} + /** * Copy results from a type 86 ICA reply message back to user space. 
* @@ -377,6 +466,12 @@ struct type86x_reply { char text[0]; } __packed; +struct type86_ep11_reply { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct ep11_cprb cprbx; +} __packed; + static int convert_type86_ica(struct zcrypt_device *zdev, struct ap_message *reply, char __user *outputdata, @@ -440,6 +535,11 @@ static int convert_type86_ica(struct zcrypt_device *zdev, if (service_rc == 8 && service_rs == 72) return -EINVAL; zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, + msg->hdr.reply_code); return -EAGAIN; /* repeat the request on a different device. */ } data = msg->text; @@ -503,6 +603,33 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev, return 0; } +/** + * Copy results from a type 86 EP11 XCRB reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @xcRB: pointer to EP11 user request block + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. + */ +static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, + struct ap_message *reply, + struct ep11_urb *xcRB) +{ + struct type86_fmt2_msg *msg = reply->message; + char *data = reply->message; + + if (xcRB->resp_len < msg->fmt2.count1) + return -EINVAL; + + /* Copy response CPRB to user */ + if (copy_to_user((char *)xcRB->resp, + data + msg->fmt2.offset1, msg->fmt2.count1)) + return -EFAULT; + xcRB->resp_len = msg->fmt2.count1; + return 0; +} + static int convert_type86_rng(struct zcrypt_device *zdev, struct ap_message *reply, char *buffer) @@ -551,6 +678,10 @@ static int convert_response_ica(struct zcrypt_device *zdev, * response */ default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. */ } } @@ -579,10 +710,40 @@ static int convert_response_xcrb(struct zcrypt_device *zdev, default: /* Unknown response type, this should NEVER EVER happen */ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. */ } } +static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, + struct ap_message *reply, struct ep11_urb *xcRB) +{ + struct type86_ep11_reply *msg = reply->message; + + /* Response type byte is the second byte in the response. */ + switch (((unsigned char *)reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE87_RSP_CODE: + return convert_error(zdev, reply); + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) + return convert_error(zdev, reply); + if (msg->cprbx.cprb_ver_id == 0x04) + return convert_type86_ep11_xcrb(zdev, reply, xcRB); + /* Fall through, no break, incorrect cprb version is an unknown resp.*/ + default: /* Unknown response type, this should NEVER EVER happen */ + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); + return -EAGAIN; /* repeat the request on a different device. 
*/ + } +} + static int convert_response_rng(struct zcrypt_device *zdev, struct ap_message *reply, char *data) @@ -602,6 +763,10 @@ static int convert_response_rng(struct zcrypt_device *zdev, * response */ default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. */ } } @@ -657,6 +822,51 @@ static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, complete(&(resp_type->work)); } +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct response_type *resp_type = + (struct response_type *)msg->private; + struct type86_ep11_reply *t86r; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) { + memcpy(msg->message, &error_reply, sizeof(error_reply)); + goto out; + } + t86r = reply->message; + if (t86r->hdr.type == TYPE86_RSP_CODE && + t86r->cprbx.cprb_ver_id == 0x04) { + switch (resp_type->type) { + case PCIXCC_RESPONSE_TYPE_EP11: + length = t86r->fmt2.offset1 + t86r->fmt2.count1; + length = min(MSGTYPE06_MAX_MSG_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + default: + memcpy(msg->message, &error_reply, sizeof(error_reply)); + } + } else { + memcpy(msg->message, reply->message, sizeof(error_reply)); + } +out: + complete(&(resp_type->work)); +} + static atomic_t zcrypt_step = ATOMIC_INIT(0); /** @@ -781,6 +991,46 @@ static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev, return rc; } +/** + * The request distributor calls this function if it picked the CEX4P + * device to handle a send_ep11_cprb request. + * @zdev: pointer to zcrypt_device structure that identifies the + * CEX4P device to the request distributor + * @xcRB: pointer to the ep11 user request block + */ +static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev, + struct ep11_urb *xcrb) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_EP11, + }; + int rc; + + ap_init_message(&ap_msg); + ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_msgtype6_receive_ep11; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb); + if (rc) + goto out_free; + init_completion(&resp_type.work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) + rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); + else /* Signal pending. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + +out_free: + kzfree(ap_msg.message); + return rc; +} + /** * The request distributor calls this function if it picked the PCIXCC/CEX2C * device to generate random data. 
@@ -839,10 +1089,19 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = { .rng = zcrypt_msgtype6_rng, }; +static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { + .owner = THIS_MODULE, + .variant = MSGTYPE06_VARIANT_EP11, + .rsa_modexpo = NULL, + .rsa_modexpo_crt = NULL, + .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb, +}; + int __init zcrypt_msgtype6_init(void) { zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_ops); + zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops); return 0; } @@ -850,6 +1109,7 @@ void __exit zcrypt_msgtype6_exit(void) { zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops); zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); + zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops); } module_init(zcrypt_msgtype6_init); diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h index 1e500d3c073571..20724757062370 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.h +++ b/drivers/s390/crypto/zcrypt_msgtype6.h @@ -32,6 +32,7 @@ #define MSGTYPE06_NAME "zcrypt_msgtype6" #define MSGTYPE06_VARIANT_DEFAULT 0 #define MSGTYPE06_VARIANT_NORNG 1 +#define MSGTYPE06_VARIANT_EP11 2 #define MSGTYPE06_MAX_MSG_SIZE (12*1024) @@ -99,6 +100,7 @@ struct type86_hdr { } __packed; #define TYPE86_RSP_CODE 0x86 +#define TYPE87_RSP_CODE 0x87 #define TYPE86_FMT2 0x02 struct type86_fmt2_ext { diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index f2b71d8df01f0c..7a743f4c646c0e 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -24,6 +24,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include #include #include @@ -199,6 +202,10 @@ static int convert_type84(struct zcrypt_device *zdev, if (t84h->len < sizeof(*t84h) + outputdatalength) { /* The result is too short, the PCICA card may not do that.. */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, t84h->code); return -EAGAIN; /* repeat the request on a different device. */ } BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE); @@ -223,6 +230,10 @@ static int convert_response(struct zcrypt_device *zdev, outputdata, outputdatalength); default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. */ } } diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 0d90a4334055ef..4d14c04b746e14 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c @@ -24,6 +24,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include #include #include @@ -372,6 +375,11 @@ static int convert_type86(struct zcrypt_device *zdev, if (service_rc == 8 && service_rs == 72) return -EINVAL; zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, + msg->hdr.reply_code); return -EAGAIN; /* repeat the request on a different device. 
*/ } data = msg->text; @@ -425,6 +433,10 @@ static int convert_response(struct zcrypt_device *zdev, /* no break, incorrect cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. */ } } diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index 30fa38a0ad39a3..9176bfbd574586 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c @@ -201,7 +201,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) instance->irq = IRQ_AMIGA_PORTS; instance->unique_id = z->slotaddr; - regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); + regs = ZTWO_VADDR(z->resource.start); regs->DAWR = DAWR_A2091; wdregs.SASR = ®s->SASR; diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index c0f4f4290dd60d..dd5b64726ddc3c 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -220,7 +220,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) instance->irq = IRQ_AMIGA_PORTS; - regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start); + regs = ZTWO_VADDR(res->start); regs->DAWR = DAWR_A3000; wdregs.SASR = ®s->SASR; diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index 70c521f79f7c26..f5a2ab41543bd0 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -56,7 +56,7 @@ static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev) scsi_addr = res->start + A4000T_SCSI_OFFSET; /* Fill in the required pieces of hostdata */ - hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr); + hostdata->base = ZTWO_VADDR(scsi_addr); hostdata->clock = 50; hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index 2203ac281103b9..3b6f83ffddc4e8 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c @@ -310,7 +310,7 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent) if (!request_mem_region(address, 256, "wd33c93")) return -EBUSY; - regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address)); + regs = ZTWO_VADDR(address); error = check_wd33c93(regs); if (error) diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c index cbf3476c68cd35..aff31991aea980 100644 --- a/drivers/scsi/zorro7xx.c +++ b/drivers/scsi/zorro7xx.c @@ -104,7 +104,7 @@ static int zorro7xx_init_one(struct zorro_dev *z, if (ioaddr > 0x01000000) hostdata->base = ioremap(ioaddr, zorro_resource_len(z)); else - hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr); + hostdata->base = ZTWO_VADDR(ioaddr); hostdata->clock = 50; hostdata->chip710 = 1; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 3bfdaa8d80a9cc..4bb6b11166b335 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -54,6 +54,8 @@ source "drivers/staging/rtl8188eu/Kconfig" source "drivers/staging/rts5139/Kconfig" +source "drivers/staging/rts5208/Kconfig" + source "drivers/staging/frontier/Kconfig" source "drivers/staging/phison/Kconfig" @@ -138,12 +140,8 @@ source "drivers/staging/netlogic/Kconfig" source "drivers/staging/mt29f_spinand/Kconfig" -source "drivers/staging/dwc2/Kconfig" - source "drivers/staging/lustre/Kconfig" -source "drivers/staging/btmtk_usb/Kconfig" - source "drivers/staging/xillybus/Kconfig" source "drivers/staging/dgnc/Kconfig" diff --git 
a/drivers/staging/Makefile b/drivers/staging/Makefile index b0d3303b46808f..9f07e5e160942b 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/ obj-$(CONFIG_R8712U) += rtl8712/ obj-$(CONFIG_R8188EU) += rtl8188eu/ obj-$(CONFIG_RTS5139) += rts5139/ +obj-$(CONFIG_RTS5208) += rts5208/ obj-$(CONFIG_TRANZPORT) += frontier/ obj-$(CONFIG_IDE_PHISON) += phison/ obj-$(CONFIG_LINE6_USB) += line6/ @@ -60,9 +61,7 @@ obj-$(CONFIG_DGRP) += dgrp/ obj-$(CONFIG_SB105X) += sb105x/ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/ obj-$(CONFIG_GOLDFISH) += goldfish/ -obj-$(CONFIG_USB_DWC2) += dwc2/ obj-$(CONFIG_LUSTRE_FS) += lustre/ -obj-$(CONFIG_USB_BTMTK) += btmtk_usb/ obj-$(CONFIG_XILLYBUS) += xillybus/ obj-$(CONFIG_DGNC) += dgnc/ obj-$(CONFIG_DGAP) += dgap/ diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 1e9ab6dfc90d44..b91c758883bf41 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -100,6 +100,8 @@ config SW_SYNC_USER *WARNING* improper use of this can result in deadlocking kernel drivers from userspace. +source "drivers/staging/android/ion/Kconfig" + endif # if ANDROID endmenu diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index c136299e05afd6..0a01e1914905fc 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,5 +1,7 @@ ccflags-y += -I$(src) # needed for trace events +obj-y += ion/ + obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o obj-$(CONFIG_ASHMEM) += ashmem.o obj-$(CONFIG_ANDROID_LOGGER) += logger.o diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c index 647694f43dcf12..2fc7cdd4c4e3b5 100644 --- a/drivers/staging/android/alarm-dev.c +++ b/drivers/staging/android/alarm-dev.c @@ -68,11 +68,10 @@ static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT]; */ static int is_wakeup(enum android_alarm_type type) { - return (type == ANDROID_ALARM_RTC_WAKEUP || - type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP); + return type == ANDROID_ALARM_RTC_WAKEUP || + type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP; } - static void devalarm_start(struct devalarm *alrm, ktime_t exp) { if (is_wakeup(alrm->type)) @@ -111,7 +110,6 @@ static void alarm_clear(enum android_alarm_type alarm_type) } alarm_enabled &= ~alarm_type_mask; spin_unlock_irqrestore(&alarm_slock, flags); - } static void alarm_set(enum android_alarm_type alarm_type, @@ -280,6 +278,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; } + #ifdef CONFIG_COMPAT static long alarm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -371,7 +370,6 @@ static void devalarm_triggered(struct devalarm *alarm) spin_unlock_irqrestore(&alarm_slock, flags); } - static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt) { struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt); diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig new file mode 100644 index 00000000000000..0f8fec1f84e557 --- /dev/null +++ b/drivers/staging/android/ion/Kconfig @@ -0,0 +1,35 @@ +menuconfig ION + bool "Ion Memory Manager" + depends on HAVE_MEMBLOCK + select GENERIC_ALLOCATOR + select DMA_SHARED_BUFFER + ---help--- + Choose this option to enable the ION Memory Manager, + used by Android to efficiently allocate buffers + from userspace that can be shared between drivers. + If you're not using Android it's probably safe to + say N here.
+ +config ION_TEST + tristate "Ion Test Device" + depends on ION + help + Choose this option to create a device that can be used to test the + kernel and device side ION functions. + +config ION_DUMMY + bool "Dummy Ion driver" + depends on ION + help + Provides a dummy ION driver that registers the + /dev/ion device and some basic heaps. This can + be used for testing the ION infrastructure if + one doesn't have access to hardware drivers that + use ION. + +config ION_TEGRA + tristate "Ion for Tegra" + depends on ARCH_TEGRA && ION + help + Choose this option if you wish to use ion on an nVidia Tegra. + diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile new file mode 100644 index 00000000000000..b56fd2bf2b4f48 --- /dev/null +++ b/drivers/staging/android/ion/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \ + ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o +obj-$(CONFIG_ION_TEST) += ion_test.o +ifdef CONFIG_COMPAT +obj-$(CONFIG_ION) += compat_ion.o +endif + +obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o +obj-$(CONFIG_ION_TEGRA) += tegra/ + diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c new file mode 100644 index 00000000000000..af6cd370b30f3e --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.c @@ -0,0 +1,177 @@ +/* + * drivers/staging/android/ion/compat_ion.c + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include + +#include "ion.h" +#include "compat_ion.h" + +/* See drivers/staging/android/uapi/ion.h for the definition of these structs */ +struct compat_ion_allocation_data { + compat_size_t len; + compat_size_t align; + compat_uint_t heap_id_mask; + compat_uint_t flags; + compat_int_t handle; +}; + +struct compat_ion_custom_data { + compat_uint_t cmd; + compat_ulong_t arg; +}; + +#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct compat_ion_allocation_data) +#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) +#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ + struct compat_ion_custom_data) + +static int compat_get_ion_allocation_data( + struct compat_ion_allocation_data __user *data32, + struct ion_allocation_data __user *data) +{ + compat_size_t s; + compat_uint_t u; + compat_int_t i; + int err; + + err = get_user(s, &data32->len); + err |= put_user(s, &data->len); + err |= get_user(s, &data32->align); + err |= put_user(s, &data->align); + err |= get_user(u, &data32->heap_id_mask); + err |= put_user(u, &data->heap_id_mask); + err |= get_user(u, &data32->flags); + err |= put_user(u, &data->flags); + err |= get_user(i, &data32->handle); + err |= put_user(i, &data->handle); + + return err; +} + +static int compat_put_ion_allocation_data( + struct compat_ion_allocation_data __user *data32, + struct ion_allocation_data __user *data) +{ + compat_size_t s; + compat_uint_t u; + compat_int_t i; + int err; + + err = get_user(s, &data->len); + err |= put_user(s, &data32->len); + err |= get_user(s, &data->align); + err |= put_user(s, &data32->align); + err |= get_user(u, &data->heap_id_mask); + err |= put_user(u, &data32->heap_id_mask); + err |= get_user(u, &data->flags); + err |= put_user(u, &data32->flags); + err |= get_user(i, &data->handle); + err |= put_user(i, &data32->handle); + + return err; +} + +static int compat_get_ion_custom_data( + struct compat_ion_custom_data __user *data32, + struct ion_custom_data __user *data) +{ + compat_uint_t cmd; + compat_ulong_t arg; + int err; + + err = get_user(cmd, &data32->cmd); + err |= put_user(cmd, &data->cmd); + err |= get_user(arg, &data32->arg); + err |= put_user(arg, &data->arg); + + return err; +}; + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long ret; + + if (!filp->f_op || !filp->f_op->unlocked_ioctl) + return -ENOTTY; + + switch (cmd) { + case COMPAT_ION_IOC_ALLOC: + { + struct compat_ion_allocation_data __user *data32; + struct ion_allocation_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_allocation_data(data32, data); + if (err) + return err; + ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, + (unsigned long)data); + err = compat_put_ion_allocation_data(data32, data); + return ret ? 
ret : err; + } + case COMPAT_ION_IOC_FREE: + { + struct compat_ion_allocation_data __user *data32; + struct ion_allocation_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_allocation_data(data32, data); + if (err) + return err; + + return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, + (unsigned long)data); + } + case COMPAT_ION_IOC_CUSTOM: { + struct compat_ion_custom_data __user *data32; + struct ion_custom_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_custom_data(data32, data); + if (err) + return err; + + return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, + (unsigned long)data); + } + case ION_IOC_SHARE: + case ION_IOC_MAP: + case ION_IOC_IMPORT: + case ION_IOC_SYNC: + return filp->f_op->unlocked_ioctl(filp, cmd, + (unsigned long)compat_ptr(arg)); + default: + return -ENOIOCTLCMD; + } +} diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h new file mode 100644 index 00000000000000..c2ad5893dfdadc --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.h @@ -0,0 +1,30 @@ +/* + + * drivers/staging/android/ion/compat_ion.h + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_COMPAT_ION_H +#define _LINUX_COMPAT_ION_H + +#if IS_ENABLED(CONFIG_COMPAT) + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +#else + +#define compat_ion_ioctl NULL + +#endif /* CONFIG_COMPAT */ +#endif /* _LINUX_COMPAT_ION_H */ diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c new file mode 100644 index 00000000000000..574066ff73f80d --- /dev/null +++ b/drivers/staging/android/ion/ion.c @@ -0,0 +1,1549 @@ +/* + + * drivers/staging/android/ion/ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ion.h" +#include "ion_priv.h" +#include "compat_ion.h" + +/** + * struct ion_device - the metadata of the ion device node + * @dev: the actual misc device + * @buffers: an rb tree of all the existing buffers + * @buffer_lock: lock protecting the tree of buffers + * @lock: rwsem protecting the tree of heaps and clients + * @heaps: list of all the heaps in the system + * @user_clients: list of all the clients created from userspace + */ +struct ion_device { + struct miscdevice dev; + struct rb_root buffers; + struct mutex buffer_lock; + struct rw_semaphore lock; + struct plist_head heaps; + long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, + unsigned long arg); + struct rb_root clients; + struct dentry *debug_root; +}; + +/** + * struct ion_client - a process/hw block local address space + * @node: node in the tree of all clients + * @dev: backpointer to ion device + * @handles: an rb tree of all the handles in this client + * @idr: an idr space for allocating handle ids + * @lock: lock protecting the tree of handles + * @name: used for debugging + * @task: used for debugging + * + * A client represents a list of buffers this client may access. + * The mutex stored here is used to protect both handles tree + * as well as the handles themselves, and should be held while modifying either. + */ +struct ion_client { + struct rb_node node; + struct ion_device *dev; + struct rb_root handles; + struct idr idr; + struct mutex lock; + const char *name; + struct task_struct *task; + pid_t pid; + struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref: reference count + * @client: back pointer to the client the buffer resides in + * @buffer: pointer to the buffer + * @node: node in the client's handle rbtree + * @kmap_cnt: count of times this client has mapped to kernel + * @id: client-unique id allocated by client->idr + * + * Modifications to node, map_cnt or mapping should be protected by the + * lock in the client. Other fields are never changed after initialization. 
+ */ +struct ion_handle { + struct kref ref; + struct ion_client *client; + struct ion_buffer *buffer; + struct rb_node node; + unsigned int kmap_cnt; + int id; +}; + +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) +{ + return (buffer->flags & ION_FLAG_CACHED) && + !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); +} + +bool ion_buffer_cached(struct ion_buffer *buffer) +{ + return !!(buffer->flags & ION_FLAG_CACHED); +} + +static inline struct page *ion_buffer_page(struct page *page) +{ + return (struct page *)((unsigned long)page & ~(1UL)); +} + +static inline bool ion_buffer_page_is_dirty(struct page *page) +{ + return !!((unsigned long)page & 1UL); +} + +static inline void ion_buffer_page_dirty(struct page **page) +{ + *page = (struct page *)((unsigned long)(*page) | 1UL); +} + +static inline void ion_buffer_page_clean(struct page **page) +{ + *page = (struct page *)((unsigned long)(*page) & ~(1UL)); +} + +/* this function should only be called while dev->lock is held */ +static void ion_buffer_add(struct ion_device *dev, + struct ion_buffer *buffer) +{ + struct rb_node **p = &dev->buffers.rb_node; + struct rb_node *parent = NULL; + struct ion_buffer *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_buffer, node); + + if (buffer < entry) { + p = &(*p)->rb_left; + } else if (buffer > entry) { + p = &(*p)->rb_right; + } else { + pr_err("%s: buffer already found.", __func__); + BUG(); + } + } + + rb_link_node(&buffer->node, parent, p); + rb_insert_color(&buffer->node, &dev->buffers); +} + +/* this function should only be called while dev->lock is held */ +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, + struct ion_device *dev, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + struct ion_buffer *buffer; + struct sg_table *table; + struct scatterlist *sg; + int i, ret; + + buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + buffer->heap = heap; + buffer->flags = flags; + kref_init(&buffer->ref); + + ret = heap->ops->allocate(heap, buffer, len, align, flags); + + if (ret) { + if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) + goto err2; + + ion_heap_freelist_drain(heap, 0); + ret = heap->ops->allocate(heap, buffer, len, align, + flags); + if (ret) + goto err2; + } + + buffer->dev = dev; + buffer->size = len; + + table = heap->ops->map_dma(heap, buffer); + if (WARN_ONCE(table == NULL, + "heap->ops->map_dma should return ERR_PTR on error")) + table = ERR_PTR(-EINVAL); + if (IS_ERR(table)) { + heap->ops->free(buffer); + kfree(buffer); + return ERR_PTR(PTR_ERR(table)); + } + buffer->sg_table = table; + if (ion_buffer_fault_user_mappings(buffer)) { + int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct scatterlist *sg; + int i, j, k = 0; + + buffer->pages = vmalloc(sizeof(struct page *) * num_pages); + if (!buffer->pages) { + ret = -ENOMEM; + goto err1; + } + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + + for (j = 0; j < sg->length / PAGE_SIZE; j++) + buffer->pages[k++] = page++; + } + + if (ret) + goto err; + } + + buffer->dev = dev; + buffer->size = len; + INIT_LIST_HEAD(&buffer->vmas); + mutex_init(&buffer->lock); + /* this will set up dma addresses for the sglist -- it is not + technically correct as per the dma api -- a specific + device isn't really taking ownership here. However, in practice on + our systems the only dma_address space is physical addresses. 
+ Additionally, we can't afford the overhead of invalidating every + allocation via dma_map_sg. The implicit contract here is that + memory comming from the heaps is ready for dma, ie if it has a + cached mapping that mapping has been invalidated */ + for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) + sg_dma_address(sg) = sg_phys(sg); + mutex_lock(&dev->buffer_lock); + ion_buffer_add(dev, buffer); + mutex_unlock(&dev->buffer_lock); + return buffer; + +err: + heap->ops->unmap_dma(heap, buffer); + heap->ops->free(buffer); +err1: + if (buffer->pages) + vfree(buffer->pages); +err2: + kfree(buffer); + return ERR_PTR(ret); +} + +void ion_buffer_destroy(struct ion_buffer *buffer) +{ + if (WARN_ON(buffer->kmap_cnt > 0)) + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->heap->ops->unmap_dma(buffer->heap, buffer); + buffer->heap->ops->free(buffer); + if (buffer->pages) + vfree(buffer->pages); + kfree(buffer); +} + +static void _ion_buffer_destroy(struct kref *kref) +{ + struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); + struct ion_heap *heap = buffer->heap; + struct ion_device *dev = buffer->dev; + + mutex_lock(&dev->buffer_lock); + rb_erase(&buffer->node, &dev->buffers); + mutex_unlock(&dev->buffer_lock); + + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + ion_heap_freelist_add(heap, buffer); + else + ion_buffer_destroy(buffer); +} + +static void ion_buffer_get(struct ion_buffer *buffer) +{ + kref_get(&buffer->ref); +} + +static int ion_buffer_put(struct ion_buffer *buffer) +{ + return kref_put(&buffer->ref, _ion_buffer_destroy); +} + +static void ion_buffer_add_to_handle(struct ion_buffer *buffer) +{ + mutex_lock(&buffer->lock); + buffer->handle_count++; + mutex_unlock(&buffer->lock); +} + +static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) +{ + /* + * when a buffer is removed from a handle, if it is not in + * any other handles, copy the taskcomm and the pid of the + * process it's being removed from into the buffer. At this + * point there will be no way to track what processes this buffer is + * being used by, it only exists as a dma_buf file descriptor. 
+ * The taskcomm and pid can provide a debug hint as to where this fd + * is in the system + */ + mutex_lock(&buffer->lock); + buffer->handle_count--; + BUG_ON(buffer->handle_count < 0); + if (!buffer->handle_count) { + struct task_struct *task; + + task = current->group_leader; + get_task_comm(buffer->task_comm, task); + buffer->pid = task_pid_nr(task); + } + mutex_unlock(&buffer->lock); +} + +static struct ion_handle *ion_handle_create(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle; + + handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); + if (!handle) + return ERR_PTR(-ENOMEM); + kref_init(&handle->ref); + RB_CLEAR_NODE(&handle->node); + handle->client = client; + ion_buffer_get(buffer); + ion_buffer_add_to_handle(buffer); + handle->buffer = buffer; + + return handle; +} + +static void ion_handle_kmap_put(struct ion_handle *); + +static void ion_handle_destroy(struct kref *kref) +{ + struct ion_handle *handle = container_of(kref, struct ion_handle, ref); + struct ion_client *client = handle->client; + struct ion_buffer *buffer = handle->buffer; + + mutex_lock(&buffer->lock); + while (handle->kmap_cnt) + ion_handle_kmap_put(handle); + mutex_unlock(&buffer->lock); + + idr_remove(&client->idr, handle->id); + if (!RB_EMPTY_NODE(&handle->node)) + rb_erase(&handle->node, &client->handles); + + ion_buffer_remove_from_handle(buffer); + ion_buffer_put(buffer); + + kfree(handle); +} + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) +{ + return handle->buffer; +} + +static void ion_handle_get(struct ion_handle *handle) +{ + kref_get(&handle->ref); +} + +static int ion_handle_put(struct ion_handle *handle) +{ + struct ion_client *client = handle->client; + int ret; + + mutex_lock(&client->lock); + ret = kref_put(&handle->ref, ion_handle_destroy); + mutex_unlock(&client->lock); + + return ret; +} + +static struct ion_handle *ion_handle_lookup(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct rb_node *n = client->handles.rb_node; + + while (n) { + struct ion_handle *entry = rb_entry(n, struct ion_handle, node); + if (buffer < entry->buffer) + n = n->rb_left; + else if (buffer > entry->buffer) + n = n->rb_right; + else + return entry; + } + return ERR_PTR(-EINVAL); +} + +static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id) +{ + struct ion_handle *handle; + + mutex_lock(&client->lock); + handle = idr_find(&client->idr, id); + if (handle) + ion_handle_get(handle); + mutex_unlock(&client->lock); + + return handle ? 
handle : ERR_PTR(-EINVAL); +} + +static bool ion_handle_validate(struct ion_client *client, + struct ion_handle *handle) +{ + WARN_ON(!mutex_is_locked(&client->lock)); + return (idr_find(&client->idr, handle->id) == handle); +} + +static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) +{ + int id; + struct rb_node **p = &client->handles.rb_node; + struct rb_node *parent = NULL; + struct ion_handle *entry; + + id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); + if (id < 0) + return id; + + handle->id = id; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_handle, node); + + if (handle->buffer < entry->buffer) + p = &(*p)->rb_left; + else if (handle->buffer > entry->buffer) + p = &(*p)->rb_right; + else + WARN(1, "%s: buffer already found.", __func__); + } + + rb_link_node(&handle->node, parent, p); + rb_insert_color(&handle->node, &client->handles); + + return 0; +} + +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_id_mask, + unsigned int flags) +{ + struct ion_handle *handle; + struct ion_device *dev = client->dev; + struct ion_buffer *buffer = NULL; + struct ion_heap *heap; + int ret; + + pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, + len, align, heap_id_mask, flags); + /* + * traverse the list of heaps available in this system in priority + * order. If the heap type is supported by the client, and matches the + * request of the caller allocate from it. Repeat until allocate has + * succeeded or all heaps have been tried + */ + len = PAGE_ALIGN(len); + + if (!len) + return ERR_PTR(-EINVAL); + + down_read(&dev->lock); + plist_for_each_entry(heap, &dev->heaps, node) { + /* if the caller didn't specify this heap id */ + if (!((1 << heap->id) & heap_id_mask)) + continue; + buffer = ion_buffer_create(heap, dev, len, align, flags); + if (!IS_ERR(buffer)) + break; + } + up_read(&dev->lock); + + if (buffer == NULL) + return ERR_PTR(-ENODEV); + + if (IS_ERR(buffer)) + return ERR_PTR(PTR_ERR(buffer)); + + handle = ion_handle_create(client, buffer); + + /* + * ion_buffer_create will create a buffer with a ref_cnt of 1, + * and ion_handle_create will take a second reference, drop one here + */ + ion_buffer_put(buffer); + + if (IS_ERR(handle)) + return handle; + + mutex_lock(&client->lock); + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { + ion_handle_put(handle); + handle = ERR_PTR(ret); + } + + return handle; +} +EXPORT_SYMBOL(ion_alloc); + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ + bool valid_handle; + + BUG_ON(client != handle->client); + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to free.\n", __func__); + mutex_unlock(&client->lock); + return; + } + mutex_unlock(&client->lock); + ion_handle_put(handle); +} +EXPORT_SYMBOL(ion_free); + +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_buffer *buffer; + int ret; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + mutex_unlock(&client->lock); + return -EINVAL; + } + + buffer = handle->buffer; + + if (!buffer->heap->ops->phys) { + pr_err("%s: ion_phys is not implemented by this heap.\n", + __func__); + mutex_unlock(&client->lock); + return -ENODEV; + } + mutex_unlock(&client->lock); + ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); + return 
ret; +} +EXPORT_SYMBOL(ion_phys); + +static void *ion_buffer_kmap_get(struct ion_buffer *buffer) +{ + void *vaddr; + + if (buffer->kmap_cnt) { + buffer->kmap_cnt++; + return buffer->vaddr; + } + vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); + if (WARN_ONCE(vaddr == NULL, + "heap->ops->map_kernel should return ERR_PTR on error")) + return ERR_PTR(-EINVAL); + if (IS_ERR(vaddr)) + return vaddr; + buffer->vaddr = vaddr; + buffer->kmap_cnt++; + return vaddr; +} + +static void *ion_handle_kmap_get(struct ion_handle *handle) +{ + struct ion_buffer *buffer = handle->buffer; + void *vaddr; + + if (handle->kmap_cnt) { + handle->kmap_cnt++; + return buffer->vaddr; + } + vaddr = ion_buffer_kmap_get(buffer); + if (IS_ERR(vaddr)) + return vaddr; + handle->kmap_cnt++; + return vaddr; +} + +static void ion_buffer_kmap_put(struct ion_buffer *buffer) +{ + buffer->kmap_cnt--; + if (!buffer->kmap_cnt) { + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->vaddr = NULL; + } +} + +static void ion_handle_kmap_put(struct ion_handle *handle) +{ + struct ion_buffer *buffer = handle->buffer; + + handle->kmap_cnt--; + if (!handle->kmap_cnt) + ion_buffer_kmap_put(buffer); +} + +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + void *vaddr; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_kernel.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + + buffer = handle->buffer; + + if (!handle->buffer->heap->ops->map_kernel) { + pr_err("%s: map_kernel is not implemented by this heap.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&buffer->lock); + vaddr = ion_handle_kmap_get(handle); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return vaddr; +} +EXPORT_SYMBOL(ion_map_kernel); + +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + mutex_lock(&buffer->lock); + ion_handle_kmap_put(handle); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_unmap_kernel); + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ + struct ion_client *client = s->private; + struct rb_node *n; + size_t sizes[ION_NUM_HEAP_IDS] = {0}; + const char *names[ION_NUM_HEAP_IDS] = {NULL}; + int i; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + unsigned int id = handle->buffer->heap->id; + + if (!names[id]) + names[id] = handle->buffer->heap->name; + sizes[id] += handle->buffer->size; + } + mutex_unlock(&client->lock); + + seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); + for (i = 0; i < ION_NUM_HEAP_IDS; i++) { + if (!names[i]) + continue; + seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); + } + return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { + .open = ion_debug_client_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct ion_client *ion_client_create(struct ion_device *dev, + const char *name) +{ + struct ion_client *client; + struct task_struct *task; + struct rb_node **p; + struct rb_node 
*parent = NULL; + struct ion_client *entry; + char debug_name[64]; + pid_t pid; + + get_task_struct(current->group_leader); + task_lock(current->group_leader); + pid = task_pid_nr(current->group_leader); + /* don't bother to store task struct for kernel threads, + they can't be killed anyway */ + if (current->group_leader->flags & PF_KTHREAD) { + put_task_struct(current->group_leader); + task = NULL; + } else { + task = current->group_leader; + } + task_unlock(current->group_leader); + + client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); + if (!client) { + if (task) + put_task_struct(current->group_leader); + return ERR_PTR(-ENOMEM); + } + + client->dev = dev; + client->handles = RB_ROOT; + idr_init(&client->idr); + mutex_init(&client->lock); + client->name = name; + client->task = task; + client->pid = pid; + + down_write(&dev->lock); + p = &dev->clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (client < entry) + p = &(*p)->rb_left; + else if (client > entry) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->clients); + + snprintf(debug_name, 64, "%u", client->pid); + client->debug_root = debugfs_create_file(debug_name, 0664, + dev->debug_root, client, + &debug_client_fops); + up_write(&dev->lock); + + return client; +} +EXPORT_SYMBOL(ion_client_create); + +void ion_client_destroy(struct ion_client *client) +{ + struct ion_device *dev = client->dev; + struct rb_node *n; + + pr_debug("%s: %d\n", __func__, __LINE__); + while ((n = rb_first(&client->handles))) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + ion_handle_destroy(&handle->ref); + } + + idr_destroy(&client->idr); + + down_write(&dev->lock); + if (client->task) + put_task_struct(client->task); + rb_erase(&client->node, &dev->clients); + debugfs_remove_recursive(client->debug_root); + up_write(&dev->lock); + + kfree(client); +} +EXPORT_SYMBOL(ion_client_destroy); + +struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle) +{ + struct ion_buffer *buffer; + struct sg_table *table; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_dma.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + table = buffer->sg_table; + mutex_unlock(&client->lock); + return table; +} +EXPORT_SYMBOL(ion_sg_table); + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, + struct device *dev, + enum dma_data_direction direction); + +static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_buf *dmabuf = attachment->dmabuf; + struct ion_buffer *buffer = dmabuf->priv; + + ion_buffer_sync_for_device(buffer, attachment->dev, direction); + return buffer->sg_table; +} + +static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ +} + +void ion_pages_sync_for_device(struct device *dev, struct page *page, + size_t size, enum dma_data_direction dir) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + /* + * This is not correct - sg_dma_address needs a dma_addr_t that is valid + * for the the targeted device, but this works on the currently targeted + * hardware. 
+ */ + sg_dma_address(&sg) = page_to_phys(page); + dma_sync_sg_for_device(dev, &sg, 1, dir); +} + +struct ion_vma_list { + struct list_head list; + struct vm_area_struct *vma; +}; + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, + struct device *dev, + enum dma_data_direction dir) +{ + struct ion_vma_list *vma_list; + int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + int i; + + pr_debug("%s: syncing for device %s\n", __func__, + dev ? dev_name(dev) : "null"); + + if (!ion_buffer_fault_user_mappings(buffer)) + return; + + mutex_lock(&buffer->lock); + for (i = 0; i < pages; i++) { + struct page *page = buffer->pages[i]; + + if (ion_buffer_page_is_dirty(page)) + ion_pages_sync_for_device(dev, ion_buffer_page(page), + PAGE_SIZE, dir); + + ion_buffer_page_clean(buffer->pages + i); + } + list_for_each_entry(vma_list, &buffer->vmas, list) { + struct vm_area_struct *vma = vma_list->vma; + + zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, + NULL); + } + mutex_unlock(&buffer->lock); +} + +static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct ion_buffer *buffer = vma->vm_private_data; + unsigned long pfn; + int ret; + + mutex_lock(&buffer->lock); + ion_buffer_page_dirty(buffer->pages + vmf->pgoff); + BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); + + pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); + ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + mutex_unlock(&buffer->lock); + if (ret) + return VM_FAULT_ERROR; + + return VM_FAULT_NOPAGE; +} + +static void ion_vm_open(struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = vma->vm_private_data; + struct ion_vma_list *vma_list; + + vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); + if (!vma_list) + return; + vma_list->vma = vma; + mutex_lock(&buffer->lock); + list_add(&vma_list->list, &buffer->vmas); + mutex_unlock(&buffer->lock); + pr_debug("%s: adding %p\n", __func__, vma); +} + +static void ion_vm_close(struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = vma->vm_private_data; + struct ion_vma_list *vma_list, *tmp; + + pr_debug("%s\n", __func__); + mutex_lock(&buffer->lock); + list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { + if (vma_list->vma != vma) + continue; + list_del(&vma_list->list); + kfree(vma_list); + pr_debug("%s: deleting %p\n", __func__, vma); + break; + } + mutex_unlock(&buffer->lock); +} + +static struct vm_operations_struct ion_vma_ops = { + .open = ion_vm_open, + .close = ion_vm_close, + .fault = ion_vm_fault, +}; + +static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = dmabuf->priv; + int ret = 0; + + if (!buffer->heap->ops->map_user) { + pr_err("%s: this heap does not define a method for mapping " + "to userspace\n", __func__); + return -EINVAL; + } + + if (ion_buffer_fault_user_mappings(buffer)) { + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | + VM_DONTDUMP; + vma->vm_private_data = buffer; + vma->vm_ops = &ion_vma_ops; + ion_vm_open(vma); + return 0; + } + + if (!(buffer->flags & ION_FLAG_CACHED)) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + mutex_lock(&buffer->lock); + /* now map it to userspace */ + ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); + mutex_unlock(&buffer->lock); + + if (ret) + pr_err("%s: failure mapping buffer to userspace\n", + __func__); + + return ret; +} + +static void ion_dma_buf_release(struct dma_buf *dmabuf) +{ + struct ion_buffer *buffer = dmabuf->priv; + 
ion_buffer_put(buffer); +} + +static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) +{ + struct ion_buffer *buffer = dmabuf->priv; + return buffer->vaddr + offset * PAGE_SIZE; +} + +static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, + void *ptr) +{ + return; +} + +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, + size_t len, + enum dma_data_direction direction) +{ + struct ion_buffer *buffer = dmabuf->priv; + void *vaddr; + + if (!buffer->heap->ops->map_kernel) { + pr_err("%s: map kernel is not implemented by this heap.\n", + __func__); + return -ENODEV; + } + + mutex_lock(&buffer->lock); + vaddr = ion_buffer_kmap_get(buffer); + mutex_unlock(&buffer->lock); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + return 0; +} + +static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, + size_t len, + enum dma_data_direction direction) +{ + struct ion_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + ion_buffer_kmap_put(buffer); + mutex_unlock(&buffer->lock); +} + +static struct dma_buf_ops dma_buf_ops = { + .map_dma_buf = ion_map_dma_buf, + .unmap_dma_buf = ion_unmap_dma_buf, + .mmap = ion_mmap, + .release = ion_dma_buf_release, + .begin_cpu_access = ion_dma_buf_begin_cpu_access, + .end_cpu_access = ion_dma_buf_end_cpu_access, + .kmap_atomic = ion_dma_buf_kmap, + .kunmap_atomic = ion_dma_buf_kunmap, + .kmap = ion_dma_buf_kmap, + .kunmap = ion_dma_buf_kunmap, +}; + +struct dma_buf *ion_share_dma_buf(struct ion_client *client, + struct ion_handle *handle) +{ + struct ion_buffer *buffer; + struct dma_buf *dmabuf; + bool valid_handle; + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to share.\n", __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + ion_buffer_get(buffer); + mutex_unlock(&client->lock); + + dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); + if (IS_ERR(dmabuf)) { + ion_buffer_put(buffer); + return dmabuf; + } + + return dmabuf; +} +EXPORT_SYMBOL(ion_share_dma_buf); + +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) +{ + struct dma_buf *dmabuf; + int fd; + + dmabuf = ion_share_dma_buf(client, handle); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, O_CLOEXEC); + if (fd < 0) + dma_buf_put(dmabuf); + + return fd; +} +EXPORT_SYMBOL(ion_share_dma_buf_fd); + +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_buffer *buffer; + struct ion_handle *handle; + int ret; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return ERR_PTR(PTR_ERR(dmabuf)); + /* if this memory came from ion */ + + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not import dmabuf from another exporter\n", + __func__); + dma_buf_put(dmabuf); + return ERR_PTR(-EINVAL); + } + buffer = dmabuf->priv; + + mutex_lock(&client->lock); + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR(handle)) { + ion_handle_get(handle); + mutex_unlock(&client->lock); + goto end; + } + mutex_unlock(&client->lock); + + handle = ion_handle_create(client, buffer); + if (IS_ERR(handle)) + goto end; + + mutex_lock(&client->lock); + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { + ion_handle_put(handle); + handle = ERR_PTR(ret); + } + +end: 
+ dma_buf_put(dmabuf); + return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf); + +static int ion_sync_for_device(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_buffer *buffer; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + /* if this memory came from ion */ + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not sync dmabuf from another exporter\n", + __func__); + dma_buf_put(dmabuf); + return -EINVAL; + } + buffer = dmabuf->priv; + + dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_BIDIRECTIONAL); + dma_buf_put(dmabuf); + return 0; +} + +/* fix up the cases where the ioctl direction bits are incorrect */ +static unsigned int ion_ioctl_dir(unsigned int cmd) +{ + switch (cmd) { + case ION_IOC_SYNC: + case ION_IOC_FREE: + case ION_IOC_CUSTOM: + return _IOC_WRITE; + default: + return _IOC_DIR(cmd); + } +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ion_client *client = filp->private_data; + struct ion_device *dev = client->dev; + struct ion_handle *cleanup_handle = NULL; + int ret = 0; + unsigned int dir; + + union { + struct ion_fd_data fd; + struct ion_allocation_data allocation; + struct ion_handle_data handle; + struct ion_custom_data custom; + } data; + + dir = ion_ioctl_dir(cmd); + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (dir & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_ALLOC: + { + struct ion_handle *handle; + + handle = ion_alloc(client, data.allocation.len, + data.allocation.align, + data.allocation.heap_id_mask, + data.allocation.flags); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + data.allocation.handle = handle->id; + + cleanup_handle = handle; + break; + } + case ION_IOC_FREE: + { + struct ion_handle *handle; + + handle = ion_handle_get_by_id(client, data.handle.handle); + if (IS_ERR(handle)) + return PTR_ERR(handle); + ion_free(client, handle); + ion_handle_put(handle); + break; + } + case ION_IOC_SHARE: + case ION_IOC_MAP: + { + struct ion_handle *handle; + + handle = ion_handle_get_by_id(client, data.handle.handle); + if (IS_ERR(handle)) + return PTR_ERR(handle); + data.fd.fd = ion_share_dma_buf_fd(client, handle); + ion_handle_put(handle); + if (data.fd.fd < 0) + ret = data.fd.fd; + break; + } + case ION_IOC_IMPORT: + { + struct ion_handle *handle; + handle = ion_import_dma_buf(client, data.fd.fd); + if (IS_ERR(handle)) + ret = PTR_ERR(handle); + else + data.handle.handle = handle->id; + break; + } + case ION_IOC_SYNC: + { + ret = ion_sync_for_device(client, data.fd.fd); + break; + } + case ION_IOC_CUSTOM: + { + if (!dev->custom_ioctl) + return -ENOTTY; + ret = dev->custom_ioctl(client, data.custom.cmd, + data.custom.arg); + break; + } + default: + return -ENOTTY; + } + + if (dir & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { + if (cleanup_handle) + ion_free(client, cleanup_handle); + return -EFAULT; + } + } + return ret; +} + +static int ion_release(struct inode *inode, struct file *file) +{ + struct ion_client *client = file->private_data; + + pr_debug("%s: %d\n", __func__, __LINE__); + ion_client_destroy(client); + return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ + struct miscdevice *miscdev = file->private_data; + struct ion_device *dev = container_of(miscdev, struct ion_device, dev); + struct ion_client *client; + + pr_debug("%s: %d\n", __func__, 
__LINE__); + client = ion_client_create(dev, "user"); + if (IS_ERR(client)) + return PTR_ERR(client); + file->private_data = client; + + return 0; +} + +static const struct file_operations ion_fops = { + .owner = THIS_MODULE, + .open = ion_open, + .release = ion_release, + .unlocked_ioctl = ion_ioctl, + .compat_ioctl = compat_ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, + unsigned int id) +{ + size_t size = 0; + struct rb_node *n; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, + struct ion_handle, + node); + if (handle->buffer->heap->id == id) + size += handle->buffer->size; + } + mutex_unlock(&client->lock); + return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ + struct ion_heap *heap = s->private; + struct ion_device *dev = heap->dev; + struct rb_node *n; + size_t total_size = 0; + size_t total_orphaned_size = 0; + + seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); + seq_printf(s, "----------------------------------------------------\n"); + + for (n = rb_first(&dev->clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + size_t size = ion_debug_heap_total(client, heap->id); + if (!size) + continue; + if (client->task) { + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, client->task); + seq_printf(s, "%16.s %16u %16zu\n", task_comm, + client->pid, size); + } else { + seq_printf(s, "%16.s %16u %16zu\n", client->name, + client->pid, size); + } + } + seq_printf(s, "----------------------------------------------------\n"); + seq_printf(s, "orphaned allocations (info is from last known client):" + "\n"); + mutex_lock(&dev->buffer_lock); + for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { + struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, + node); + if (buffer->heap->id != heap->id) + continue; + total_size += buffer->size; + if (!buffer->handle_count) { + seq_printf(s, "%16.s %16u %16zu %d %d\n", + buffer->task_comm, buffer->pid, + buffer->size, buffer->kmap_cnt, + atomic_read(&buffer->ref.refcount)); + total_orphaned_size += buffer->size; + } + } + mutex_unlock(&dev->buffer_lock); + seq_printf(s, "----------------------------------------------------\n"); + seq_printf(s, "%16.s %16zu\n", "total orphaned", + total_orphaned_size); + seq_printf(s, "%16.s %16zu\n", "total ", total_size); + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + seq_printf(s, "%16.s %16zu\n", "deferred free", + heap->free_list_size); + seq_printf(s, "----------------------------------------------------\n"); + + if (heap->debug_show) + heap->debug_show(heap, s, unused); + + return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct file_operations debug_heap_fops = { + .open = ion_debug_heap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef DEBUG_HEAP_SHRINKER +static int debug_shrink_set(void *data, u64 val) +{ + struct ion_heap *heap = data; + struct shrink_control sc; + int objs; + + sc.gfp_mask = -1; + sc.nr_to_scan = 0; + + if (!val) + return 0; + + objs = heap->shrinker.shrink(&heap->shrinker, &sc); + sc.nr_to_scan = objs; + + heap->shrinker.shrink(&heap->shrinker, &sc); + return 0; +} + +static int debug_shrink_get(void *data, u64 *val) +{ + struct ion_heap *heap = data; + struct shrink_control sc; + int objs; + + 
sc.gfp_mask = -1; + sc.nr_to_scan = 0; + + objs = heap->shrinker.shrink(&heap->shrinker, &sc); + *val = objs; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, + debug_shrink_set, "%llu\n"); +#endif + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ + if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || + !heap->ops->unmap_dma) + pr_err("%s: can not add heap with invalid ops struct.\n", + __func__); + + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + ion_heap_init_deferred_free(heap); + + heap->dev = dev; + down_write(&dev->lock); + /* use negative heap->id to reverse the priority -- when traversing + the list later attempt higher id numbers first */ + plist_node_init(&heap->node, -heap->id); + plist_add(&heap->node, &dev->heaps); + debugfs_create_file(heap->name, 0664, dev->debug_root, heap, + &debug_heap_fops); +#ifdef DEBUG_HEAP_SHRINKER + if (heap->shrinker.shrink) { + char debug_name[64]; + + snprintf(debug_name, 64, "%s_shrink", heap->name); + debugfs_create_file(debug_name, 0644, dev->debug_root, heap, + &debug_shrink_fops); + } +#endif + up_write(&dev->lock); +} + +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)) +{ + struct ion_device *idev; + int ret; + + idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); + if (!idev) + return ERR_PTR(-ENOMEM); + + idev->dev.minor = MISC_DYNAMIC_MINOR; + idev->dev.name = "ion"; + idev->dev.fops = &ion_fops; + idev->dev.parent = NULL; + ret = misc_register(&idev->dev); + if (ret) { + pr_err("ion: failed to register misc device.\n"); + return ERR_PTR(ret); + } + + idev->debug_root = debugfs_create_dir("ion", NULL); + if (!idev->debug_root) + pr_err("ion: failed to create debug files.\n"); + + idev->custom_ioctl = custom_ioctl; + idev->buffers = RB_ROOT; + mutex_init(&idev->buffer_lock); + init_rwsem(&idev->lock); + plist_head_init(&idev->heaps); + idev->clients = RB_ROOT; + return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ + misc_deregister(&dev->dev); + /* XXX need to free the heaps and clients ? */ + kfree(dev); +} + +void __init ion_reserve(struct ion_platform_data *data) +{ + int i; + + for (i = 0; i < data->nr; i++) { + if (data->heaps[i].size == 0) + continue; + + if (data->heaps[i].base == 0) { + phys_addr_t paddr; + paddr = memblock_alloc_base(data->heaps[i].size, + data->heaps[i].align, + MEMBLOCK_ALLOC_ANYWHERE); + if (!paddr) { + pr_err("%s: error allocating memblock for " + "heap %d\n", + __func__, i); + continue; + } + data->heaps[i].base = paddr; + } else { + int ret = memblock_reserve(data->heaps[i].base, + data->heaps[i].size); + if (ret) + pr_err("memblock reserve of %zx@%lx failed\n", + data->heaps[i].size, + data->heaps[i].base); + } + pr_info("%s: %s reserved base %lx size %zu\n", __func__, + data->heaps[i].name, + data->heaps[i].base, + data->heaps[i].size); + } +} diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h new file mode 100644 index 00000000000000..dcd2a0cdb192ed --- /dev/null +++ b/drivers/staging/android/ion/ion.h @@ -0,0 +1,204 @@ +/* + * drivers/staging/android/ion/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ION_H +#define _LINUX_ION_H + +#include + +#include "../uapi/ion.h" + +struct ion_handle; +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* This should be removed some day when phys_addr_t's are fully + plumbed in the kernel, and all instances of ion_phys_addr_t should + be converted to phys_addr_t. For the time being many kernel interfaces + do not accept phys_addr_t's that would have to */ +#define ion_phys_addr_t unsigned long + +/** + * struct ion_platform_heap - defines a heap in the given platform + * @type: type of the heap from ion_heap_type enum + * @id: unique identifier for heap. When allocating higher numbers + * will be allocated from first. At allocation these are passed + * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS. + * @name: used for debug purposes + * @base: base address of heap in physical memory if applicable + * @size: size of the heap in bytes if applicable + * @align: required alignment in physical memory if applicable + * @priv: private info passed from the board file + * + * Provided by the board file. + */ +struct ion_platform_heap { + enum ion_heap_type type; + unsigned int id; + const char *name; + ion_phys_addr_t base; + size_t size; + ion_phys_addr_t align; + void *priv; +}; + +/** + * struct ion_platform_data - array of platform heaps passed from board file + * @nr: number of structures in the array + * @heaps: array of platform_heap structions + * + * Provided by the board file in the form of platform data to a platform device. + */ +struct ion_platform_data { + int nr; + struct ion_platform_heap *heaps; +}; + +/** + * ion_reserve() - reserve memory for ion heaps if applicable + * @data: platform data specifying starting physical address and + * size + * + * Calls memblock reserve to set aside memory for heaps that are + * located at specific memory addresses or of specfic sizes not + * managed by the kernel + */ +void ion_reserve(struct ion_platform_data *data); + +/** + * ion_client_create() - allocate a client and returns it + * @dev: the global ion device + * @heap_type_mask: mask of heaps this client can allocate from + * @name: used for debugging + */ +struct ion_client *ion_client_create(struct ion_device *dev, + const char *name); + +/** + * ion_client_destroy() - free's a client and all it's handles + * @client: the client + * + * Free the provided client and all it's resources including + * any handles it is holding. + */ +void ion_client_destroy(struct ion_client *client); + +/** + * ion_alloc - allocate ion memory + * @client: the client + * @len: size of the allocation + * @align: requested allocation alignment, lots of hardware blocks + * have alignment requirements of some kind + * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set + * heaps will be tried in order from highest to lowest + * id + * @flags: heap flags, the low 16 bits are consumed by ion, the + * high 16 bits are passed on to the respective heap and + * can be heap custom + * + * Allocate memory in one of the heaps provided in heap mask and return + * an opaque handle to it. 
+ */ +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_id_mask, + unsigned int flags); + +/** + * ion_free - free a handle + * @client: the client + * @handle: the handle to free + * + * Free the provided handle. + */ +void ion_free(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_phys - returns the physical address and len of a handle + * @client: the client + * @handle: the handle + * @addr: a pointer to put the address in + * @len: a pointer to put the length in + * + * This function queries the heap for a particular handle to get the + * handle's physical address. It't output is only correct if + * a heap returns physically contiguous memory -- in other cases + * this api should not be implemented -- ion_sg_table should be used + * instead. Returns -EINVAL if the handle is invalid. This has + * no implications on the reference counting of the handle -- + * the returned value may not be valid if the caller is not + * holding a reference. + */ +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len); + +/** + * ion_map_dma - return an sg_table describing a handle + * @client: the client + * @handle: the handle + * + * This function returns the sg_table describing + * a particular ion handle. + */ +struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_map_kernel - create mapping for the given handle + * @client: the client + * @handle: handle to map + * + * Map the given handle into the kernel and return a kernel address that + * can be used to access this address. + */ +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_unmap_kernel() - destroy a kernel mapping for a handle + * @client: the client + * @handle: handle to unmap + */ +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_share_dma_buf() - share buffer as dma-buf + * @client: the client + * @handle: the handle + */ +struct dma_buf *ion_share_dma_buf(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd + * @client: the client + * @handle: the handle + */ +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle + * @client: the client + * @fd: the dma-buf fd + * + * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf, + * import that fd and return a handle representing it. If a dma-buf from + * another exporter is passed in this function will return ERR_PTR(-EINVAL) + */ +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); + +#endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c new file mode 100644 index 00000000000000..3cb05b9b0e93dd --- /dev/null +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -0,0 +1,194 @@ +/* + * drivers/staging/android/ion/ion_carveout_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +struct ion_carveout_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + + if (!offset) + return ION_CARVEOUT_ALLOCATE_FAIL; + + return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) + return; + gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + *addr = paddr; + *len = buffer->size; + return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct sg_table *table; + ion_phys_addr_t paddr; + int ret; + + if (align > PAGE_SIZE) + return -EINVAL; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto err_free; + + paddr = ion_carveout_allocate(heap, size, align); + if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { + ret = -ENOMEM; + goto err_free_table; + } + + sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); + buffer->priv_virt = table; + + return 0; + +err_free_table: + sg_free_table(table); +err_free: + kfree(table); + return ret; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + ion_carveout_free(heap, paddr, buffer->size); + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_carveout_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static struct ion_heap_ops carveout_heap_ops = { + .allocate = ion_carveout_heap_allocate, + .free = ion_carveout_heap_free, + .phys = ion_carveout_heap_phys, + .map_dma = ion_carveout_heap_map_dma, + .unmap_dma = ion_carveout_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_carveout_heap *carveout_heap; + int ret; + + struct page *page; + size_t size; + + page = 
pfn_to_page(PFN_DOWN(heap_data->base)); + size = heap_data->size; + + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); + if (ret) + return ERR_PTR(ret); + + carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + if (!carveout_heap) + return ERR_PTR(-ENOMEM); + + carveout_heap->pool = gen_pool_create(12, -1); + if (!carveout_heap->pool) { + kfree(carveout_heap); + return ERR_PTR(-ENOMEM); + } + carveout_heap->base = heap_data->base; + gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, + -1); + carveout_heap->heap.ops = &carveout_heap_ops; + carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; + carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + + return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + carveout_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c new file mode 100644 index 00000000000000..d40f5f831808aa --- /dev/null +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/ion_chunk_heap.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +struct ion_chunk_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; + unsigned long chunk_size; + unsigned long size; + unsigned long allocated; +}; + +static int ion_chunk_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table; + struct scatterlist *sg; + int ret, i; + unsigned long num_chunks; + unsigned long allocated_size; + + if (align > chunk_heap->chunk_size) + return -EINVAL; + + allocated_size = ALIGN(size, chunk_heap->chunk_size); + num_chunks = allocated_size / chunk_heap->chunk_size; + + if (allocated_size > chunk_heap->size - chunk_heap->allocated) + return -ENOMEM; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); + if (ret) { + kfree(table); + return ret; + } + + sg = table->sgl; + for (i = 0; i < num_chunks; i++) { + unsigned long paddr = gen_pool_alloc(chunk_heap->pool, + chunk_heap->chunk_size); + if (!paddr) + goto err; + sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), + chunk_heap->chunk_size, 0); + sg = sg_next(sg); + } + + buffer->priv_virt = table; + chunk_heap->allocated += allocated_size; + return 0; +err: + sg = table->sgl; + for (i -= 1; i >= 0; i--) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + sg = sg_next(sg); + } + sg_free_table(table); + kfree(table); + return -ENOMEM; +} + +static void ion_chunk_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table = buffer->priv_virt; + struct scatterlist *sg; + int i; + unsigned long allocated_size; + + allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + for_each_sg(table->sgl, sg, table->nents, i) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + } + chunk_heap->allocated -= allocated_size; + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_chunk_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static struct ion_heap_ops chunk_heap_ops = { + .allocate = ion_chunk_heap_allocate, + .free = ion_chunk_heap_free, + .map_dma = ion_chunk_heap_map_dma, + .unmap_dma = ion_chunk_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_chunk_heap *chunk_heap; + int ret; + struct page *page; + size_t size; + + page = pfn_to_page(PFN_DOWN(heap_data->base)); + size = heap_data->size; + + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); + if (ret) + return ERR_PTR(ret); + + chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); + if (!chunk_heap) + return ERR_PTR(-ENOMEM); + + 
chunk_heap->chunk_size = (unsigned long)heap_data->priv; + chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + + PAGE_SHIFT, -1); + if (!chunk_heap->pool) { + ret = -ENOMEM; + goto error_gen_pool_create; + } + chunk_heap->base = heap_data->base; + chunk_heap->size = heap_data->size; + chunk_heap->allocated = 0; + + gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); + chunk_heap->heap.ops = &chunk_heap_ops; + chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; + chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base, + heap_data->size, heap_data->align); + + return &chunk_heap->heap; + +error_gen_pool_create: + kfree(chunk_heap); + return ERR_PTR(ret); +} + +void ion_chunk_heap_destroy(struct ion_heap *heap) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + + gen_pool_destroy(chunk_heap->pool); + kfree(chunk_heap); + chunk_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c new file mode 100644 index 00000000000000..f0f98897e4b935 --- /dev/null +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -0,0 +1,218 @@ +/* + * drivers/staging/android/ion/ion_cma_heap.c + * + * Copyright (C) Linaro 2012 + * Author: for ST-Ericsson. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include "ion.h" +#include "ion_priv.h" + +#define ION_CMA_ALLOCATE_FAILED -1 + +struct ion_cma_heap { + struct ion_heap heap; + struct device *dev; +}; + +#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) + +struct ion_cma_buffer_info { + void *cpu_addr; + dma_addr_t handle; + struct sg_table *table; +}; + +/* + * Create scatter-list for the already allocated DMA buffer. + * This function could be replaced by dma_common_get_sgtable + * as soon as it will avalaible. 
+ */ +static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size) +{ + struct page *page = virt_to_page(cpu_addr); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + return 0; +} + +/* ION CMA heap operations functions */ +static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, + unsigned long len, unsigned long align, + unsigned long flags) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info; + + dev_dbg(dev, "Request buffer allocation len %ld\n", len); + + if (buffer->flags & ION_FLAG_CACHED) + return -EINVAL; + + if (align > PAGE_SIZE) + return -EINVAL; + + info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); + if (!info) { + dev_err(dev, "Can't allocate buffer info\n"); + return ION_CMA_ALLOCATE_FAILED; + } + + info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), + GFP_HIGHUSER | __GFP_ZERO); + + if (!info->cpu_addr) { + dev_err(dev, "Fail to allocate buffer\n"); + goto err; + } + + info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!info->table) { + dev_err(dev, "Fail to allocate sg table\n"); + goto free_mem; + } + + if (ion_cma_get_sgtable + (dev, info->table, info->cpu_addr, info->handle, len)) + goto free_table; + /* keep this for memory release */ + buffer->priv_virt = info; + dev_dbg(dev, "Allocate buffer %p\n", buffer); + return 0; + +free_table: + kfree(info->table); +free_mem: + dma_free_coherent(dev, len, info->cpu_addr, info->handle); +err: + kfree(info); + return ION_CMA_ALLOCATE_FAILED; +} + +static void ion_cma_free(struct ion_buffer *buffer) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(dev, "Release buffer %p\n", buffer); + /* release memory */ + dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); + /* release sg table */ + sg_free_table(info->table); + kfree(info->table); + kfree(info); +} + +/* return physical address in addr */ +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer, + &info->handle); + + *addr = info->handle; + *len = buffer->size; + + return 0; +} + +static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cma_buffer_info *info = buffer->priv_virt; + + return info->table; +} + +static void ion_cma_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, + buffer->size); +} + +static void *ion_cma_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cma_buffer_info *info = buffer->priv_virt; + /* kernel memory mapping has been done at allocation time */ + return info->cpu_addr; +} + +static void 
ion_cma_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops ion_cma_ops = { + .allocate = ion_cma_allocate, + .free = ion_cma_free, + .map_dma = ion_cma_heap_map_dma, + .unmap_dma = ion_cma_heap_unmap_dma, + .phys = ion_cma_phys, + .map_user = ion_cma_mmap, + .map_kernel = ion_cma_map_kernel, + .unmap_kernel = ion_cma_unmap_kernel, +}; + +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) +{ + struct ion_cma_heap *cma_heap; + + cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); + + if (!cma_heap) + return ERR_PTR(-ENOMEM); + + cma_heap->heap.ops = &ion_cma_ops; + /* get device from private heaps data, later it will be + * used to make the link with reserved CMA memory */ + cma_heap->dev = data->priv; + cma_heap->heap.type = ION_HEAP_TYPE_DMA; + return &cma_heap->heap; +} + +void ion_cma_heap_destroy(struct ion_heap *heap) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(heap); + + kfree(cma_heap); +} diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c new file mode 100644 index 00000000000000..55b2002753f225 --- /dev/null +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -0,0 +1,158 @@ +/* + * drivers/gpu/ion/ion_dummy_driver.c + * + * Copyright (C) 2013 Linaro, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
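
Because ion_cma_heap_create() above pulls the backing struct device out of the platform heap's priv field, a board that wants a CMA-backed heap has to pass that pointer in its heap table. The entry below is only a sketch; the id, the name and the board_cma_dev device are assumptions, nothing in this patch defines them.

    /* Hypothetical platform heap entry for a CMA-backed ion heap. */
    extern struct device board_cma_dev;   /* assumed device with a CMA region attached */

    static struct ion_platform_heap board_cma_heap = {
            .id   = ION_HEAP_TYPE_DMA,
            .type = ION_HEAP_TYPE_DMA,
            .name = "cma",
            .priv = &board_cma_dev,       /* ion_cma_heap_create() reads the device from .priv */
    };

ion_heap_create() in ion_heap.c below dispatches ION_HEAP_TYPE_DMA entries to ion_cma_heap_create(), so nothing else is needed on the heap side.
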
+ * + */ + +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +struct ion_device *idev; +struct ion_heap **heaps; + +void *carveout_ptr; +void *chunk_ptr; + +struct ion_platform_heap dummy_heaps[] = { + { + .id = ION_HEAP_TYPE_SYSTEM, + .type = ION_HEAP_TYPE_SYSTEM, + .name = "system", + }, + { + .id = ION_HEAP_TYPE_SYSTEM_CONTIG, + .type = ION_HEAP_TYPE_SYSTEM_CONTIG, + .name = "system contig", + }, + { + .id = ION_HEAP_TYPE_CARVEOUT, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = "carveout", + .size = SZ_4M, + }, + { + .id = ION_HEAP_TYPE_CHUNK, + .type = ION_HEAP_TYPE_CHUNK, + .name = "chunk", + .size = SZ_4M, + .align = SZ_16K, + .priv = (void *)(SZ_16K), + }, +}; + +struct ion_platform_data dummy_ion_pdata = { + .nr = 4, + .heaps = dummy_heaps, +}; + +static int __init ion_dummy_init(void) +{ + int i, err; + + idev = ion_device_create(NULL); + heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, + GFP_KERNEL); + if (!heaps) + return PTR_ERR(heaps); + + + /* Allocate a dummy carveout heap */ + carveout_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size, + GFP_KERNEL); + if (carveout_ptr) + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = + virt_to_phys(carveout_ptr); + else + pr_err("ion_dummy: Could not allocate carveout\n"); + + /* Allocate a dummy chunk heap */ + chunk_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CHUNK].size, + GFP_KERNEL); + if (chunk_ptr) + dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr); + else + pr_err("ion_dummy: Could not allocate chunk\n"); + + for (i = 0; i < dummy_ion_pdata.nr; i++) { + struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i]; + + if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && + !heap_data->base) + continue; + + if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) + continue; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + return 0; +err: + for (i = 0; i < dummy_ion_pdata.nr; i++) { + if (heaps[i]) + ion_heap_destroy(heaps[i]); + } + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + return err; +} + +static void __exit ion_dummy_exit(void) +{ + int i; + + ion_device_destroy(idev); + + for (i = 0; i < dummy_ion_pdata.nr; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + + return; +} + +module_init(ion_dummy_init); +module_exit(ion_dummy_exit); + diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c new file mode 100644 index 00000000000000..296c74f98dc08c --- /dev/null +++ b/drivers/staging/android/ion/ion_heap.c @@ -0,0 +1,318 @@ +/* + * drivers/staging/android/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +void *ion_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sg; + int i, j; + void *vaddr; + pgprot_t pgprot; + struct sg_table *table = buffer->sg_table; + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + + if (!pages) + return NULL; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for_each_sg(table->sgl, sg, table->nents, i) { + int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; + struct page *page = sg_page(sg); + BUG_ON(i >= npages); + for (j = 0; j < npages_this_entry; j++) + *(tmp++) = page++; + } + vaddr = vmap(pages, npages, VM_MAP, pgprot); + vfree(pages); + + if (vaddr == NULL) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +void ion_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + vunmap(buffer->vaddr); +} + +int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct sg_table *table = buffer->sg_table; + unsigned long addr = vma->vm_start; + unsigned long offset = vma->vm_pgoff * PAGE_SIZE; + struct scatterlist *sg; + int i; + int ret; + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + unsigned long remainder = vma->vm_end - addr; + unsigned long len = sg->length; + + if (offset >= sg->length) { + offset -= sg->length; + continue; + } else if (offset) { + page += offset / PAGE_SIZE; + len = sg->length - offset; + offset = 0; + } + len = min(len, remainder); + ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, + vma->vm_page_prot); + if (ret) + return ret; + addr += len; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) +{ + void *addr = vm_map_ram(pages, num, -1, pgprot); + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); + vm_unmap_ram(addr, num); + + return 0; +} + +static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, + pgprot_t pgprot) +{ + int p = 0; + int ret = 0; + struct sg_page_iter piter; + struct page *pages[32]; + + for_each_sg_page(sgl, &piter, nents, 0) { + pages[p++] = sg_page_iter_page(&piter); + if (p == ARRAY_SIZE(pages)) { + ret = ion_heap_clear_pages(pages, p, pgprot); + if (ret) + return ret; + p = 0; + } + } + if (p) + ret = ion_heap_clear_pages(pages, p, pgprot); + + return ret; +} + +int ion_heap_buffer_zero(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->sg_table; + pgprot_t pgprot; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); +} + +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + return ion_heap_sglist_zero(&sg, 1, pgprot); +} + +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) +{ + 
spin_lock(&heap->free_lock); + list_add(&buffer->list, &heap->free_list); + heap->free_list_size += buffer->size; + spin_unlock(&heap->free_lock); + wake_up(&heap->waitqueue); +} + +size_t ion_heap_freelist_size(struct ion_heap *heap) +{ + size_t size; + + spin_lock(&heap->free_lock); + size = heap->free_list_size; + spin_unlock(&heap->free_lock); + + return size; +} + +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) +{ + struct ion_buffer *buffer; + size_t total_drained = 0; + + if (ion_heap_freelist_size(heap) == 0) + return 0; + + spin_lock(&heap->free_lock); + if (size == 0) + size = heap->free_list_size; + + while (!list_empty(&heap->free_list)) { + if (total_drained >= size) + break; + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + total_drained += buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + spin_lock(&heap->free_lock); + } + spin_unlock(&heap->free_lock); + + return total_drained; +} + +static int ion_heap_deferred_free(void *data) +{ + struct ion_heap *heap = data; + + while (true) { + struct ion_buffer *buffer; + + wait_event_freezable(heap->waitqueue, + ion_heap_freelist_size(heap) > 0); + + spin_lock(&heap->free_lock); + if (list_empty(&heap->free_list)) { + spin_unlock(&heap->free_lock); + continue; + } + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + } + + return 0; +} + +int ion_heap_init_deferred_free(struct ion_heap *heap) +{ + struct sched_param param = { .sched_priority = 0 }; + + INIT_LIST_HEAD(&heap->free_list); + heap->free_list_size = 0; + spin_lock_init(&heap->free_lock); + init_waitqueue_head(&heap->waitqueue); + heap->task = kthread_run(ion_heap_deferred_free, heap, + "%s", heap->name); + sched_setscheduler(heap->task, SCHED_IDLE, ¶m); + if (IS_ERR(heap->task)) { + pr_err("%s: creating thread for deferred free failed\n", + __func__); + return PTR_RET(heap->task); + } + return 0; +} + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CHUNK: + heap = ion_chunk_heap_create(heap_data); + break; + case ION_HEAP_TYPE_DMA: + heap = ion_cma_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %zu\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CHUNK: + ion_chunk_heap_destroy(heap); + break; + case ION_HEAP_TYPE_DMA: + 
ion_cma_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c new file mode 100644 index 00000000000000..fa693c23681a7d --- /dev/null +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/ion_mem_pool.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ion_priv.h" + +struct ion_page_pool_item { + struct page *page; + struct list_head list; +}; + +static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) +{ + struct page *page = alloc_pages(pool->gfp_mask, pool->order); + + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, + DMA_BIDIRECTIONAL); + return page; +} + +static void ion_page_pool_free_pages(struct ion_page_pool *pool, + struct page *page) +{ + __free_pages(page, pool->order); +} + +static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) +{ + struct ion_page_pool_item *item; + + item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL); + if (!item) + return -ENOMEM; + + mutex_lock(&pool->mutex); + item->page = page; + if (PageHighMem(page)) { + list_add_tail(&item->list, &pool->high_items); + pool->high_count++; + } else { + list_add_tail(&item->list, &pool->low_items); + pool->low_count++; + } + mutex_unlock(&pool->mutex); + return 0; +} + +static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) +{ + struct ion_page_pool_item *item; + struct page *page; + + if (high) { + BUG_ON(!pool->high_count); + item = list_first_entry(&pool->high_items, + struct ion_page_pool_item, list); + pool->high_count--; + } else { + BUG_ON(!pool->low_count); + item = list_first_entry(&pool->low_items, + struct ion_page_pool_item, list); + pool->low_count--; + } + + list_del(&item->list); + page = item->page; + kfree(item); + return page; +} + +void *ion_page_pool_alloc(struct ion_page_pool *pool) +{ + struct page *page = NULL; + + BUG_ON(!pool); + + mutex_lock(&pool->mutex); + if (pool->high_count) + page = ion_page_pool_remove(pool, true); + else if (pool->low_count) + page = ion_page_pool_remove(pool, false); + mutex_unlock(&pool->mutex); + + if (!page) + page = ion_page_pool_alloc_pages(pool); + + return page; +} + +void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) +{ + int ret; + + ret = ion_page_pool_add(pool, page); + if (ret) + ion_page_pool_free_pages(pool, page); +} + +static int ion_page_pool_total(struct ion_page_pool *pool, bool high) +{ + int total = 0; + + total += high ? 
(pool->high_count + pool->low_count) * + (1 << pool->order) : + pool->low_count * (1 << pool->order); + return total; +} + +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan) +{ + int nr_freed = 0; + int i; + bool high; + + high = !!(gfp_mask & __GFP_HIGHMEM); + + if (nr_to_scan == 0) + return ion_page_pool_total(pool, high); + + for (i = 0; i < nr_to_scan; i++) { + struct page *page; + + mutex_lock(&pool->mutex); + if (pool->low_count) { + page = ion_page_pool_remove(pool, false); + } else if (high && pool->high_count) { + page = ion_page_pool_remove(pool, true); + } else { + mutex_unlock(&pool->mutex); + break; + } + mutex_unlock(&pool->mutex); + ion_page_pool_free_pages(pool, page); + nr_freed += (1 << pool->order); + } + + return nr_freed; +} + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) +{ + struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), + GFP_KERNEL); + if (!pool) + return NULL; + pool->high_count = 0; + pool->low_count = 0; + INIT_LIST_HEAD(&pool->low_items); + INIT_LIST_HEAD(&pool->high_items); + pool->gfp_mask = gfp_mask; + pool->order = order; + mutex_init(&pool->mutex); + plist_node_init(&pool->list, order); + + return pool; +} + +void ion_page_pool_destroy(struct ion_page_pool *pool) +{ + kfree(pool); +} + +static int __init ion_page_pool_init(void) +{ + return 0; +} + +static void __exit ion_page_pool_exit(void) +{ +} + +module_init(ion_page_pool_init); +module_exit(ion_page_pool_exit); diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 00000000000000..d98673981cc40c --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,360 @@ +/* + * drivers/staging/android/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
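
The pool just added is a plain cache of pages split into highmem and lowmem lists; a minimal round trip through its API looks like the sketch below. The order and gfp flags are arbitrary choices for illustration, not values this code prescribes.

    /* Illustrative use of the ion_page_pool API; not part of the patch. */
    static int example_pool_roundtrip(void)
    {
            struct ion_page_pool *pool;
            struct page *page;

            pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4); /* order-4 pages */
            if (!pool)
                    return -ENOMEM;

            page = ion_page_pool_alloc(pool);        /* pooled page, or a fresh allocation */
            if (page)
                    ion_page_pool_free(pool, page);  /* puts the page back on the pool */

            /* a shrinker would call this under memory pressure; 16 is just an example */
            ion_page_pool_shrink(pool, GFP_HIGHUSER, 16);

            ion_page_pool_destroy(pool);
            return 0;
    }

ion_system_heap.c below keeps one such pool per order and wires ion_page_pool_shrink() into its shrinker callbacks.
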
+ * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ion.h" + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref: refernce count + * @node: node in the ion_device buffers tree + * @dev: back pointer to the ion_device + * @heap: back pointer to the heap the buffer came from + * @flags: buffer specific flags + * @size: size of the buffer + * @priv_virt: private data to the buffer representable as + * a void * + * @priv_phys: private data to the buffer representable as + * an ion_phys_addr_t (and someday a phys_addr_t) + * @lock: protects the buffers cnt fields + * @kmap_cnt: number of times the buffer is mapped to the kernel + * @vaddr: the kenrel mapping if kmap_cnt is not zero + * @dmap_cnt: number of times the buffer is mapped for dma + * @sg_table: the sg table for the buffer if dmap_cnt is not zero + * @pages: flat array of pages in the buffer -- used by fault + * handler and only valid for buffers that are faulted in + * @vmas: list of vma's mapping this buffer + * @handle_count: count of handles referencing this buffer + * @task_comm: taskcomm of last client to reference this buffer in a + * handle, used for debugging + * @pid: pid of last client to reference this buffer in a + * handle, used for debugging +*/ +struct ion_buffer { + struct kref ref; + union { + struct rb_node node; + struct list_head list; + }; + struct ion_device *dev; + struct ion_heap *heap; + unsigned long flags; + size_t size; + union { + void *priv_virt; + ion_phys_addr_t priv_phys; + }; + struct mutex lock; + int kmap_cnt; + void *vaddr; + int dmap_cnt; + struct sg_table *sg_table; + struct page **pages; + struct list_head vmas; + /* used to track orphaned buffers */ + int handle_count; + char task_comm[TASK_COMM_LEN]; + pid_t pid; +}; +void ion_buffer_destroy(struct ion_buffer *buffer); + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate: allocate memory + * @free: free memory + * @phys get physical address of a buffer (only define on + * physically contiguous heaps) + * @map_dma map the memory for dma to a scatterlist + * @unmap_dma unmap the memory for dma + * @map_kernel map memory to the kernel + * @unmap_kernel unmap memory to the kernel + * @map_user map memory to userspace + * + * allocate, phys, and map_user return 0 on success, -errno on error. + * map_dma and map_kernel return pointer on success, ERR_PTR on error. 
+ */ +struct ion_heap_ops { + int (*allocate) (struct ion_heap *heap, + struct ion_buffer *buffer, unsigned long len, + unsigned long align, unsigned long flags); + void (*free) (struct ion_buffer *buffer); + int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len); + struct sg_table *(*map_dma) (struct ion_heap *heap, + struct ion_buffer *buffer); + void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer); + void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); + void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); + int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma); +}; + +/** + * heap flags - flags between the heaps and core ion code + */ +#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) + +/** + * struct ion_heap - represents a heap in the system + * @node: rb node to put the heap on the device's tree of heaps + * @dev: back pointer to the ion_device + * @type: type of heap + * @ops: ops struct as above + * @flags: flags + * @id: id of heap, also indicates priority of this heap when + * allocating. These are specified by platform data and + * MUST be unique + * @name: used for debugging + * @shrinker: a shrinker for the heap, if the heap caches system + * memory, it must define a shrinker to return it on low + * memory conditions, this includes system memory cached + * in the deferred free lists for heaps that support it + * @free_list: free list head if deferred free is used + * @free_list_size size of the deferred free list in bytes + * @lock: protects the free list + * @waitqueue: queue to wait on from deferred free thread + * @task: task struct of deferred free thread + * @debug_show: called when heap debug file is read to add any + * heap specific debug info to output + * + * Represents a pool of memory from which buffers can be made. In some + * systems the only heap is regular system memory allocated via vmalloc. + * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. + */ +struct ion_heap { + struct plist_node node; + struct ion_device *dev; + enum ion_heap_type type; + struct ion_heap_ops *ops; + unsigned long flags; + unsigned int id; + const char *name; + struct shrinker shrinker; + struct list_head free_list; + size_t free_list_size; + spinlock_t free_lock; + wait_queue_head_t waitqueue; + struct task_struct *task; + int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); +}; + +/** + * ion_buffer_cached - this ion buffer is cached + * @buffer: buffer + * + * indicates whether this ion buffer is cached + */ +bool ion_buffer_cached(struct ion_buffer *buffer); + +/** + * ion_buffer_fault_user_mappings - fault in user mappings of this buffer + * @buffer: buffer + * + * indicates whether userspace mappings of this buffer will be faulted + * in, this can affect how buffers are allocated from the heap. 
+ */ +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); + +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl: arch specific ioctl function if applicable + * + * returns a valid device or -PTR_ERR + */ +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)); + +/** + * ion_device_destroy - free and device and it's resource + * @dev: the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev: the device + * @heap: the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * some helpers for common operations on buffers using the sg_table + * and vaddr fields + */ +void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *); +void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *); +int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, + struct vm_area_struct *); +int ion_heap_buffer_zero(struct ion_buffer *buffer); +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); + +/** + * ion_heap_init_deferred_free -- initialize deferred free functionality + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will + * be called to setup deferred frees. Calls to free the buffer will + * return immediately and the actual free will occur some time later + */ +int ion_heap_init_deferred_free(struct ion_heap *heap); + +/** + * ion_heap_freelist_add - add a buffer to the deferred free list + * @heap: the heap + * @buffer: the buffer + * + * Adds an item to the deferred freelist. + */ +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); + +/** + * ion_heap_freelist_drain - drain the deferred free list + * @heap: the heap + * @size: ammount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed. The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + */ +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); + +/** + * ion_heap_freelist_size - returns the size of the freelist in bytes + * @heap: the heap + */ +size_t ion_heap_freelist_size(struct ion_heap *heap); + + +/** + * functions for creating and destroying the built in ion heaps. + * architectures can add their own custom architecture specific + * heaps as appropriate. 
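
Deferred freeing is opt-in: a heap sets ION_HEAP_FLAG_DEFER_FREE, and the ion core (ion.c, outside this hunk) is expected to start the worker with ion_heap_init_deferred_free() and queue buffers with ion_heap_freelist_add(); that wiring is an assumption here, only the helpers themselves are declared above. Reclaiming whatever is still queued is then roughly a one-liner:

    /* Sketch: release every buffer still sitting on a heap's deferred freelist. */
    static size_t example_reclaim_deferred(struct ion_heap *heap)
    {
            size_t pending = ion_heap_freelist_size(heap);

            /* returns the number of bytes actually released */
            return ion_heap_freelist_drain(heap, pending);
    }
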
+ */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *); +void ion_heap_destroy(struct ion_heap *); +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); +void ion_system_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); +void ion_system_contig_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); +void ion_carveout_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *); +void ion_chunk_heap_destroy(struct ion_heap *); +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); +void ion_cma_heap_destroy(struct ion_heap *); + +/** + * kernel api to allocate/free from carveout -- used when carveout is + * used to back an architecture specific custom heap + */ +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, + unsigned long align); +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size); +/** + * The carveout heap returns physical addresses, since 0 may be a valid + * physical address, this is used to indicate allocation failed + */ +#define ION_CARVEOUT_ALLOCATE_FAIL -1 + +/** + * functions for creating and destroying a heap pool -- allows you + * to keep a pool of pre allocated memory to use from your heap. Keeping + * a pool of memory that is ready for dma, ie any cached mapping have been + * invalidated from the cache, provides a significant peformance benefit on + * many systems */ + +/** + * struct ion_page_pool - pagepool struct + * @high_count: number of highmem items in the pool + * @low_count: number of lowmem items in the pool + * @high_items: list of highmem items + * @low_items: list of lowmem items + * @shrinker: a shrinker for the items + * @mutex: lock protecting this struct and especially the count + * item list + * @alloc: function to be used to allocate pageory when the pool + * is empty + * @free: function to be used to free pageory back to the system + * when the shrinker fires + * @gfp_mask: gfp_mask to use from alloc + * @order: order of pages in the pool + * @list: plist node for list of pools + * + * Allows you to keep a pool of pre allocated pages to use from your heap. 
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have + * been invalidated from the cache, provides a significant peformance benefit + * on many systems + */ +struct ion_page_pool { + int high_count; + int low_count; + struct list_head high_items; + struct list_head low_items; + struct mutex mutex; + gfp_t gfp_mask; + unsigned int order; + struct plist_node list; +}; + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); +void ion_page_pool_destroy(struct ion_page_pool *); +void *ion_page_pool_alloc(struct ion_page_pool *); +void ion_page_pool_free(struct ion_page_pool *, struct page *); + +/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool + * @pool: the pool + * @gfp_mask: the memory type to reclaim + * @nr_to_scan: number of items to shrink in pages + * + * returns the number of items freed in pages + */ +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan); + +/** + * ion_pages_sync_for_device - cache flush pages for use with the specified + * device + * @dev: the device the pages will be used with + * @page: the first page to be flushed + * @size: size in bytes of region to be flushed + * @dir: direction of dma transfer + */ +void ion_pages_sync_for_device(struct device *dev, struct page *page, + size_t size, enum dma_data_direction dir); + +#endif /* _ION_PRIV_H */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c new file mode 100644 index 00000000000000..7f0729130d6583 --- /dev/null +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -0,0 +1,488 @@ +/* + * drivers/staging/android/ion/ion_system_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
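
ion_priv.h ends by exporting ion_carveout_allocate()/ion_carveout_free() precisely so that an architecture specific heap can sit on top of a carveout. The fragment below sketches what such a heap's allocate/free pair could look like; the heap name, the use of priv_phys and the omitted remaining ops are illustrative assumptions, not something this patch ships.

    /* Hypothetical arch-specific heap backed by the carveout allocator. */
    static int my_custom_heap_allocate(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       unsigned long len, unsigned long align,
                                       unsigned long flags)
    {
            ion_phys_addr_t paddr = ion_carveout_allocate(heap, len, align);

            if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
                    return -ENOMEM;

            buffer->priv_phys = paddr;      /* stash the physical address */
            return 0;
    }

    static void my_custom_heap_free(struct ion_buffer *buffer)
    {
            ion_carveout_free(buffer->heap, buffer->priv_phys, buffer->size);
    }

A complete heap would still provide phys/map_dma/unmap_dma and would normally reuse ion_heap_map_kernel(), ion_heap_unmap_kernel() and ion_heap_map_user(), as ion_carveout_heap.c earlier in this patch does.
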
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | + __GFP_NORETRY) & ~__GFP_WAIT; +static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); +static const unsigned int orders[] = {8, 4, 0}; +static const int num_orders = ARRAY_SIZE(orders); +static int order_to_index(unsigned int order) +{ + int i; + for (i = 0; i < num_orders; i++) + if (order == orders[i]) + return i; + BUG(); + return -1; +} + +static unsigned int order_to_size(int order) +{ + return PAGE_SIZE << order; +} + +struct ion_system_heap { + struct ion_heap heap; + struct ion_page_pool **pools; +}; + +struct page_info { + struct page *page; + unsigned int order; + struct list_head list; +}; + +static struct page *alloc_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long order) +{ + bool cached = ion_buffer_cached(buffer); + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + struct page *page; + + if (!cached) { + page = ion_page_pool_alloc(pool); + } else { + gfp_t gfp_flags = low_order_gfp_flags; + + if (order > 4) + gfp_flags = high_order_gfp_flags; + page = alloc_pages(gfp_flags, order); + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + } + if (!page) + return NULL; + + return page; +} + +static void free_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, struct page *page, + unsigned int order) +{ + bool cached = ion_buffer_cached(buffer); + + if (!cached) { + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + ion_page_pool_free(pool, page); + } else { + __free_pages(page, order); + } +} + + +static struct page_info *alloc_largest_available(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long size, + unsigned int max_order) +{ + struct page *page; + struct page_info *info; + int i; + + info = kmalloc(sizeof(struct page_info), GFP_KERNEL); + if (!info) + return NULL; + + for (i = 0; i < num_orders; i++) { + if (size < order_to_size(orders[i])) + continue; + if (max_order < orders[i]) + continue; + + page = alloc_buffer_page(heap, buffer, orders[i]); + if (!page) + continue; + + info->page = page; + info->order = orders[i]; + return info; + } + kfree(info); + + return NULL; +} + +static int ion_system_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + struct sg_table *table; + struct scatterlist *sg; + int ret; + struct list_head pages; + struct page_info *info, *tmp_info; + int i = 0; + long size_remaining = PAGE_ALIGN(size); + unsigned int max_order = orders[0]; + + if (align > PAGE_SIZE) + return -EINVAL; + + INIT_LIST_HEAD(&pages); + while (size_remaining > 0) { + info = alloc_largest_available(sys_heap, buffer, size_remaining, + max_order); + if (!info) + goto err; + list_add_tail(&info->list, &pages); + size_remaining -= (1 << info->order) * PAGE_SIZE; + max_order = info->order; + i++; + } + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + goto err; + + ret = sg_alloc_table(table, i, GFP_KERNEL); + if (ret) + goto err1; + + sg = table->sgl; + list_for_each_entry_safe(info, tmp_info, &pages, list) { + struct page *page = info->page; + sg_set_page(sg, page, (1 << 
info->order) * PAGE_SIZE, 0); + sg = sg_next(sg); + list_del(&info->list); + kfree(info); + } + + buffer->priv_virt = table; + return 0; +err1: + kfree(table); +err: + list_for_each_entry_safe(info, tmp_info, &pages, list) { + free_buffer_page(sys_heap, buffer, info->page, info->order); + kfree(info); + } + return -ENOMEM; +} + +static void ion_system_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + struct sg_table *table = buffer->sg_table; + bool cached = ion_buffer_cached(buffer); + struct scatterlist *sg; + LIST_HEAD(pages); + int i; + + /* uncached pages come from the page pools, zero them before returning + for security purposes (other allocations are zerod at alloc time */ + if (!cached) + ion_heap_buffer_zero(buffer); + + for_each_sg(table->sgl, sg, table->nents, i) + free_buffer_page(sys_heap, buffer, sg_page(sg), + get_order(sg->length)); + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_system_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static struct ion_heap_ops system_heap_ops = { + .allocate = ion_system_heap_allocate, + .free = ion_system_heap_free, + .map_dma = ion_system_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_heap_map_user, +}; + +static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct ion_heap *heap = container_of(shrinker, struct ion_heap, + shrinker); + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int nr_total = 0; + int i; + + /* total number of items is whatever the page pools are holding + plus whatever's in the freelist */ + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0); + } + nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE; + return nr_total; + +} + +static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + + struct ion_heap *heap = container_of(shrinker, struct ion_heap, + shrinker); + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int nr_freed = 0; + int i; + + if (sc->nr_to_scan == 0) + goto end; + + /* shrink the free list first, no point in zeroing the memory if + we're just going to reclaim it */ + nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) / + PAGE_SIZE; + + if (nr_freed >= sc->nr_to_scan) + goto end; + + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + + nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask, + sc->nr_to_scan); + if (nr_freed >= sc->nr_to_scan) + break; + } + +end: + return nr_freed; + +} + +static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, + void *unused) +{ + + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", + pool->high_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->high_count); + seq_printf(s, "%d order %u 
lowmem pages in pool = %lu total\n", + pool->low_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->low_count); + } + return 0; +} + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ + struct ion_system_heap *heap; + int i; + + heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->heap.ops = &system_heap_ops; + heap->heap.type = ION_HEAP_TYPE_SYSTEM; + heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders, + GFP_KERNEL); + if (!heap->pools) + goto err_alloc_pools; + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool; + gfp_t gfp_flags = low_order_gfp_flags; + + if (orders[i] > 4) + gfp_flags = high_order_gfp_flags; + pool = ion_page_pool_create(gfp_flags, orders[i]); + if (!pool) + goto err_create_pool; + heap->pools[i] = pool; + } + + heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan; + heap->heap.shrinker.count_objects = ion_system_heap_shrink_count; + heap->heap.shrinker.seeks = DEFAULT_SEEKS; + heap->heap.shrinker.batch = 0; + register_shrinker(&heap->heap.shrinker); + heap->heap.debug_show = ion_system_heap_debug_show; + return &heap->heap; +err_create_pool: + for (i = 0; i < num_orders; i++) + if (heap->pools[i]) + ion_page_pool_destroy(heap->pools[i]); + kfree(heap->pools); +err_alloc_pools: + kfree(heap); + return ERR_PTR(-ENOMEM); +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; + + for (i = 0; i < num_orders; i++) + ion_page_pool_destroy(sys_heap->pools[i]); + kfree(sys_heap->pools); + kfree(sys_heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + int order = get_order(len); + struct page *page; + struct sg_table *table; + unsigned long i; + int ret; + + if (align > (PAGE_SIZE << order)) + return -EINVAL; + + page = alloc_pages(low_order_gfp_flags, order); + if (!page) + return -ENOMEM; + + split_page(page, order); + + len = PAGE_ALIGN(len); + for (i = len >> PAGE_SHIFT; i < (1 << order); i++) + __free_page(page + i); + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) { + ret = -ENOMEM; + goto out; + } + + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto out; + + sg_set_page(table->sgl, page, len, 0); + + buffer->priv_virt = table; + + ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); + + return 0; + +out: + for (i = 0; i < len >> PAGE_SHIFT; i++) + __free_page(page + i); + kfree(table); + return ret; +} + +static void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; + unsigned long i; + + for (i = 0; i < pages; i++) + __free_page(page + i); + sg_free_table(table); + kfree(table); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + *addr = page_to_phys(page); + *len = buffer->size; + return 0; +} + +static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_system_contig_heap_unmap_dma(struct 
ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops kmalloc_ops = { + .allocate = ion_system_contig_heap_allocate, + .free = ion_system_contig_heap_free, + .phys = ion_system_contig_heap_phys, + .map_dma = ion_system_contig_heap_map_dma, + .unmap_dma = ion_system_contig_heap_unmap_dma, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_heap_map_user, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &kmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; + return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c new file mode 100644 index 00000000000000..654acb5c8eba12 --- /dev/null +++ b/drivers/staging/android/ion/ion_test.c @@ -0,0 +1,282 @@ +/* + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "ion-test: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ion.h" +#include "../uapi/ion_test.h" + +#define u64_to_uptr(x) ((void __user *)(unsigned long)(x)) + +struct ion_test_device { + struct miscdevice misc; +}; + +struct ion_test_data { + struct dma_buf *dma_buf; + struct device *dev; +}; + +static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf, + void __user *ptr, size_t offset, size_t size, bool write) +{ + int ret = 0; + struct dma_buf_attachment *attach; + struct sg_table *table; + pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); + enum dma_data_direction dir = write ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + struct sg_page_iter sg_iter; + unsigned long offset_page; + + attach = dma_buf_attach(dma_buf, dev); + if (IS_ERR(attach)) + return PTR_ERR(attach); + + table = dma_buf_map_attachment(attach, dir); + if (IS_ERR(table)) + return PTR_ERR(table); + + offset_page = offset >> PAGE_SHIFT; + offset %= PAGE_SIZE; + + for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) { + struct page *page = sg_page_iter_page(&sg_iter); + void *vaddr = vmap(&page, 1, VM_MAP, pgprot); + size_t to_copy = PAGE_SIZE - offset; + + to_copy = min(to_copy, size); + if (!vaddr) { + ret = -ENOMEM; + goto err; + } + + if (write) + ret = copy_from_user(vaddr + offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + offset, to_copy); + + vunmap(vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + size -= to_copy; + if (!size) + break; + ptr += to_copy; + offset = 0; + } + +err: + dma_buf_unmap_attachment(attach, table, dir); + dma_buf_detach(dma_buf, attach); + return ret; +} + +static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, + size_t offset, size_t size, bool write) +{ + int ret; + unsigned long page_offset = offset >> PAGE_SHIFT; + size_t copy_offset = offset % PAGE_SIZE; + size_t copy_size = size; + enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (offset > dma_buf->size || size > dma_buf->size - offset) + return -EINVAL; + + ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); + if (ret) + return ret; + + while (copy_size > 0) { + size_t to_copy; + void *vaddr = dma_buf_kmap(dma_buf, page_offset); + + if (!vaddr) + goto err; + + to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size); + + if (write) + ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); + + dma_buf_kunmap(dma_buf, page_offset, vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + + copy_size -= to_copy; + ptr += to_copy; + page_offset++; + copy_offset = 0; + } +err: + dma_buf_end_cpu_access(dma_buf, offset, size, dir); + return ret; +} + +static long ion_test_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ion_test_data *test_data = filp->private_data; + int ret = 0; + + union { + struct ion_test_rw_data test_rw; + } data; + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_TEST_SET_FD: + { + struct dma_buf *dma_buf = NULL; + int fd = arg; + + if (fd >= 0) { + dma_buf = dma_buf_get((int)arg); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + } + if (test_data->dma_buf) + dma_buf_put(test_data->dma_buf); + test_data->dma_buf = dma_buf; + break; + } + case ION_IOC_TEST_DMA_MAPPING: + { + ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + case ION_IOC_TEST_KERNEL_MAPPING: + { + ret = ion_handle_test_kernel(test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + default: + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + } + return ret; +} + +static int ion_test_open(struct inode *inode, struct file *file) +{ + struct ion_test_data *data; + struct miscdevice *miscdev = file->private_data; + + 
data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = miscdev->parent; + + file->private_data = data; + + return 0; +} + +static int ion_test_release(struct inode *inode, struct file *file) +{ + struct ion_test_data *data = file->private_data; + + kfree(data); + + return 0; +} + +static const struct file_operations ion_test_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ion_test_ioctl, + .compat_ioctl = ion_test_ioctl, + .open = ion_test_open, + .release = ion_test_release, +}; + +static int __init ion_test_probe(struct platform_device *pdev) +{ + int ret; + struct ion_test_device *testdev; + + testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device), + GFP_KERNEL); + if (!testdev) + return -ENOMEM; + + testdev->misc.minor = MISC_DYNAMIC_MINOR; + testdev->misc.name = "ion-test"; + testdev->misc.fops = &ion_test_fops; + testdev->misc.parent = &pdev->dev; + ret = misc_register(&testdev->misc); + if (ret) { + pr_err("failed to register misc device.\n"); + return ret; + } + + platform_set_drvdata(pdev, testdev); + + return 0; +} + +static struct platform_driver ion_test_platform_driver = { + .driver = { + .name = "ion-test", + }, +}; + +static int __init ion_test_init(void) +{ + platform_device_register_simple("ion-test", -1, NULL, 0); + return platform_driver_probe(&ion_test_platform_driver, ion_test_probe); +} + +static void __exit ion_test_exit(void) +{ + platform_driver_unregister(&ion_test_platform_driver); +} + +module_init(ion_test_init); +module_exit(ion_test_exit); diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile new file mode 100644 index 00000000000000..11cd003fb08f08 --- /dev/null +++ b/drivers/staging/android/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c new file mode 100644 index 00000000000000..3474c65f87faad --- /dev/null +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -0,0 +1,84 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
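
The test driver above only becomes useful together with a userspace client that hands it a dma-buf fd and a description of the copy to perform. A rough sketch of such a client follows; it assumes the ION_IOC_TEST_* ioctls and struct ion_test_rw_data (fields ptr/offset/size/write, as used in ion_test_ioctl() above) are available from a copy of the uapi header, and that the dma-buf fd was exported elsewhere, e.g. via an ion share ioctl that is not part of this hunk.

    /* Hypothetical userspace client of /dev/ion-test; sketch only. */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "ion_test.h"   /* assumed copy of the uapi header included by the driver */

    static int read_buffer_contents(int dmabuf_fd, void *dst, size_t len)
    {
            struct ion_test_rw_data rw;
            int fd = open("/dev/ion-test", O_RDWR);

            if (fd < 0)
                    return -1;

            /* tell the driver which dma-buf the following ioctls operate on */
            if (ioctl(fd, ION_IOC_TEST_SET_FD, dmabuf_fd) < 0)
                    goto fail;

            memset(&rw, 0, sizeof(rw));
            rw.ptr    = (unsigned long)dst; /* user pointer, 64-bit field in the uapi struct */
            rw.offset = 0;
            rw.size   = len;
            rw.write  = 0;                  /* 0: copy buffer contents out to userspace */

            /* exercises the dma_buf_kmap() path shown in ion_handle_test_kernel() */
            if (ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw) < 0)
                    goto fail;

            close(fd);
            return 0;
    fail:
            close(fd);
            return -1;
    }
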
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
+ return PTR_ERR(idev);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ platform_set_drvdata(pdev, idev);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+ return err;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = tegra_ion_probe,
+ .remove = tegra_ion_remove,
+ .driver = { .name = "ion-tegra" }
+};
+
+module_platform_driver(ion_driver);
+
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 38ea986dc70f84..62e2255b1c1e2c 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -28,7 +28,7 @@ struct sync_fence;
 /**
  * struct sync_timeline_ops - sync object implementation ops
- * @driver_name: name of the implentation
+ * @driver_name: name of the implementation
  * @dup: duplicate a sync_pt
  * @has_signaled: returns:
  * 1 if pt has signaled
@@ -37,12 +37,12 @@ struct sync_fence;
  * @compare: returns:
  * 1 if b will signal before a
  * 0 if a and b will signal at the same time
- * -1 if a will signabl before b
+ * -1 if a will signal before b
  * @free_pt: called before sync_pt is freed
  * @release_obj: called before sync_timeline is freed
  * @print_obj: deprecated
  * @print_pt: deprecated
- * @fill_driver_data: write implmentation specific driver data to data.
+ * @fill_driver_data: write implementation specific driver data to data.
  * should return an error if there is not enough room
  * as specified by size. This information is returned
  * to userspace by SYNC_IOC_FENCE_INFO.
@@ -88,9 +88,9 @@ struct sync_timeline_ops {
 /**
  * struct sync_timeline - sync object
  * @kref: reference count on fence.
- * @ops: ops that define the implementaiton of the sync_timeline
+ * @ops: ops that define the implementation of the sync_timeline
  * @name: name of the sync_timeline.
Useful for debugging - * @destoryed: set when sync_timeline is destroyed + * @destroyed: set when sync_timeline is destroyed * @child_list_head: list of children sync_pts for this sync_timeline * @child_list_lock: lock protecting @child_list_head, destroyed, and * sync_pt.status @@ -119,12 +119,12 @@ struct sync_timeline { * @parent: sync_timeline to which this sync_pt belongs * @child_list: membership in sync_timeline.child_list_head * @active_list: membership in sync_timeline.active_list_head - * @signaled_list: membership in temorary signaled_list on stack + * @signaled_list: membership in temporary signaled_list on stack * @fence: sync_fence to which the sync_pt belongs * @pt_list: membership in sync_fence.pt_list_head * @status: 1: signaled, 0:active, <0: error * @timestamp: time which sync_pt status transitioned from active to - * singaled or error. + * signaled or error. */ struct sync_pt { struct sync_timeline *parent; @@ -145,9 +145,9 @@ struct sync_pt { /** * struct sync_fence - sync fence * @file: file representing this fence - * @kref: referenace count on fence. + * @kref: reference count on fence. * @name: name of sync_fence. Useful for debugging - * @pt_list_head: list of sync_pts in ths fence. immutable once fence + * @pt_list_head: list of sync_pts in the fence. immutable once fence * is created * @waiter_list_head: list of asynchronous waiters on this fence * @waiter_list_lock: lock protecting @waiter_list_head and @status @@ -201,23 +201,23 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, /** * sync_timeline_create() - creates a sync object - * @ops: specifies the implemention ops for the object + * @ops: specifies the implementation ops for the object * @size: size to allocate for this obj * @name: sync_timeline name * - * Creates a new sync_timeline which will use the implemetation specified by - * @ops. @size bytes will be allocated allowing for implemntation specific - * data to be kept after the generic sync_timeline stuct. + * Creates a new sync_timeline which will use the implementation specified by + * @ops. @size bytes will be allocated allowing for implementation specific + * data to be kept after the generic sync_timeline struct. */ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name); /** - * sync_timeline_destory() - destorys a sync object + * sync_timeline_destroy() - destroys a sync object * @obj: sync_timeline to destroy * - * A sync implemntation should call this when the @obj is going away - * (i.e. module unload.) @obj won't actually be freed until all its childern + * A sync implementation should call this when the @obj is going away + * (i.e. module unload.) @obj won't actually be freed until all its children * sync_pts are freed. */ void sync_timeline_destroy(struct sync_timeline *obj); @@ -226,7 +226,7 @@ void sync_timeline_destroy(struct sync_timeline *obj); * sync_timeline_signal() - signal a status change on a sync_timeline * @obj: sync_timeline to signal * - * A sync implemntation should call this any time one of it's sync_pts + * A sync implementation should call this any time one of it's sync_pts * has signaled or has an error condition. */ void sync_timeline_signal(struct sync_timeline *obj); @@ -236,8 +236,8 @@ void sync_timeline_signal(struct sync_timeline *obj); * @parent: sync_pt's parent sync_timeline * @size: size to allocate for this pt * - * Creates a new sync_pt as a chiled of @parent. 
@size bytes will be - * allocated allowing for implemntation specific data to be kept after + * Creates a new sync_pt as a child of @parent. @size bytes will be + * allocated allowing for implementation specific data to be kept after * the generic sync_timeline struct. */ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); @@ -287,7 +287,7 @@ struct sync_fence *sync_fence_merge(const char *name, struct sync_fence *sync_fence_fdget(int fd); /** - * sync_fence_put() - puts a refernnce of a sync fence + * sync_fence_put() - puts a reference of a sync fence * @fence: fence to put * * Puts a reference on @fence. If this is the last reference, the fence and @@ -297,7 +297,7 @@ void sync_fence_put(struct sync_fence *fence); /** * sync_fence_install() - installs a fence into a file descriptor - * @fence: fence to instal + * @fence: fence to install * @fd: file descriptor in which to install the fence * * Installs @fence into @fd. @fd's should be acquired through get_unused_fd(). @@ -359,10 +359,10 @@ struct sync_merge_data { * struct sync_pt_info - detailed sync_pt information * @len: length of sync_pt_info including any driver_data * @obj_name: name of parent sync_timeline - * @driver_name: name of driver implmenting the parent + * @driver_name: name of driver implementing the parent * @status: status of the sync_pt 0:active 1:signaled <0:error * @timestamp_ns: timestamp of status change in nanoseconds - * @driver_data: any driver dependant data + * @driver_data: any driver dependent data */ struct sync_pt_info { __u32 len; @@ -377,7 +377,7 @@ struct sync_pt_info { /** * struct sync_fence_info_data - data returned from fence info ioctl * @len: ioctl caller writes the size of the buffer its passing in. - * ioctl returns length of sync_fence_data reutnred to userspace + * ioctl returns length of sync_fence_data returned to userspace * including pt_info. * @name: name of fence * @status: status of fence. 1: signaled 0:active <0:error @@ -418,7 +418,7 @@ struct sync_fence_info_data { * pt_info. * * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. - * To itterate over the sync_pt_infos, use the sync_pt_info.len field. + * To iterate over the sync_pt_infos, use the sync_pt_info.len field. */ #define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\ struct sync_fence_info_data) diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h new file mode 100644 index 00000000000000..f09e7c154d691e --- /dev/null +++ b/drivers/staging/android/uapi/ion.h @@ -0,0 +1,196 @@ +/* + * drivers/staging/android/uapi/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
+ * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
+ * is used to identify the heaps, so only 32
+ * total heap types are supported
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
+ ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be created
+ at mmap time; if this is set,
+ caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations are handled via the following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_id_mask;
+ unsigned int flags;
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ ion_user_handle_t handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, it should be used after touching a cached buffer from the CPU;
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 00000000000000..ffef06f63133fb
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
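Taken together, ION_IOC_ALLOC, ION_IOC_SHARE and ION_IOC_FREE define the basic userspace flow documented above: allocate, turn the opaque handle into a dma-buf fd, mmap that fd, then drop the handle. The sketch below is an illustration of one plausible calling sequence against these definitions only; error handling is elided, and the "ion.h" include path and the assumption that the system heap was registered with id ION_HEAP_TYPE_SYSTEM are assumptions, not something the patch guarantees.

/* Minimal Ion client sketch: allocate, export as dma-buf, map, free. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ion.h"		/* assumed local copy of the uapi header above */

int ion_alloc_example(void)
{
	int ion_fd = open("/dev/ion", O_RDWR);
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		/* assumes the system heap's id equals ION_HEAP_TYPE_SYSTEM */
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share = { 0 };
	struct ion_handle_data to_free = { 0 };
	void *p;

	if (ion_fd < 0)
		return -1;

	/* the opaque handle comes back in alloc.handle */
	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);

	/* a dma-buf fd, usable with mmap() or another process, comes back in share.fd */
	share.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_SHARE, &share);

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p != MAP_FAILED) {
		memset(p, 0, 4096);
		munmap(p, 4096);
	}

	/* drop the client handle; the buffer lives on while dma-buf fds remain */
	to_free.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &to_free);

	close(share.fd);
	close(ion_fd);
	return 0;
}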
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read/write a handle
+ * @ptr: a pointer to an area at least as large as size
+ * @offset: offset into the ion buffer to start reading
+ * @size: size to read or write
+ * @write: 1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+ __u64 ptr;
+ __u64 offset;
+ __u64 size;
+ int write;
+ int __padding;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+ _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as closely as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index 9cd59871adb2af..f0d6f0c382079d 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -378,7 +378,7 @@ struct bcm_mini_adapter {
 UINT uiFlashLayoutMinorVersion;
 bool bAllDSDWriteAllow;
 bool bSigCorrupted;
- /* this should be set who so ever want to change the Headers. after Wrtie it should be reset immediately. */
+ /* this should be set who so ever want to change the Headers. after Write it should be reset immediately.
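The ion_test ioctls above pair with the ion-test misc driver added earlier in this patch: userspace attaches a dma-buf fd with ION_IOC_TEST_SET_FD and then moves data through the kernel or DMA mapping of that buffer. A minimal sketch follows, assuming the device node appears as /dev/ion-test (the misc name registered in ion_test_probe), that dmabuf_fd came from ION_IOC_SHARE, and that the header is visible locally as "ion_test.h".

/* Read the first bytes of an Ion buffer back through the ion-test driver. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion_test.h"		/* assumed local copy of the uapi header above */

int ion_test_read_example(int dmabuf_fd)
{
	uint8_t bytes[16];
	struct ion_test_rw_data rw = {
		.ptr = (uintptr_t)bytes,	/* user buffer the driver copies into */
		.offset = 0,
		.size = sizeof(bytes),
		.write = 0,			/* 0 = read from the ion buffer */
	};
	int fd = open("/dev/ion-test", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;

	/* attach the dma-buf; passing -1 later releases it again */
	ret = ioctl(fd, ION_IOC_TEST_SET_FD, dmabuf_fd);
	if (!ret)
		ret = ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw);

	ioctl(fd, ION_IOC_TEST_SET_FD, -1);
	close(fd);
	return ret;
}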
*/ bool bHeaderChangeAllowed; int SelectedChip; bool bEndPointHalted; diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c index 87b74ca84c4215..f1b6de0293c8f6 100644 --- a/drivers/staging/bcm/Bcmchar.c +++ b/drivers/staging/bcm/Bcmchar.c @@ -160,7 +160,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) struct bcm_ioctl_buffer IoBuffer; int bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, + "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", + cmd, arg); if (_IOC_TYPE(cmd) != BCM_IOCTL) return -EFAULT; @@ -266,7 +268,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (uiTempVar == EEPROM_REJECT_REG_3) || (uiTempVar == EEPROM_REJECT_REG_4))) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "EEPROM Access Denied, not in VSG Mode\n"); return -EFAULT; } @@ -274,9 +277,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (PUINT)sWrmBuffer.Data, sizeof(ULONG)); if (Status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, "WRM Done\n"); } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, "WRM Failed\n"); Status = -EFAULT; } break; @@ -291,7 +296,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "Device in Idle Mode, Blocking Rdms\n"); return -EACCES; } @@ -317,7 +323,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) || ((ULONG)sRdmBuffer.Register & 0x3)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n", + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "RDM Done On invalid Address : %x Access Denied.\n", (int)sRdmBuffer.Register); kfree(temp_buff); @@ -325,7 +332,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) } uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK; - bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength); + bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, + (PUINT)temp_buff, IoBuffer.OutputLength); if (bytes > 0) { Status = STATUS_SUCCESS; @@ -349,7 +357,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "Device in Idle Mode, Blocking Wrms\n"); return -EACCES; } @@ -367,7 +376,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) || ((ULONG)sWrmBuffer.Register & 0x3)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)sWrmBuffer.Register); + BCM_DEBUG_PRINT(Adapter, 
DBG_TYPE_PRINTK, 0, 0, + "WRM Done On invalid Address : %x Access Denied.\n", + (int)sWrmBuffer.Register); return -EINVAL; } @@ -379,17 +390,21 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (uiTempVar == EEPROM_REJECT_REG_4)) && (cmd == IOCTL_BCM_REGISTER_WRITE)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "EEPROM Access Denied, not in VSG Mode\n"); return -EFAULT; } Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register, - (PUINT)sWrmBuffer.Data, sWrmBuffer.Length); + (PUINT)sWrmBuffer.Data, + sWrmBuffer.Length); if (Status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, + DBG_LVL_ALL, "WRM Done\n"); } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, "WRM Failed\n"); Status = -EFAULT; } break; @@ -405,7 +420,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, + "GPIO Can't be set/clear in Low power Mode"); return -EACCES; } @@ -423,7 +440,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) value = (1< is not correspond to LED !!!", value); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, + "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!", + value); Status = -EINVAL; break; } @@ -431,27 +451,42 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) /* Set - setting 1 */ if (uiOperation) { /* Set the gpio output register */ - Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, (PUINT)(&value), sizeof(UINT)); + Status = wrmaltWithLock(Adapter, + BCM_GPIO_OUTPUT_SET_REG, + (PUINT)(&value), sizeof(UINT)); if (Status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + OSAL_DBG, DBG_LVL_ALL, + "Set the GPIO bit\n"); } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to set the %dth GPIO\n", uiBit); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + OSAL_DBG, DBG_LVL_ALL, + "Failed to set the %dth GPIO\n", + uiBit); break; } } else { /* Set the gpio output register */ - Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)(&value), sizeof(UINT)); + Status = wrmaltWithLock(Adapter, + BCM_GPIO_OUTPUT_CLR_REG, + (PUINT)(&value), sizeof(UINT)); if (Status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + OSAL_DBG, DBG_LVL_ALL, + "Set the GPIO bit\n"); } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to clear the %dth GPIO\n", uiBit); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + OSAL_DBG, DBG_LVL_ALL, + "Failed to clear the %dth GPIO\n", + uiBit); break; } } - bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT)); + bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, + (PUINT)ucResetValue, sizeof(UINT)); if (bytes < 0) { Status = bytes; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, 
OSAL_DBG, DBG_LVL_ALL, @@ -467,9 +502,13 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (PUINT)ucResetValue, sizeof(UINT)); if (Status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO to output Mode\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, + "Set the GPIO to output Mode\n"); } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to put GPIO in Output Mode\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, + "Failed to put GPIO in Output Mode\n"); break; } } @@ -477,13 +516,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) case BCM_LED_THREAD_STATE_CHANGE_REQ: { struct bcm_user_thread_req threadReq = {0}; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "User made LED thread InActive"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, + "User made LED thread InActive"); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, + "GPIO Can't be set/clear in Low power Mode"); Status = -EACCES; break; } @@ -500,10 +542,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) /* if LED thread is running(Actively or Inactively) set it state to make inactive */ if (Adapter->LEDInfo.led_thread_running) { if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Activating thread req"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + OSAL_DBG, DBG_LVL_ALL, + "Activating thread req"); Adapter->DriverState = LED_THREAD_ACTIVE; } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DeActivating Thread req....."); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + OSAL_DBG, DBG_LVL_ALL, + "DeActivating Thread req....."); Adapter->DriverState = LED_THREAD_INACTIVE; } @@ -540,7 +586,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) if (bytes < 0) { Status = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "RDM Failed\n"); return Status; } else { Status = STATUS_SUCCESS; @@ -570,9 +617,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) return -EFAULT; if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, + DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!", - pgpio_multi_info[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap); + pgpio_multi_info[WIMAX_IDX].uiGPIOMask, + Adapter->gpioBitMap); Status = -EINVAL; break; } @@ -590,7 +639,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) (PUINT)ucResetValue, sizeof(ULONG)); if (Status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_SET_REG Failed."); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "WRM to BCM_GPIO_OUTPUT_SET_REG Failed."); return Status; } @@ -603,7 +653,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG)); if 
(Status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed."); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed."); return Status; } } @@ -613,7 +664,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) if (bytes < 0) { Status = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed."); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, + "RDM to GPIO_PIN_STATE_REGISTER Failed."); return Status; } else { Status = STATUS_SUCCESS; @@ -1190,7 +1242,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) break; case IOCTL_BCM_CAL_INIT: { - UINT uiSectorSize = 0 ; + UINT uiSectorSize = 0; if (Adapter->eNVMType == NVM_FLASH) { if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) return -EFAULT; @@ -1403,7 +1455,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) case IOCTL_BCM_FLASH2X_SECTION_READ: { struct bcm_flash2x_readwrite sFlash2xRead = {0}; - PUCHAR pReadBuff = NULL ; + PUCHAR pReadBuff = NULL; UINT NOB = 0; UINT BuffSize = 0; UINT ReadBytes = 0; @@ -1438,7 +1490,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) else BuffSize = NOB; - ReadOffset = sFlash2xRead.offset ; + ReadOffset = sFlash2xRead.offset; OutPutBuff = IoBuffer.OutputBuffer; pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL); @@ -1483,7 +1535,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) NOB = NOB - ReadBytes; if (NOB) { ReadOffset = ReadOffset + ReadBytes; - OutPutBuff = OutPutBuff + ReadBytes ; + OutPutBuff = OutPutBuff + ReadBytes; } } @@ -1538,7 +1590,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) if (NOB > Adapter->uiSectorSize) BuffSize = Adapter->uiSectorSize; else - BuffSize = NOB ; + BuffSize = NOB; pWriteBuff = kmalloc(BuffSize, GFP_KERNEL); @@ -1718,12 +1770,12 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes); if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exixt in Flash ", sCopySectStrut.SrcSection); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exist in Flash ", sCopySectStrut.SrcSection); return -EINVAL; } if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exixt in Flash ", sCopySectStrut.DstSection); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exist in Flash ", sCopySectStrut.DstSection); return -EINVAL; } @@ -1828,7 +1880,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal); if (SectOfset == INVALID_OFFSET) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exixt in Flash 2.x", eFlash2xSectionVal); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exist in Flash 2.x", eFlash2xSectionVal); return -EINVAL; } @@ -1841,10 +1893,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) case IOCTL_BCM_NVM_RAW_READ: { struct bcm_nvm_readwrite stNVMRead; - INT NOB ; - INT BuffSize ; + INT NOB; + INT BuffSize; INT ReadOffset = 0; - UINT ReadBytes = 0 ; + UINT 
ReadBytes = 0; PUCHAR pReadBuff; void __user *OutPutBuff; diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c index 53fee2f9a49886..8dfdd2732bdc32 100644 --- a/drivers/staging/bcm/Bcmnet.c +++ b/drivers/staging/bcm/Bcmnet.c @@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev) return 0; } -static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { return ClassifyPacket(netdev_priv(dev), skb); } diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c index 9f7e30f637eaff..ed285b2d892daa 100644 --- a/drivers/staging/bcm/DDRInit.c +++ b/drivers/staging/bcm/DDRInit.c @@ -5,882 +5,865 @@ #define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00 #define MIPS_CLOCK_REG 0x0f000820 - //DDR INIT-133Mhz -#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 //index for 0x0F007000 -static struct bcm_ddr_setting asT3_DDRSetting133MHz[]= {// # DPLL Clock Setting - {0x0F000800,0x00007212}, - {0x0f000820,0x07F13FFF}, - {0x0f000810,0x00000F95}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - // Changed source for X-bar and MIPS clock to APLL - {0x0f000840,0x0FFF1B00}, - {0x0f000870,0x00000002}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - {0x0F00a04C,0x0000000C}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001 - {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x02000007}, - {0x0F007028,0x02020202}, - {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a - {0x0F007030,0x05000000}, - {0x0F007034,0x00000003}, - {0x0F007038,0x110a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200 - {0x0F00703C,0x02101010},//ROB - 0x02101010,//0x02101018}, - {0x0F007040,0x45751200},//ROB - 0x45751200,//0x450f1200}, - {0x0F007044,0x110a0d00},//ROB - 0x110a0d00//0x111f0d00 - {0x0F007048,0x081b0306}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0000001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x0010246c}, - {0x0F007064,0x00000010}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00007000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - //# Enable BW improvement within memory controller - {0x0F007094,0x00000104}, - //# Enable 2 ports within X-bar - {0x0F00A000,0x00000016}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000} - }; -//80Mhz -#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 //index for 0x0F007000 -static struct bcm_ddr_setting asT3_DDRSetting80MHz[]= {// # DPLL Clock Setting - {0x0f000810,0x00000F95}, - {0x0f000820,0x07f1ffff}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - {0x0F00a000,0x00000016}, - {0x0F00a04C,0x0000000C}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01000000}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020000}, - {0x0F007020,0x04020107}, - {0x0F007024,0x00000007}, - 
{0x0F007028,0x02020201}, - {0x0F00702c,0x0204040a}, - {0x0F007030,0x04000000}, - {0x0F007034,0x00000002}, - {0x0F007038,0x1F060200}, - {0x0F00703C,0x1C22221F}, - {0x0F007040,0x8A006600}, - {0x0F007044,0x221a0800}, - {0x0F007048,0x02690204}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0000001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x000A15D6}, - {0x0F007064,0x0000000A}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00004000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - {0x0F007094,0x00000104}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000} - }; -//100Mhz -#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 //index for 0x0F007000 -static struct bcm_ddr_setting asT3_DDRSetting100MHz[]= {// # DPLL Clock Setting - {0x0F000800,0x00007008}, - {0x0f000810,0x00000F95}, - {0x0f000820,0x07F13E3F}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - // Changed source for X-bar and MIPS clock to APLL - //0x0f000840,0x0FFF1800, - {0x0f000840,0x0FFF1B00}, - {0x0f000870,0x00000002}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - {0x0F00a04C,0x0000000C}, - //# Enable 2 ports within X-bar - {0x0F00A000,0x00000016}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020001}, // POP - 0x00020000 Normal 0x01020000 - {0x0F007020,0x04020107},//Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x00000007}, - {0x0F007028,0x01020201}, - {0x0F00702c,0x0204040A}, - {0x0F007030,0x06000000}, - {0x0F007034,0x00000004}, - {0x0F007038,0x20080200}, - {0x0F00703C,0x02030320}, - {0x0F007040,0x6E7F1200}, - {0x0F007044,0x01190A00}, - {0x0F007048,0x06120305},//0x02690204 // 0x06120305 - {0x0F00704c,0x00000000}, - {0x0F007050,0x0000001C}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x00082ED6}, - {0x0F007064,0x0000000A}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00005000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - //# Enable BW improvement within memory controller - {0x0F007094,0x00000104}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000} - }; +/* DDR INIT-133Mhz */ +#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3_DDRSetting133MHz[] = { /* DPLL Clock Setting */ + {0x0F000800, 0x00007212}, + {0x0f000820, 0x07F13FFF}, + {0x0f000810, 0x00000F95}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0f000840, 0x0FFF1B00}, + {0x0f000870, 0x00000002}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + {0x0F00a04C, 0x0000000C}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020001}, + {0x0F007020, 0x04030107}, + {0x0F007024, 0x02000007}, + {0x0F007028, 0x02020202}, + {0x0F00702c, 0x0206060a}, + {0x0F007030, 
0x05000000}, + {0x0F007034, 0x00000003}, + {0x0F007038, 0x110a0200}, + {0x0F00703C, 0x02101010}, + {0x0F007040, 0x45751200}, + {0x0F007044, 0x110a0d00}, + {0x0F007048, 0x081b0306}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0000001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x0010246c}, + {0x0F007064, 0x00000010}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00007000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + /* Enable BW improvement within memory controller */ + {0x0F007094, 0x00000104}, + /* Enable 2 ports within X-bar */ + {0x0F00A000, 0x00000016}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} +}; +/* 80Mhz */ +#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3_DDRSetting80MHz[] = { /* DPLL Clock Setting */ + {0x0f000810, 0x00000F95}, + {0x0f000820, 0x07f1ffff}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + {0x0F00a000, 0x00000016}, + {0x0F00a04C, 0x0000000C}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01000000}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020000}, + {0x0F007020, 0x04020107}, + {0x0F007024, 0x00000007}, + {0x0F007028, 0x02020201}, + {0x0F00702c, 0x0204040a}, + {0x0F007030, 0x04000000}, + {0x0F007034, 0x00000002}, + {0x0F007038, 0x1F060200}, + {0x0F00703C, 0x1C22221F}, + {0x0F007040, 0x8A006600}, + {0x0F007044, 0x221a0800}, + {0x0F007048, 0x02690204}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0000001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x000A15D6}, + {0x0F007064, 0x0000000A}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00004000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + {0x0F007094, 0x00000104}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} +}; +/* 100Mhz */ +#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3_DDRSetting100MHz[] = { /* DPLL Clock Setting */ + {0x0F000800, 0x00007008}, + {0x0f000810, 0x00000F95}, + {0x0f000820, 0x07F13E3F}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0f000840, 0x0FFF1B00}, + {0x0f000870, 0x00000002}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + {0x0F00a04C, 0x0000000C}, + /* Enable 2 ports within X-bar */ + {0x0F00A000, 0x00000016}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020001}, + {0x0F007020, 0x04020107}, + {0x0F007024, 0x00000007}, + {0x0F007028, 0x01020201}, + {0x0F00702c, 0x0204040A}, + {0x0F007030, 0x06000000}, + {0x0F007034, 0x00000004}, + {0x0F007038, 0x20080200}, + {0x0F00703C, 0x02030320}, + {0x0F007040, 0x6E7F1200}, + {0x0F007044, 0x01190A00}, + 
{0x0F007048, 0x06120305}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0000001C}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x00082ED6}, + {0x0F007064, 0x0000000A}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00005000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + /* Enable BW improvement within memory controller */ + {0x0F007094, 0x00000104}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} +}; -//Net T3B DDR Settings -//DDR INIT-133Mhz +/* Net T3B DDR Settings + * DDR INIT-133Mhz + */ static struct bcm_ddr_setting asDPLL_266MHZ[] = { - {0x0F000800,0x00007212}, - {0x0f000820,0x07F13FFF}, - {0x0f000810,0x00000F95}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - // Changed source for X-bar and MIPS clock to APLL - {0x0f000840,0x0FFF1B00}, - {0x0f000870,0x00000002} - }; + {0x0F000800, 0x00007212}, + {0x0f000820, 0x07F13FFF}, + {0x0f000810, 0x00000F95}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0f000840, 0x0FFF1B00}, + {0x0f000870, 0x00000002} +}; -#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 //index for 0x0F007000 -static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = {// # DPLL Clock Setting - {0x0f000810,0x00000F95}, - {0x0f000810,0x00000F95}, - {0x0f000810,0x00000F95}, - {0x0f000820,0x07F13652}, - {0x0f000840,0x0FFF0800}, - // Changed source for X-bar and MIPS clock to APLL - {0x0f000880,0x000003DD}, - {0x0f000860,0x00000000}, - // Changed source for X-bar and MIPS clock to APLL - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - //# Enable 2 ports within X-bar - {0x0F00A000,0x00000016}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001 - {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x02000007}, - {0x0F007028,0x02020202}, - {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a - {0x0F007030,0x05000000}, - {0x0F007034,0x00000003}, - {0x0F007038,0x130a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200 - {0x0F00703C,0x02101012},//ROB - 0x02101010,//0x02101018}, - {0x0F007040,0x457D1200},//ROB - 0x45751200,//0x450f1200}, - {0x0F007044,0x11130d00},//ROB - 0x110a0d00//0x111f0d00 - {0x0F007048,0x040D0306}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0000001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x0010246c}, - {0x0F007064,0x00000012}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00007000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - //# Enable BW improvement within memory controller - {0x0F007094,0x00000104}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000}, - }; +#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = { /* DPLL Clock Setting */ + {0x0f000810, 0x00000F95}, + {0x0f000810, 0x00000F95}, + {0x0f000810, 0x00000F95}, + {0x0f000820, 0x07F13652}, + {0x0f000840, 0x0FFF0800}, + /* Changed source for 
X-bar and MIPS clock to APLL */ + {0x0f000880, 0x000003DD}, + {0x0f000860, 0x00000000}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + /* Enable 2 ports within X-bar */ + {0x0F00A000, 0x00000016}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020001}, + {0x0F007020, 0x04030107}, + {0x0F007024, 0x02000007}, + {0x0F007028, 0x02020202}, + {0x0F00702c, 0x0206060a}, + {0x0F007030, 0x05000000}, + {0x0F007034, 0x00000003}, + {0x0F007038, 0x130a0200}, + {0x0F00703C, 0x02101012}, + {0x0F007040, 0x457D1200}, + {0x0F007044, 0x11130d00}, + {0x0F007048, 0x040D0306}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0000001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x0010246c}, + {0x0F007064, 0x00000012}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00007000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + /* Enable BW improvement within memory controller */ + {0x0F007094, 0x00000104}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000}, + }; -#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 //index for 0x0F007000 -static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = {// # DPLL Clock Setting - {0x0f000810,0x00000F95}, - {0x0f000820,0x07F13FFF}, - {0x0f000840,0x0FFF1F00}, - {0x0f000880,0x000003DD}, - {0x0f000860,0x00000000}, +#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = { /* DPLL Clock Setting */ + {0x0f000810, 0x00000F95}, + {0x0f000820, 0x07F13FFF}, + {0x0f000840, 0x0FFF1F00}, + {0x0f000880, 0x000003DD}, + {0x0f000860, 0x00000000}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - {0x0F00a000,0x00000016}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01000000}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020000}, - {0x0F007020,0x04020107}, - {0x0F007024,0x00000007}, - {0x0F007028,0x02020201}, - {0x0F00702c,0x0204040a}, - {0x0F007030,0x04000000}, - {0x0F007034,0x02000002}, - {0x0F007038,0x1F060202}, - {0x0F00703C,0x1C22221F}, - {0x0F007040,0x8A006600}, - {0x0F007044,0x221a0800}, - {0x0F007048,0x02690204}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0100001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x000A15D6}, - {0x0F007064,0x0000000A}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00004000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - {0x0F007094,0x00000104}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000} - }; + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + {0x0F00a000, 0x00000016}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01000000}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 
0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020000}, + {0x0F007020, 0x04020107}, + {0x0F007024, 0x00000007}, + {0x0F007028, 0x02020201}, + {0x0F00702c, 0x0204040a}, + {0x0F007030, 0x04000000}, + {0x0F007034, 0x02000002}, + {0x0F007038, 0x1F060202}, + {0x0F00703C, 0x1C22221F}, + {0x0F007040, 0x8A006600}, + {0x0F007044, 0x221a0800}, + {0x0F007048, 0x02690204}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0100001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x000A15D6}, + {0x0F007064, 0x0000000A}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00004000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + {0x0F007094, 0x00000104}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} +}; -//100Mhz -#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 //index for 0x0F007000 -static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = {// # DPLL Clock Setting - {0x0f000810,0x00000F95}, - {0x0f000820,0x07F1369B}, - {0x0f000840,0x0FFF0800}, - {0x0f000880,0x000003DD}, - {0x0f000860,0x00000000}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - //# Enable 2 ports within X-bar - {0x0F00A000,0x00000016}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020000}, // POP - 0x00020000 Normal 0x01020000 - {0x0F007020,0x04020107},//Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x00000007}, - {0x0F007028,0x01020201}, - {0x0F00702c,0x0204040A}, - {0x0F007030,0x06000000}, - {0x0F007034,0x02000004}, - {0x0F007038,0x20080200}, - {0x0F00703C,0x02030320}, - {0x0F007040,0x6E7F1200}, - {0x0F007044,0x01190A00}, - {0x0F007048,0x06120305},//0x02690204 // 0x06120305 - {0x0F00704c,0x00000000}, - {0x0F007050,0x0100001C}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x00082ED6}, - {0x0F007064,0x0000000A}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00005000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - //# Enable BW improvement within memory controller - {0x0F007094,0x00000104}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000} - }; +/* 100Mhz */ +#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = { /* DPLL Clock Setting */ + {0x0f000810, 0x00000F95}, + {0x0f000820, 0x07F1369B}, + {0x0f000840, 0x0FFF0800}, + {0x0f000880, 0x000003DD}, + {0x0f000860, 0x00000000}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + /* Enable 2 ports within X-bar */ + {0x0F00A000, 0x00000016}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020000}, + {0x0F007020, 0x04020107}, + {0x0F007024, 0x00000007}, + {0x0F007028, 0x01020201}, + {0x0F00702c, 0x0204040A}, + {0x0F007030, 0x06000000}, + {0x0F007034, 0x02000004}, + {0x0F007038, 
0x20080200}, + {0x0F00703C, 0x02030320}, + {0x0F007040, 0x6E7F1200}, + {0x0F007044, 0x01190A00}, + {0x0F007048, 0x06120305}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0100001C}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x00082ED6}, + {0x0F007064, 0x0000000A}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00005000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + /* Enable BW improvement within memory controller */ + {0x0F007094, 0x00000104}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} +}; -#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 //index for 0x0F007000 -static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[]= {// # DPLL Clock Setting - {0x0f000820,0x03F1365B}, - {0x0f000810,0x00002F95}, - {0x0f000880,0x000003DD}, - // Changed source for X-bar and MIPS clock to APLL - {0x0f000840,0x0FFF0000}, - {0x0f000860,0x00000000}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - {0x0F00A000,0x00000016}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001 - {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x02000007}, - {0x0F007028,0x02020200}, - {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a - {0x0F007030,0x05000000}, - {0x0F007034,0x00000003}, - {0x0F007038,0x200a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200 - {0x0F00703C,0x02101020},//ROB - 0x02101010,//0x02101018, - {0x0F007040,0x45711200},//ROB - 0x45751200,//0x450f1200, - {0x0F007044,0x110D0D00},//ROB - 0x110a0d00//0x111f0d00 - {0x0F007048,0x04080306}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0100001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x0010245F}, - {0x0F007064,0x00000010}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00007000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - {0x0F007088,0x01000001}, - {0x0F00708c,0x00000101}, - {0x0F007090,0x00000000}, - //# Enable BW improvement within memory controller - {0x0F007094,0x00040000}, - {0x0F007098,0x00000000}, - {0x0F0070c8,0x00000104}, - //# Enable 2 ports within X-bar - //# Enable start bit within memory controller - {0x0F007018,0x01010000} +#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[] = { /* DPLL Clock Setting */ + {0x0f000820, 0x03F1365B}, + {0x0f000810, 0x00002F95}, + {0x0f000880, 0x000003DD}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0f000840, 0x0FFF0000}, + {0x0f000860, 0x00000000}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + {0x0F00A000, 0x00000016}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020001}, + {0x0F007020, 0x04030107}, + {0x0F007024, 0x02000007}, + {0x0F007028, 0x02020200}, + 
{0x0F00702c, 0x0206060a}, + {0x0F007030, 0x05000000}, + {0x0F007034, 0x00000003}, + {0x0F007038, 0x200a0200}, + {0x0F00703C, 0x02101020}, + {0x0F007040, 0x45711200}, + {0x0F007044, 0x110D0D00}, + {0x0F007048, 0x04080306}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0100001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x0010245F}, + {0x0F007064, 0x00000010}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00007000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + {0x0F007088, 0x01000001}, + {0x0F00708c, 0x00000101}, + {0x0F007090, 0x00000000}, + /* Enable BW improvement within memory controller */ + {0x0F007094, 0x00040000}, + {0x0F007098, 0x00000000}, + {0x0F0070c8, 0x00000104}, + /* Enable 2 ports within X-bar */ + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} }; -#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 //index for 0x0F007000 -static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[]= {// # DPLL Clock Setting - {0x0f000810,0x00002F95}, - {0x0f000820,0x03F1369B}, - {0x0f000840,0x0fff0000}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - // Changed source for X-bar and MIPS clock to APLL - {0x0f000840,0x0FFF0000}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020000},// POP - 0x00020001 Normal 0x01020001 - {0x0F007020,0x04020107}, //Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x00000007}, - {0x0F007028,0x01020200}, - {0x0F00702c,0x0204040a},//ROB- 0x0205050a,//0x0206060a - {0x0F007030,0x06000000}, - {0x0F007034,0x00000004}, - {0x0F007038,0x1F080200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200 - {0x0F00703C,0x0203031F},//ROB - 0x02101010,//0x02101018, - {0x0F007040,0x6e001200},//ROB - 0x45751200,//0x450f1200, - {0x0F007044,0x011a0a00},//ROB - 0x110a0d00//0x111f0d00 - {0x0F007048,0x03000305}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0100001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x00082ED6}, - {0x0F007064,0x0000000A}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00005000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - {0x0F007088,0x01000001}, - {0x0F00708c,0x00000101}, - {0x0F007090,0x00000000}, - {0x0F007094,0x00010000}, - {0x0F007098,0x00000000}, - {0x0F0070C8,0x00000104}, - //# Enable 2 ports within X-bar - {0x0F00A000,0x00000016}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000} +#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[] = { /* DPLL Clock Setting */ + {0x0f000810, 0x00002F95}, + {0x0f000820, 0x03F1369B}, + {0x0f000840, 0x0fff0000}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0f000840, 0x0FFF0000}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + 
{0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020000}, + {0x0F007020, 0x04020107}, + {0x0F007024, 0x00000007}, + {0x0F007028, 0x01020200}, + {0x0F00702c, 0x0204040a}, + {0x0F007030, 0x06000000}, + {0x0F007034, 0x00000004}, + {0x0F007038, 0x1F080200}, + {0x0F00703C, 0x0203031F}, + {0x0F007040, 0x6e001200}, + {0x0F007044, 0x011a0a00}, + {0x0F007048, 0x03000305}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0100001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x00082ED6}, + {0x0F007064, 0x0000000A}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00005000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + {0x0F007088, 0x01000001}, + {0x0F00708c, 0x00000101}, + {0x0F007090, 0x00000000}, + {0x0F007094, 0x00010000}, + {0x0F007098, 0x00000000}, + {0x0F0070C8, 0x00000104}, + /* Enable 2 ports within X-bar */ + {0x0F00A000, 0x00000016}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} }; -#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 //index for 0x0F007000 -static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[]= {// # DPLL Clock Setting - {0x0f000820,0x07F13FFF}, - {0x0f000810,0x00002F95}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - {0x0f000840,0x0FFF1F00}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0F00a084,0x1Cffffff}, - {0x0F00a080,0x1C000000}, - {0x0F00A000,0x00000016}, - {0x0f007000,0x00010001}, - {0x0f007004,0x01000000}, - {0x0f007008,0x01000001}, - {0x0f00700c,0x00000000}, - {0x0f007010,0x01000000}, - {0x0f007014,0x01000100}, - {0x0f007018,0x01000000}, - {0x0f00701c,0x01020000}, - {0x0f007020,0x04020107}, - {0x0f007024,0x00000007}, - {0x0f007028,0x02020200}, - {0x0f00702c,0x0204040a}, - {0x0f007030,0x04000000}, - {0x0f007034,0x00000002}, - {0x0f007038,0x1d060200}, - {0x0f00703c,0x1c22221d}, - {0x0f007040,0x8A116600}, - {0x0f007044,0x222d0800}, - {0x0f007048,0x02690204}, - {0x0f00704c,0x00000000}, - {0x0f007050,0x0100001c}, - {0x0f007054,0x00000000}, - {0x0f007058,0x00000000}, - {0x0f00705c,0x00000000}, - {0x0f007060,0x000A15D6}, - {0x0f007064,0x0000000A}, - {0x0f007068,0x00000000}, - {0x0f00706c,0x00000001}, - {0x0f007070,0x00004000}, - {0x0f007074,0x00000000}, - {0x0f007078,0x00000000}, - {0x0f00707c,0x00000000}, - {0x0f007080,0x00000000}, - {0x0f007084,0x00000000}, - {0x0f007088,0x01000001}, - {0x0f00708c,0x00000101}, - {0x0f007090,0x00000000}, - {0x0f007094,0x00010000}, - {0x0f007098,0x00000000}, - {0x0F0070C8,0x00000104}, - {0x0F007018,0x01010000} +#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[] = { /* DPLL Clock Setting */ + {0x0f000820, 0x07F13FFF}, + {0x0f000810, 0x00002F95}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + {0x0f000840, 0x0FFF1F00}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0F00a084, 0x1Cffffff}, + {0x0F00a080, 0x1C000000}, + {0x0F00A000, 0x00000016}, + {0x0f007000, 0x00010001}, + {0x0f007004, 0x01000000}, + {0x0f007008, 0x01000001}, + {0x0f00700c, 0x00000000}, + {0x0f007010, 0x01000000}, + {0x0f007014, 0x01000100}, + {0x0f007018, 0x01000000}, + {0x0f00701c, 0x01020000}, + {0x0f007020, 0x04020107}, + {0x0f007024, 0x00000007}, + {0x0f007028, 0x02020200}, + {0x0f00702c, 0x0204040a}, + {0x0f007030, 0x04000000}, + {0x0f007034, 
0x00000002}, + {0x0f007038, 0x1d060200}, + {0x0f00703c, 0x1c22221d}, + {0x0f007040, 0x8A116600}, + {0x0f007044, 0x222d0800}, + {0x0f007048, 0x02690204}, + {0x0f00704c, 0x00000000}, + {0x0f007050, 0x0100001c}, + {0x0f007054, 0x00000000}, + {0x0f007058, 0x00000000}, + {0x0f00705c, 0x00000000}, + {0x0f007060, 0x000A15D6}, + {0x0f007064, 0x0000000A}, + {0x0f007068, 0x00000000}, + {0x0f00706c, 0x00000001}, + {0x0f007070, 0x00004000}, + {0x0f007074, 0x00000000}, + {0x0f007078, 0x00000000}, + {0x0f00707c, 0x00000000}, + {0x0f007080, 0x00000000}, + {0x0f007084, 0x00000000}, + {0x0f007088, 0x01000001}, + {0x0f00708c, 0x00000101}, + {0x0f007090, 0x00000000}, + {0x0f007094, 0x00010000}, + {0x0f007098, 0x00000000}, + {0x0F0070C8, 0x00000104}, + {0x0F007018, 0x01010000} }; -///T3 LP-B (UMA-B) +/* T3 LP-B (UMA-B) */ -#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 //index for 0x0F007000 -static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[]= {// # DPLL Clock Setting - - {0x0f000820,0x03F137DB}, - {0x0f000810,0x01842795}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - {0x0f000840,0x0FFF0400}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0f003050,0x00000021},//this is flash/eeprom clock divisor which set the flash clock to 20 MHz - {0x0F00a084,0x1Cffffff},//Now dump from her in internal memory - {0x0F00a080,0x1C000000}, - {0x0F00A000,0x00000016}, - {0x0f007000,0x00010001}, - {0x0f007004,0x01000001}, - {0x0f007008,0x01000101}, - {0x0f00700c,0x00000000}, - {0x0f007010,0x01000100}, - {0x0f007014,0x01000100}, - {0x0f007018,0x01000000}, - {0x0f00701c,0x01020000}, - {0x0f007020,0x04030107}, - {0x0f007024,0x02000007}, - {0x0f007028,0x02020200}, - {0x0f00702c,0x0206060a}, - {0x0f007030,0x050d0d00}, - {0x0f007034,0x00000003}, - {0x0f007038,0x170a0200}, - {0x0f00703c,0x02101012}, - {0x0f007040,0x45161200}, - {0x0f007044,0x11250c00}, - {0x0f007048,0x04da0307}, - {0x0f00704c,0x00000000}, - {0x0f007050,0x0000001c}, - {0x0f007054,0x00000000}, - {0x0f007058,0x00000000}, - {0x0f00705c,0x00000000}, - {0x0f007060,0x00142bb6}, - {0x0f007064,0x20430014}, - {0x0f007068,0x00000000}, - {0x0f00706c,0x00000001}, - {0x0f007070,0x00009000}, - {0x0f007074,0x00000000}, - {0x0f007078,0x00000000}, - {0x0f00707c,0x00000000}, - {0x0f007080,0x00000000}, - {0x0f007084,0x00000000}, - {0x0f007088,0x01000001}, - {0x0f00708c,0x00000101}, - {0x0f007090,0x00000000}, - {0x0f007094,0x00040000}, - {0x0f007098,0x00000000}, - {0x0F0070C8,0x00000104}, - {0x0F007018,0x01010000} +#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[] = { /* DPLL Clock Setting */ + {0x0f000820, 0x03F137DB}, + {0x0f000810, 0x01842795}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + {0x0f000840, 0x0FFF0400}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0f003050, 0x00000021}, /* this is flash/eeprom clock divisor which set the flash clock to 20 MHz */ + {0x0F00a084, 0x1Cffffff}, /* Now dump from her in internal memory */ + {0x0F00a080, 0x1C000000}, + {0x0F00A000, 0x00000016}, + {0x0f007000, 0x00010001}, + {0x0f007004, 0x01000001}, + {0x0f007008, 0x01000101}, + {0x0f00700c, 0x00000000}, + {0x0f007010, 0x01000100}, + {0x0f007014, 0x01000100}, + {0x0f007018, 0x01000000}, + {0x0f00701c, 0x01020000}, + {0x0f007020, 0x04030107}, + {0x0f007024, 0x02000007}, + {0x0f007028, 0x02020200}, + {0x0f00702c, 0x0206060a}, + {0x0f007030, 0x050d0d00}, + {0x0f007034, 0x00000003}, + {0x0f007038, 0x170a0200}, + {0x0f00703c, 0x02101012}, + {0x0f007040, 0x45161200}, + 
{0x0f007044, 0x11250c00}, + {0x0f007048, 0x04da0307}, + {0x0f00704c, 0x00000000}, + {0x0f007050, 0x0000001c}, + {0x0f007054, 0x00000000}, + {0x0f007058, 0x00000000}, + {0x0f00705c, 0x00000000}, + {0x0f007060, 0x00142bb6}, + {0x0f007064, 0x20430014}, + {0x0f007068, 0x00000000}, + {0x0f00706c, 0x00000001}, + {0x0f007070, 0x00009000}, + {0x0f007074, 0x00000000}, + {0x0f007078, 0x00000000}, + {0x0f00707c, 0x00000000}, + {0x0f007080, 0x00000000}, + {0x0f007084, 0x00000000}, + {0x0f007088, 0x01000001}, + {0x0f00708c, 0x00000101}, + {0x0f007090, 0x00000000}, + {0x0f007094, 0x00040000}, + {0x0f007098, 0x00000000}, + {0x0F0070C8, 0x00000104}, + {0x0F007018, 0x01010000} }; -#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 //index for 0x0F007000 -static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[]= {// # DPLL Clock Setting - {0x0f000820,0x03F1365B}, - {0x0f000810,0x00002F95}, - {0x0f000880,0x000003DD}, - // Changed source for X-bar and MIPS clock to APLL - {0x0f000840,0x0FFF0000}, - {0x0f000860,0x00000000}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz - {0x0F00a084,0x1Cffffff},//dump from here in internal memory - {0x0F00a080,0x1C000000}, - {0x0F00A000,0x00000016}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001 - {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x02000007}, - {0x0F007028,0x02020200}, - {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a - {0x0F007030,0x05000000}, - {0x0F007034,0x00000003}, - {0x0F007038,0x190a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200 - {0x0F00703C,0x02101017},//ROB - 0x02101010,//0x02101018, - {0x0F007040,0x45171200},//ROB - 0x45751200,//0x450f1200, - {0x0F007044,0x11290D00},//ROB - 0x110a0d00//0x111f0d00 - {0x0F007048,0x04080306}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0100001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x0010245F}, - {0x0F007064,0x00000010}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00007000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - {0x0F007088,0x01000001}, - {0x0F00708c,0x00000101}, - {0x0F007090,0x00000000}, - //# Enable BW improvement within memory controller - {0x0F007094,0x00040000}, - {0x0F007098,0x00000000}, - {0x0F0070c8,0x00000104}, - //# Enable 2 ports within X-bar - //# Enable start bit within memory controller - {0x0F007018,0x01010000} +#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[] = { /* DPLL Clock Setting */ + {0x0f000820, 0x03F1365B}, + {0x0f000810, 0x00002F95}, + {0x0f000880, 0x000003DD}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0f000840, 0x0FFF0000}, + {0x0f000860, 0x00000000}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which set the flash clock to 20 MHz */ + {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */ + {0x0F00a080, 0x1C000000}, + {0x0F00A000, 0x00000016}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + {0x0F007008, 0x01000001}, + 
{0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020001}, + {0x0F007020, 0x04030107}, + {0x0F007024, 0x02000007}, + {0x0F007028, 0x02020200}, + {0x0F00702c, 0x0206060a}, + {0x0F007030, 0x05000000}, + {0x0F007034, 0x00000003}, + {0x0F007038, 0x190a0200}, + {0x0F00703C, 0x02101017}, + {0x0F007040, 0x45171200}, + {0x0F007044, 0x11290D00}, + {0x0F007048, 0x04080306}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0100001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x0010245F}, + {0x0F007064, 0x00000010}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00007000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + {0x0F007088, 0x01000001}, + {0x0F00708c, 0x00000101}, + {0x0F007090, 0x00000000}, + /* Enable BW improvement within memory controller */ + {0x0F007094, 0x00040000}, + {0x0F007098, 0x00000000}, + {0x0F0070c8, 0x00000104}, + /* Enable 2 ports within X-bar */ + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} }; -#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 //index for 0x0F007000 -static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[]= {// # DPLL Clock Setting - {0x0f000810,0x00002F95}, - {0x0f000820,0x03F1369B}, - {0x0f000840,0x0fff0000}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - // Changed source for X-bar and MIPS clock to APLL - {0x0f000840,0x0FFF0000}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz - {0x0F00a084,0x1Cffffff}, //dump from here in internal memory - {0x0F00a080,0x1C000000}, - //Memcontroller Default values - {0x0F007000,0x00010001}, - {0x0F007004,0x01010100}, - {0x0F007008,0x01000001}, - {0x0F00700c,0x00000000}, - {0x0F007010,0x01000000}, - {0x0F007014,0x01000100}, - {0x0F007018,0x01000000}, - {0x0F00701c,0x01020000},// POP - 0x00020001 Normal 0x01020001 - {0x0F007020,0x04020107}, //Normal - 0x04030107 POP - 0x05030107 - {0x0F007024,0x00000007}, - {0x0F007028,0x01020200}, - {0x0F00702c,0x0204040a},//ROB- 0x0205050a,//0x0206060a - {0x0F007030,0x06000000}, - {0x0F007034,0x00000004}, - {0x0F007038,0x1F080200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200 - {0x0F00703C,0x0203031F},//ROB - 0x02101010,//0x02101018, - {0x0F007040,0x6e001200},//ROB - 0x45751200,//0x450f1200, - {0x0F007044,0x011a0a00},//ROB - 0x110a0d00//0x111f0d00 - {0x0F007048,0x03000305}, - {0x0F00704c,0x00000000}, - {0x0F007050,0x0100001c}, - {0x0F007054,0x00000000}, - {0x0F007058,0x00000000}, - {0x0F00705c,0x00000000}, - {0x0F007060,0x00082ED6}, - {0x0F007064,0x0000000A}, - {0x0F007068,0x00000000}, - {0x0F00706c,0x00000001}, - {0x0F007070,0x00005000}, - {0x0F007074,0x00000000}, - {0x0F007078,0x00000000}, - {0x0F00707C,0x00000000}, - {0x0F007080,0x00000000}, - {0x0F007084,0x00000000}, - {0x0F007088,0x01000001}, - {0x0F00708c,0x00000101}, - {0x0F007090,0x00000000}, - {0x0F007094,0x00010000}, - {0x0F007098,0x00000000}, - {0x0F0070C8,0x00000104}, - //# Enable 2 ports within X-bar - {0x0F00A000,0x00000016}, - //# Enable start bit within memory controller - {0x0F007018,0x01010000} +#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[] = { /* DPLL Clock Setting */ + {0x0f000810, 0x00002F95}, + {0x0f000820, 0x03F1369B}, + {0x0f000840, 0x0fff0000}, + 
{0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + /* Changed source for X-bar and MIPS clock to APLL */ + {0x0f000840, 0x0FFF0000}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which set the flash clock to 20 MHz */ + {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */ + {0x0F00a080, 0x1C000000}, + /* Memcontroller Default values */ + {0x0F007000, 0x00010001}, + {0x0F007004, 0x01010100}, + {0x0F007008, 0x01000001}, + {0x0F00700c, 0x00000000}, + {0x0F007010, 0x01000000}, + {0x0F007014, 0x01000100}, + {0x0F007018, 0x01000000}, + {0x0F00701c, 0x01020000}, + {0x0F007020, 0x04020107}, + {0x0F007024, 0x00000007}, + {0x0F007028, 0x01020200}, + {0x0F00702c, 0x0204040a}, + {0x0F007030, 0x06000000}, + {0x0F007034, 0x00000004}, + {0x0F007038, 0x1F080200}, + {0x0F00703C, 0x0203031F}, + {0x0F007040, 0x6e001200}, + {0x0F007044, 0x011a0a00}, + {0x0F007048, 0x03000305}, + {0x0F00704c, 0x00000000}, + {0x0F007050, 0x0100001c}, + {0x0F007054, 0x00000000}, + {0x0F007058, 0x00000000}, + {0x0F00705c, 0x00000000}, + {0x0F007060, 0x00082ED6}, + {0x0F007064, 0x0000000A}, + {0x0F007068, 0x00000000}, + {0x0F00706c, 0x00000001}, + {0x0F007070, 0x00005000}, + {0x0F007074, 0x00000000}, + {0x0F007078, 0x00000000}, + {0x0F00707C, 0x00000000}, + {0x0F007080, 0x00000000}, + {0x0F007084, 0x00000000}, + {0x0F007088, 0x01000001}, + {0x0F00708c, 0x00000101}, + {0x0F007090, 0x00000000}, + {0x0F007094, 0x00010000}, + {0x0F007098, 0x00000000}, + {0x0F0070C8, 0x00000104}, + /* Enable 2 ports within X-bar */ + {0x0F00A000, 0x00000016}, + /* Enable start bit within memory controller */ + {0x0F007018, 0x01010000} }; -#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 //index for 0x0F007000 -static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[]= {// # DPLL Clock Setting - {0x0f000820,0x07F13FFF}, - {0x0f000810,0x00002F95}, - {0x0f000860,0x00000000}, - {0x0f000880,0x000003DD}, - {0x0f000840,0x0FFF1F00}, - {0x0F00a044,0x1fffffff}, - {0x0F00a040,0x1f000000}, - {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz - {0x0F00a084,0x1Cffffff},// dump from here in internal memory - {0x0F00a080,0x1C000000}, - {0x0F00A000,0x00000016}, - {0x0f007000,0x00010001}, - {0x0f007004,0x01000000}, - {0x0f007008,0x01000001}, - {0x0f00700c,0x00000000}, - {0x0f007010,0x01000000}, - {0x0f007014,0x01000100}, - {0x0f007018,0x01000000}, - {0x0f00701c,0x01020000}, - {0x0f007020,0x04020107}, - {0x0f007024,0x00000007}, - {0x0f007028,0x02020200}, - {0x0f00702c,0x0204040a}, - {0x0f007030,0x04000000}, - {0x0f007034,0x00000002}, - {0x0f007038,0x1d060200}, - {0x0f00703c,0x1c22221d}, - {0x0f007040,0x8A116600}, - {0x0f007044,0x222d0800}, - {0x0f007048,0x02690204}, - {0x0f00704c,0x00000000}, - {0x0f007050,0x0100001c}, - {0x0f007054,0x00000000}, - {0x0f007058,0x00000000}, - {0x0f00705c,0x00000000}, - {0x0f007060,0x000A15D6}, - {0x0f007064,0x0000000A}, - {0x0f007068,0x00000000}, - {0x0f00706c,0x00000001}, - {0x0f007070,0x00004000}, - {0x0f007074,0x00000000}, - {0x0f007078,0x00000000}, - {0x0f00707c,0x00000000}, - {0x0f007080,0x00000000}, - {0x0f007084,0x00000000}, - {0x0f007088,0x01000001}, - {0x0f00708c,0x00000101}, - {0x0f007090,0x00000000}, - {0x0f007094,0x00010000}, - {0x0f007098,0x00000000}, - {0x0F0070C8,0x00000104}, - {0x0F007018,0x01010000} +#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 /* index for 0x0F007000 */ +static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[] = { /* DPLL Clock Setting */ + {0x0f000820, 0x07F13FFF}, + {0x0f000810, 
0x00002F95}, + {0x0f000860, 0x00000000}, + {0x0f000880, 0x000003DD}, + {0x0f000840, 0x0FFF1F00}, + {0x0F00a044, 0x1fffffff}, + {0x0F00a040, 0x1f000000}, + {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which set the flash clock to 20 MHz */ + {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */ + {0x0F00a080, 0x1C000000}, + {0x0F00A000, 0x00000016}, + {0x0f007000, 0x00010001}, + {0x0f007004, 0x01000000}, + {0x0f007008, 0x01000001}, + {0x0f00700c, 0x00000000}, + {0x0f007010, 0x01000000}, + {0x0f007014, 0x01000100}, + {0x0f007018, 0x01000000}, + {0x0f00701c, 0x01020000}, + {0x0f007020, 0x04020107}, + {0x0f007024, 0x00000007}, + {0x0f007028, 0x02020200}, + {0x0f00702c, 0x0204040a}, + {0x0f007030, 0x04000000}, + {0x0f007034, 0x00000002}, + {0x0f007038, 0x1d060200}, + {0x0f00703c, 0x1c22221d}, + {0x0f007040, 0x8A116600}, + {0x0f007044, 0x222d0800}, + {0x0f007048, 0x02690204}, + {0x0f00704c, 0x00000000}, + {0x0f007050, 0x0100001c}, + {0x0f007054, 0x00000000}, + {0x0f007058, 0x00000000}, + {0x0f00705c, 0x00000000}, + {0x0f007060, 0x000A15D6}, + {0x0f007064, 0x0000000A}, + {0x0f007068, 0x00000000}, + {0x0f00706c, 0x00000001}, + {0x0f007070, 0x00004000}, + {0x0f007074, 0x00000000}, + {0x0f007078, 0x00000000}, + {0x0f00707c, 0x00000000}, + {0x0f007080, 0x00000000}, + {0x0f007084, 0x00000000}, + {0x0f007088, 0x01000001}, + {0x0f00708c, 0x00000101}, + {0x0f007090, 0x00000000}, + {0x0f007094, 0x00010000}, + {0x0f007098, 0x00000000}, + {0x0F0070C8, 0x00000104}, + {0x0F007018, 0x01010000} }; int ddr_init(struct bcm_mini_adapter *Adapter) { - struct bcm_ddr_setting *psDDRSetting=NULL; - ULONG RegCount=0; + struct bcm_ddr_setting *psDDRSetting = NULL; + ULONG RegCount = 0; UINT value = 0; UINT uiResetValue = 0; UINT uiClockSetting = 0; int retval = STATUS_SUCCESS; - switch (Adapter->chip_id) - { + switch (Adapter->chip_id) { case 0xbece3200: - switch (Adapter->DDRSetting) - { - case DDR_80_MHZ: - psDDRSetting=asT3LP_DDRSetting80MHz; - RegCount=(sizeof(asT3LP_DDRSetting80MHz)/ - sizeof(struct bcm_ddr_setting)); - break; - case DDR_100_MHZ: - psDDRSetting=asT3LP_DDRSetting100MHz; - RegCount=(sizeof(asT3LP_DDRSetting100MHz)/ - sizeof(struct bcm_ddr_setting)); - break; - case DDR_133_MHZ: - psDDRSetting=asT3LP_DDRSetting133MHz; - RegCount=(sizeof(asT3LP_DDRSetting133MHz)/ - sizeof(struct bcm_ddr_setting)); - if(Adapter->bMipsConfig == MIPS_200_MHZ) - { - uiClockSetting = 0x03F13652; - } - else - { - uiClockSetting = 0x03F1365B; - } - break; - default: - return -EINVAL; - } + switch (Adapter->DDRSetting) { + case DDR_80_MHZ: + psDDRSetting = asT3LP_DDRSetting80MHz; + RegCount = (sizeof(asT3LP_DDRSetting80MHz)/ + sizeof(struct bcm_ddr_setting)); + break; + case DDR_100_MHZ: + psDDRSetting = asT3LP_DDRSetting100MHz; + RegCount = (sizeof(asT3LP_DDRSetting100MHz)/ + sizeof(struct bcm_ddr_setting)); + break; + case DDR_133_MHZ: + psDDRSetting = asT3LP_DDRSetting133MHz; + RegCount = (sizeof(asT3LP_DDRSetting133MHz)/ + sizeof(struct bcm_ddr_setting)); + if (Adapter->bMipsConfig == MIPS_200_MHZ) + uiClockSetting = 0x03F13652; + else + uiClockSetting = 0x03F1365B; + break; + default: + return -EINVAL; + } break; case T3LPB: case BCS220_2: case BCS220_2BC: case BCS250_BC: - case BCS220_3 : + case BCS220_3: /* Set bit 2 and bit 6 to 1 for BBIC 2mA drive * (please check current value and additionally set these bits) */ - if( (Adapter->chip_id != BCS220_2) && - (Adapter->chip_id != BCS220_2BC) && - (Adapter->chip_id != BCS220_3) ) - { - retval= rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, 
sizeof(uiResetValue)); - if(retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); - return retval; - } - uiResetValue |= 0x44; - retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); - return retval; - } + if ((Adapter->chip_id != BCS220_2) && + (Adapter->chip_id != BCS220_2BC) && + (Adapter->chip_id != BCS220_3)) { + retval = rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue)); + if (retval < 0) { + BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); + return retval; } - switch(Adapter->DDRSetting) - { + uiResetValue |= 0x44; + retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue)); + if (retval < 0) { + BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); + return retval; + } + } + switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = asT3LPB_DDRSetting80MHz; - RegCount=(sizeof(asT3B_DDRSetting80MHz)/ - sizeof(struct bcm_ddr_setting)); + case DDR_80_MHZ: + psDDRSetting = asT3LPB_DDRSetting80MHz; + RegCount = (sizeof(asT3B_DDRSetting80MHz)/ + sizeof(struct bcm_ddr_setting)); break; - case DDR_100_MHZ: - psDDRSetting=asT3LPB_DDRSetting100MHz; - RegCount=(sizeof(asT3B_DDRSetting100MHz)/ - sizeof(struct bcm_ddr_setting)); + case DDR_100_MHZ: + psDDRSetting = asT3LPB_DDRSetting100MHz; + RegCount = (sizeof(asT3B_DDRSetting100MHz)/ + sizeof(struct bcm_ddr_setting)); break; - case DDR_133_MHZ: - psDDRSetting = asT3LPB_DDRSetting133MHz; - RegCount=(sizeof(asT3B_DDRSetting133MHz)/ - sizeof(struct bcm_ddr_setting)); + case DDR_133_MHZ: + psDDRSetting = asT3LPB_DDRSetting133MHz; + RegCount = (sizeof(asT3B_DDRSetting133MHz)/ + sizeof(struct bcm_ddr_setting)); - if(Adapter->bMipsConfig == MIPS_200_MHZ) - { - uiClockSetting = 0x03F13652; - } - else - { - uiClockSetting = 0x03F1365B; - } + if (Adapter->bMipsConfig == MIPS_200_MHZ) + uiClockSetting = 0x03F13652; + else + uiClockSetting = 0x03F1365B; break; - case DDR_160_MHZ: - psDDRSetting = asT3LPB_DDRSetting160MHz; - RegCount = sizeof(asT3LPB_DDRSetting160MHz)/sizeof(struct bcm_ddr_setting); + case DDR_160_MHZ: + psDDRSetting = asT3LPB_DDRSetting160MHz; + RegCount = sizeof(asT3LPB_DDRSetting160MHz)/sizeof(struct bcm_ddr_setting); - if(Adapter->bMipsConfig == MIPS_200_MHZ) - { - uiClockSetting = 0x03F137D2; - } - else - { - uiClockSetting = 0x03F137DB; - } - } + if (Adapter->bMipsConfig == MIPS_200_MHZ) + uiClockSetting = 0x03F137D2; + else + uiClockSetting = 0x03F137DB; + } break; case 0xbece0110: @@ -888,68 +871,59 @@ int ddr_init(struct bcm_mini_adapter *Adapter) case 0xbece0121: case 0xbece0130: case 0xbece0300: - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting); - switch (Adapter->DDRSetting) - { - case DDR_80_MHZ: - psDDRSetting = asT3_DDRSetting80MHz; - RegCount = (sizeof(asT3_DDRSetting80MHz)/ - sizeof(struct bcm_ddr_setting)); - break; - case DDR_100_MHZ: - psDDRSetting = asT3_DDRSetting100MHz; - RegCount = (sizeof(asT3_DDRSetting100MHz)/ - sizeof(struct bcm_ddr_setting)); - break; - case DDR_133_MHZ: - psDDRSetting = asT3_DDRSetting133MHz; - RegCount = (sizeof(asT3_DDRSetting133MHz)/ - sizeof(struct bcm_ddr_setting)); - break; - default: - return -EINVAL; - } + BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", 
Adapter->DDRSetting); + switch (Adapter->DDRSetting) { + case DDR_80_MHZ: + psDDRSetting = asT3_DDRSetting80MHz; + RegCount = (sizeof(asT3_DDRSetting80MHz)/ + sizeof(struct bcm_ddr_setting)); + break; + case DDR_100_MHZ: + psDDRSetting = asT3_DDRSetting100MHz; + RegCount = (sizeof(asT3_DDRSetting100MHz)/ + sizeof(struct bcm_ddr_setting)); + break; + case DDR_133_MHZ: + psDDRSetting = asT3_DDRSetting133MHz; + RegCount = (sizeof(asT3_DDRSetting133MHz)/ + sizeof(struct bcm_ddr_setting)); + break; + default: + return -EINVAL; + } case 0xbece0310: { - switch (Adapter->DDRSetting) - { - case DDR_80_MHZ: - psDDRSetting = asT3B_DDRSetting80MHz; - RegCount=(sizeof(asT3B_DDRSetting80MHz)/ - sizeof(struct bcm_ddr_setting)); - break; - case DDR_100_MHZ: - psDDRSetting=asT3B_DDRSetting100MHz; - RegCount=(sizeof(asT3B_DDRSetting100MHz)/ - sizeof(struct bcm_ddr_setting)); + switch (Adapter->DDRSetting) { + case DDR_80_MHZ: + psDDRSetting = asT3B_DDRSetting80MHz; + RegCount = (sizeof(asT3B_DDRSetting80MHz)/ + sizeof(struct bcm_ddr_setting)); + break; + case DDR_100_MHZ: + psDDRSetting = asT3B_DDRSetting100MHz; + RegCount = (sizeof(asT3B_DDRSetting100MHz)/ + sizeof(struct bcm_ddr_setting)); break; - case DDR_133_MHZ: + case DDR_133_MHZ: - if(Adapter->bDPLLConfig == PLL_266_MHZ)//266Mhz PLL selected. - { - memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ, - sizeof(asDPLL_266MHZ)); - psDDRSetting = asT3B_DDRSetting133MHz; - RegCount=(sizeof(asT3B_DDRSetting133MHz)/ - sizeof(struct bcm_ddr_setting)); - } + if (Adapter->bDPLLConfig == PLL_266_MHZ) { /* 266Mhz PLL selected. */ + memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ, + sizeof(asDPLL_266MHZ)); + psDDRSetting = asT3B_DDRSetting133MHz; + RegCount = (sizeof(asT3B_DDRSetting133MHz)/ + sizeof(struct bcm_ddr_setting)); + } else { + psDDRSetting = asT3B_DDRSetting133MHz; + RegCount = (sizeof(asT3B_DDRSetting133MHz)/ + sizeof(struct bcm_ddr_setting)); + if (Adapter->bMipsConfig == MIPS_200_MHZ) + uiClockSetting = 0x07F13652; else - { - psDDRSetting = asT3B_DDRSetting133MHz; - RegCount=(sizeof(asT3B_DDRSetting133MHz)/ - sizeof(struct bcm_ddr_setting)); - if(Adapter->bMipsConfig == MIPS_200_MHZ) - { - uiClockSetting = 0x07F13652; - } - else - { - uiClockSetting = 0x07F1365B; - } - } - break; - default: - return -EINVAL; + uiClockSetting = 0x07F1365B; + } + break; + default: + return -EINVAL; } break; @@ -958,20 +932,15 @@ int ddr_init(struct bcm_mini_adapter *Adapter) return -EINVAL; } - value=0; + value = 0; BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register Count is =%lu\n", RegCount); - while(RegCount && !retval) - { - if(uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG) - { + while (RegCount && !retval) { + if (uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG) value = uiClockSetting; - } else - { value = psDDRSetting->ulRegValue; - } retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value)); - if(STATUS_SUCCESS != retval) { + if (STATUS_SUCCESS != retval) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__); break; } @@ -980,36 +949,34 @@ int ddr_init(struct bcm_mini_adapter *Adapter) psDDRSetting++; } - if(Adapter->chip_id >= 0xbece3300 ) - { + if (Adapter->chip_id >= 0xbece3300) { mdelay(3); - if( (Adapter->chip_id != BCS220_2)&& - (Adapter->chip_id != BCS220_2BC)&& - (Adapter->chip_id != BCS220_3)) - { + if ((Adapter->chip_id != BCS220_2) && + (Adapter->chip_id != BCS220_2BC) && + (Adapter->chip_id != BCS220_3)) { /* drive MDDR to half in case of UMA-B: */ 
uiResetValue = 0x01010001; retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } uiResetValue = 0x00040020; retval = wrmalt(Adapter, (UINT)0x0F007094, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } uiResetValue = 0x01020101; retval = wrmalt(Adapter, (UINT)0x0F00701c, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } uiResetValue = 0x01010000; retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } @@ -1020,75 +987,72 @@ int ddr_init(struct bcm_mini_adapter *Adapter) * This is to be done only for Hybrid PMU mode. * with the current h/w there is no way to detect this. * and since we dont have internal PMU lets do it under UMA-B chip id. - * we will change this when we will have internal PMU. - */ - if(Adapter->PmuMode == HYBRID_MODE_7C) - { + * we will change this when we will have internal PMU. + */ + if (Adapter->PmuMode == HYBRID_MODE_7C) { retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } uiResetValue = 0x1322a8; retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } uiResetValue = 0x132296; retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } - } - else if(Adapter->PmuMode == HYBRID_MODE_6 ) - { + } else if (Adapter->PmuMode == HYBRID_MODE_6) { retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } uiResetValue = 0x6003229a; retval = wrmalt(Adapter, 
(UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } uiResetValue = 0x1322a8; retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue)); - if(retval < 0) { + if (retval < 0) { BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__); return retval; } @@ -1101,179 +1065,167 @@ int ddr_init(struct bcm_mini_adapter *Adapter) int download_ddr_settings(struct bcm_mini_adapter *Adapter) { - struct bcm_ddr_setting *psDDRSetting=NULL; - ULONG RegCount=0; + struct bcm_ddr_setting *psDDRSetting = NULL; + ULONG RegCount = 0; unsigned long ul_ddr_setting_load_addr = DDR_DUMP_INTERNAL_DEVICE_MEMORY; UINT value = 0; int retval = STATUS_SUCCESS; bool bOverrideSelfRefresh = false; - switch (Adapter->chip_id) - { + switch (Adapter->chip_id) { case 0xbece3200: - switch (Adapter->DDRSetting) - { - case DDR_80_MHZ: - psDDRSetting = asT3LP_DDRSetting80MHz; - RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz); - RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ; - psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + switch (Adapter->DDRSetting) { + case DDR_80_MHZ: + psDDRSetting = asT3LP_DDRSetting80MHz; + RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz); + RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + break; + case DDR_100_MHZ: + psDDRSetting = asT3LP_DDRSetting100MHz; + RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz); + RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + break; + case DDR_133_MHZ: + bOverrideSelfRefresh = TRUE; + psDDRSetting = asT3LP_DDRSetting133MHz; + RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz); + RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; + psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; break; - case DDR_100_MHZ: - psDDRSetting = asT3LP_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz); - RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ; - psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3LP_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz); - RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ; - psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - break; default: return -EINVAL; - } + } break; case T3LPB: case BCS220_2: case BCS220_2BC: case BCS250_BC: - case BCS220_3 : - switch (Adapter->DDRSetting) - { - case DDR_80_MHZ: - psDDRSetting = asT3LPB_DDRSetting80MHz; - RegCount=ARRAY_SIZE(asT3LPB_DDRSetting80MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + case BCS220_3: + switch (Adapter->DDRSetting) { + case DDR_80_MHZ: + psDDRSetting = asT3LPB_DDRSetting80MHz; + RegCount = ARRAY_SIZE(asT3LPB_DDRSetting80MHz); + RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + break; + case 
DDR_100_MHZ: + psDDRSetting = asT3LPB_DDRSetting100MHz; + RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz); + RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + break; + case DDR_133_MHZ: + bOverrideSelfRefresh = TRUE; + psDDRSetting = asT3LPB_DDRSetting133MHz; + RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz); + RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; + psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; break; - case DDR_100_MHZ: - psDDRSetting = asT3LPB_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3LPB_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - break; - case DDR_160_MHZ: - bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3LPB_DDRSetting160MHz; - RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ; + case DDR_160_MHZ: + bOverrideSelfRefresh = TRUE; + psDDRSetting = asT3LPB_DDRSetting160MHz; + RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz); + RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ; + psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ; - break; - default: - return -EINVAL; - } + break; + default: + return -EINVAL; + } break; case 0xbece0300: - switch (Adapter->DDRSetting) - { - case DDR_80_MHZ: - psDDRSetting = asT3_DDRSetting80MHz; - RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz); - RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ; - psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + switch (Adapter->DDRSetting) { + case DDR_80_MHZ: + psDDRSetting = asT3_DDRSetting80MHz; + RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz); + RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; break; - case DDR_100_MHZ: - psDDRSetting = asT3_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz); - RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ; - psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - psDDRSetting = asT3_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz); - RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ; - psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ; - break; - default: - return -EINVAL; - } + case DDR_100_MHZ: + psDDRSetting = asT3_DDRSetting100MHz; + RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz); + RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + break; + case DDR_133_MHZ: + psDDRSetting = asT3_DDRSetting133MHz; + RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz); + RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; + psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; + break; + default: + return -EINVAL; + } break; case 0xbece0310: { - switch (Adapter->DDRSetting) - { - case DDR_80_MHZ: - psDDRSetting = asT3B_DDRSetting80MHz; - RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz); - RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ; - psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - break; - case DDR_100_MHZ: - psDDRSetting = asT3B_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz); - RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ; - psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - 
bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3B_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz); - RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ; - psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - break; - } - break; + switch (Adapter->DDRSetting) { + case DDR_80_MHZ: + psDDRSetting = asT3B_DDRSetting80MHz; + RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz); + RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; + break; + case DDR_100_MHZ: + psDDRSetting = asT3B_DDRSetting100MHz; + RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz); + RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; + break; + case DDR_133_MHZ: + bOverrideSelfRefresh = TRUE; + psDDRSetting = asT3B_DDRSetting133MHz; + RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz); + RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; + psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; + break; + } + break; } default: return -EINVAL; } - //total number of Register that has to be dumped - value =RegCount ; + /* total number of Register that has to be dumped */ + value = RegCount; retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value)); - if(retval) - { + if (retval) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__); return retval; } ul_ddr_setting_load_addr += sizeof(ULONG); - /*signature */ - value =(0x1d1e0dd0); + /* signature */ + value = (0x1d1e0dd0); retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value)); - if(retval) - { + if (retval) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__); return retval; } ul_ddr_setting_load_addr += sizeof(ULONG); - RegCount*=(sizeof(struct bcm_ddr_setting)/sizeof(ULONG)); + RegCount *= (sizeof(struct bcm_ddr_setting)/sizeof(ULONG)); - while(RegCount && !retval) - { - value = psDDRSetting->ulRegAddress ; - retval = wrmalt( Adapter, ul_ddr_setting_load_addr, &value, sizeof(value)); + while (RegCount && !retval) { + value = psDDRSetting->ulRegAddress; + retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value)); ul_ddr_setting_load_addr += sizeof(ULONG); - if(!retval) - { - if(bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018)) - { + if (!retval) { + if (bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018)) { value = (psDDRSetting->ulRegValue |(1<<8)); - if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr, - &value, sizeof(value))){ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__); - break; - } + if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr, + &value, sizeof(value))) { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__); + break; } - else - { + } else { value = psDDRSetting->ulRegValue; - if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr , - &value, sizeof(value))){ + if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr , + &value, sizeof(value))) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__); break; } @@ -1283,7 +1235,5 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter) RegCount--; psDDRSetting++; } - return retval; + return retval; } - - diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c index 463bdee9dfcafb..005e4607b26007 100644 --- a/drivers/staging/bcm/InterfaceDld.c +++ b/drivers/staging/bcm/InterfaceDld.c @@ -20,18 +20,10 @@ int 
InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc) MAX_TRANSFER_CTRL_BYTE_USB, &pos); set_fs(oldfs); if (len <= 0) { - if (len < 0) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_INITEXIT, MP_INIT, - DBG_LVL_ALL, "len < 0"); + if (len < 0) errno = len; - } else { + else errno = 0; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_INITEXIT, MP_INIT, - DBG_LVL_ALL, - "Got end of file!"); - } break; } /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT, @@ -39,12 +31,8 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc) * MAX_TRANSFER_CTRL_BYTE_USB); */ errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len); - if (errno) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_PRINTK, 0, 0, - "WRM Failed! status: %d", errno); + if (errno) break; - } on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB; } @@ -52,7 +40,8 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc) return errno; } -int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_chip_loc) +int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, + unsigned int on_chip_loc) { char *buff, *buff_readback; unsigned int reg = 0; @@ -80,32 +69,28 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c while (1) { oldfs = get_fs(); set_fs(get_ds()); - len = vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos); + len = vfs_read(flp, (void __force __user *)buff, + MAX_TRANSFER_CTRL_BYTE_USB, &pos); set_fs(oldfs); fw_down++; if (len <= 0) { - if (len < 0) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0"); + if (len < 0) errno = len; - } else { + else errno = 0; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!"); - } break; } - bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len); + bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, + buff_readback, len); if (bytes < 0) { Status = bytes; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! 
%d", len, reg); goto exit; } reg++; if ((len-sizeof(unsigned int)) < 4) { if (memcmp(buff_readback, buff, len)) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down); - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Length is: %d", len); Status = -EIO; goto exit; } @@ -113,10 +98,8 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c len -= 4; while (len) { - if (*(unsigned int *)&buff_readback[len] != *(unsigned int *)&buff[len]) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down); - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&buff_readback[len]); - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len); + if (*(unsigned int *)&buff_readback[len] != + *(unsigned int *)&buff[len]) { Status = -EIO; goto exit; } @@ -132,13 +115,15 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c return Status; } -static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo) +static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, + struct bcm_firmware_info *psFwInfo) { int retval = STATUS_SUCCESS; B_UINT32 value = 0; if (Adapter->pstargetparams == NULL) { - Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL); + Adapter->pstargetparams = + kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL); if (Adapter->pstargetparams == NULL) return -ENOMEM; } @@ -146,7 +131,9 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params)) return -EIO; - retval = copy_from_user(Adapter->pstargetparams, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength); + retval = copy_from_user(Adapter->pstargetparams, + psFwInfo->pvMappedFirmwareAddress, + psFwInfo->u32FirmwareLength); if (retval) { kfree(Adapter->pstargetparams); Adapter->pstargetparams = NULL; @@ -160,52 +147,54 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm BcmInitNVM(Adapter); retval = InitLedSettings(Adapter); - if (retval) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "INIT LED Failed\n"); + if (retval) return retval; - } - if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { + if (Adapter->LEDInfo.led_thread_running & + BCM_LED_THREAD_RUNNING_ACTIVELY) { Adapter->LEDInfo.bLedInitDone = false; Adapter->DriverState = DRIVER_INIT; wake_up(&Adapter->LEDInfo.notify_led_event); } - if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { + if (Adapter->LEDInfo.led_thread_running & + BCM_LED_THREAD_RUNNING_ACTIVELY) { Adapter->DriverState = FW_DOWNLOAD; wake_up(&Adapter->LEDInfo.notify_led_event); } /* Initialize the DDR Controller */ retval = ddr_init(Adapter); - if (retval) { - BCM_DEBUG_PRINT (Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Init Failed\n"); + if (retval) return retval; - } value = 0; - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value)); - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value)); + wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, + &value, sizeof(value)); + 
wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, + &value, sizeof(value)); if (Adapter->eNVMType == NVM_FLASH) { retval = PropagateCalParamsFromFlashToMemory(Adapter); - if (retval) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "propagaion of cal param failed with status :%d", retval); + if (retval) return retval; - } } - retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR); + retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, + sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR); if (retval) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "configuration file not downloaded properly"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, + MP_INIT, DBG_LVL_ALL, + "configuration file not downloaded properly"); else Adapter->bCfgDownloaded = TRUE; return retval; } -int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo) +int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, + struct bcm_firmware_info *psFwInfo) { int retval = STATUS_SUCCESS; PUCHAR buff = NULL; @@ -215,9 +204,9 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_ * Application */ atomic_set(&Adapter->uiMBupdate, false); - if (!Adapter->bCfgDownloaded && psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) { + if (!Adapter->bCfgDownloaded && + psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) { /* Can't Download Firmware. */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Download the config File first\n"); return -EINVAL; } @@ -226,14 +215,13 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_ retval = bcm_download_config_file(Adapter, psFwInfo); } else { buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL); - if (buff == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed in allocation memory"); + if (buff == NULL) return -ENOMEM; - } - retval = copy_from_user(buff, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength); + retval = copy_from_user(buff, + psFwInfo->pvMappedFirmwareAddress, + psFwInfo->u32FirmwareLength); if (retval != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copying buffer from user space failed"); retval = -EFAULT; goto error; } @@ -243,10 +231,8 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_ psFwInfo->u32FirmwareLength, psFwInfo->u32StartingAddress); - if (retval != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "f/w download failed status :%d", retval); + if (retval != STATUS_SUCCESS) goto error; - } } error: @@ -254,7 +240,9 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_ return retval; } -static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress) +static INT buffDnld(struct bcm_mini_adapter *Adapter, + PUCHAR mappedbuffer, UINT u32FirmwareLength, + ULONG u32StartingAddress) { unsigned int len = 0; int retval = STATUS_SUCCESS; @@ -264,10 +252,8 @@ static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB); retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len); - if (retval) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed with status :%d", retval); + if (retval) break; - } 
u32StartingAddress += len; u32FirmwareLength -= len; mappedbuffer += len; @@ -275,17 +261,17 @@ static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT return retval; } -static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress) +static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, + PUCHAR mappedbuffer, UINT u32FirmwareLength, + ULONG u32StartingAddress) { UINT len = u32FirmwareLength; INT retval = STATUS_SUCCESS; PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL); int bytes; - if (NULL == readbackbuff) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED"); + if (NULL == readbackbuff) return -ENOMEM; - } while (u32FirmwareLength && !retval) { len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB); @@ -293,7 +279,6 @@ static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, if (bytes < 0) { retval = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval); break; } @@ -312,21 +297,22 @@ static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, return retval; } -INT buffDnldVerify(struct bcm_mini_adapter *Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength, unsigned long u32StartingAddress) +INT buffDnldVerify(struct bcm_mini_adapter *Adapter, + unsigned char *mappedbuffer, + unsigned int u32FirmwareLength, + unsigned long u32StartingAddress) { INT status = STATUS_SUCCESS; - status = buffDnld(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress); - if (status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer download failed"); + status = buffDnld(Adapter, mappedbuffer, + u32FirmwareLength, u32StartingAddress); + if (status != STATUS_SUCCESS) goto error; - } - status = buffRdbkVerify(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress); - if (status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer readback verifier failed"); + status = buffRdbkVerify(Adapter, mappedbuffer, + u32FirmwareLength, u32StartingAddress); + if (status != STATUS_SUCCESS) goto error; - } error: return status; } diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c index 5959fbdcd1bef3..fecf81ffe06667 100644 --- a/drivers/staging/bcm/InterfaceIdleMode.c +++ b/drivers/staging/bcm/InterfaceIdleMode.c @@ -1,32 +1,37 @@ #include "headers.h" /* -Function: InterfaceIdleModeWakeup +Function: InterfaceIdleModeWakeup -Description: This is the hardware specific Function for waking up HW device from Idle mode. - A software abort pattern is written to the device to wake it and necessary power state - transitions from host are performed here. +Description: This is the hardware specific Function for + waking up HW device from Idle mode. + A software abort pattern is written to the + device to wake it and necessary power state + transitions from host are performed here. -Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context +Input parameters: IN struct bcm_mini_adapter *Adapter + - Miniport Adapter Context - -Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface was successful. - Other - If an error occurred. +Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface + was successful. + Other - If an error occurred. 
*/ - /* -Function: InterfaceIdleModeRespond +Function: InterfaceIdleModeRespond -Description: This is the hardware specific Function for responding to Idle mode request from target. - Necessary power state transitions from host for idle mode or other device specific - initializations are performed here. +Description: This is the hardware specific Function for + responding to Idle mode request from target. + Necessary power state transitions from host for + idle mode or other device specific initializations + are performed here. -Input parameters: IN struct bcm_mini_adapter * Adapter - Miniport Adapter Context +Input parameters: IN struct bcm_mini_adapter * Adapter + - Miniport Adapter Context - -Return: BCM_STATUS_SUCCESS - If Idle mode response related HW configuration was successful. - Other - If an error occurred. +Return: BCM_STATUS_SUCCESS - If Idle mode response related + HW configuration was successful. + Other - If an error occurred. */ /* @@ -36,59 +41,59 @@ this value will be at address bfc02fa4.just before value d0ea1dle. Set time value by writing at bfc02f98 7d0 checking the Ack timer expire on kannon by running command -d qcslog .. if it shows e means host has not send response to f/w with in 200 ms. Response should be +d qcslog .. if it shows e means host has not send response +to f/w with in 200 ms. Response should be send to f/w with in 200 ms after the Idle/Shutdown req issued */ -int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *puiBuffer) +int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, + unsigned int *puiBuffer) { int status = STATUS_SUCCESS; unsigned int uiRegRead = 0; int bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "SubType of Message :0x%X", ntohl(*puiBuffer)); - if (ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, " Got GO_TO_IDLE_MODE_PAYLOAD(210) Msg Subtype"); - if (ntohl(*(puiBuffer+1)) == 0 ) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got IDLE MODE WAKE UP Response From F/W"); + if (ntohl(*(puiBuffer+1)) == 0) { - status = wrmalt (Adapter, SW_ABORT_IDLEMODE_LOC, &uiRegRead, sizeof(uiRegRead)); - if (status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg"); + status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, + &uiRegRead, sizeof(uiRegRead)); + if (status) return status; - } - if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { - uiRegRead = 0x00000000 ; - status = wrmalt (Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegRead, sizeof(uiRegRead)); - if (status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg"); + if (Adapter->ulPowerSaveMode == + DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { + uiRegRead = 0x00000000; + status = wrmalt(Adapter, + DEBUG_INTERRUPT_GENERATOR_REGISTOR, + &uiRegRead, sizeof(uiRegRead)); + if (status) return status; - } } - /* Below Register should not br read in case of Manual and Protocol Idle mode */ - else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) { + /* Below Register should not br read in case of + * Manual and Protocol Idle mode */ + else if (Adapter->ulPowerSaveMode != + DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) { /* clear on read Register */ - bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead)); + bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, + &uiRegRead, sizeof(uiRegRead)); if 
(bytes < 0) { status = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0"); return status; } /* clear on read Register */ - bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead)); + bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, + &uiRegRead, sizeof(uiRegRead)); if (bytes < 0) { status = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1"); return status; } } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device Up from Idle Mode"); - /* Set Idle Mode Flag to False and Clear IdleMode reg. */ + /* Set Idle Mode Flag to False and + * Clear IdleMode reg. */ Adapter->IdleMode = false; Adapter->bTriedToWakeUpFromlowPowerMode = false; @@ -96,124 +101,123 @@ int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *pui } else { if (TRUE == Adapter->IdleMode) - { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device is already in Idle mode...."); - return status ; - } + return status; uiRegRead = 0; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got Req from F/W to go in IDLE mode \n"); if (Adapter->chip_id == BCS220_2 || Adapter->chip_id == BCS220_2BC || Adapter->chip_id == BCS250_BC || Adapter->chip_id == BCS220_3) { - bytes = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead)); + bytes = rdmalt(Adapter, HPM_CONFIG_MSW, + &uiRegRead, sizeof(uiRegRead)); if (bytes < 0) { status = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n"); return status; } uiRegRead |= (1<<17); - status = wrmalt (Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead)); - if (status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg\n"); + status = wrmalt(Adapter, HPM_CONFIG_MSW, + &uiRegRead, sizeof(uiRegRead)); + if (status) return status; - } - } SendIdleModeResponse(Adapter); } } else if (ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "OverRiding Service Flow Params"); OverrideServiceFlowParams(Adapter, puiBuffer); } return status; } -static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, unsigned int Pattern) +static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, + unsigned int Pattern) { - int status = STATUS_SUCCESS; + int status = STATUS_SUCCESS; unsigned int value; - unsigned int chip_id ; + unsigned int chip_id; unsigned long timeout = 0, itr = 0; - int lenwritten = 0; - unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; - struct bcm_interface_adapter *psInterfaceAdapter = Adapter->pvInterfaceAdapter; + int lenwritten = 0; + unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF}; + struct bcm_interface_adapter *psInterfaceAdapter = + Adapter->pvInterfaceAdapter; /* Abort Bus suspend if its already suspended */ - if ((TRUE == psInterfaceAdapter->bSuspended) && (TRUE == Adapter->bDoSuspend)) { - status = usb_autopm_get_interface(psInterfaceAdapter->interface); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Bus got wakeup..Aborting Idle mode... 
status:%d \n", status); - - } - - if ((Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) - || - (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) { + if ((TRUE == psInterfaceAdapter->bSuspended) && + (TRUE == Adapter->bDoSuspend)) + status = usb_autopm_get_interface( + psInterfaceAdapter->interface); + + if ((Adapter->ulPowerSaveMode == + DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) || + (Adapter->ulPowerSaveMode == + DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) { /* write the SW abort pattern. */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing pattern<%d> to SW_ABORT_IDLEMODE_LOC\n", Pattern); - status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(Pattern)); - if (status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to Register SW_ABORT_IDLEMODE_LOC failed.."); - return status; - } + status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, + &Pattern, sizeof(Pattern)); + if (status) + return status; } - if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { + if (Adapter->ulPowerSaveMode == + DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { value = 0x80000000; - status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &value, sizeof(value)); + status = wrmalt(Adapter, + DEBUG_INTERRUPT_GENERATOR_REGISTOR, + &value, sizeof(value)); if (status) - { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Register failed"); return status; - } - } else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) { + } else if (Adapter->ulPowerSaveMode != + DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) { /* * Get a Interrupt Out URB and send 8 Bytes Down * To be Done in Thread Context. * Not using Asynchronous Mechanism. 
*/ - status = usb_interrupt_msg (psInterfaceAdapter->udev, + status = usb_interrupt_msg(psInterfaceAdapter->udev, usb_sndintpipe(psInterfaceAdapter->udev, psInterfaceAdapter->sIntrOut.int_out_endpointAddr), aucAbortPattern, 8, &lenwritten, 5000); - if (status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Sending Abort pattern down fails with status:%d..\n", status); + if (status) return status; - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "NOB Sent down :%d", lenwritten); - } + else + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + IDLE_MODE, DBG_LVL_ALL, + "NOB Sent down :%d", lenwritten); /* mdelay(25); */ - timeout = jiffies + msecs_to_jiffies(50) ; - while ( timeout > jiffies ) { - itr++ ; + timeout = jiffies + msecs_to_jiffies(50); + while (time_after(timeout, jiffies)) { + itr++; rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(UINT)); if (0xbece3200 == (chip_id&~(0xF0))) chip_id = chip_id&~(0xF0); if (chip_id == Adapter->chip_id) break; } - if (timeout < jiffies ) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Not able to read chip-id even after 25 msec"); + if (time_before(timeout, jiffies)) + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + IDLE_MODE, DBG_LVL_ALL, + "Not able to read chip-id even after 25 msec"); else - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Number of completed iteration to read chip-id :%lu", itr); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + IDLE_MODE, DBG_LVL_ALL, + "Number of completed iteration to" + "read chip-id :%lu", itr); - status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(status)); - if (status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to Register SW_ABORT_IDLEMODE_LOC failed.."); + status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, + &Pattern, sizeof(status)); + if (status) return status; - } } return status; } @@ -221,9 +225,10 @@ int InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter) { ULONG Status = 0; if (Adapter->bTriedToWakeUpFromlowPowerMode) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Wake up already attempted.. ignoring\n"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, + IDLE_MODE, DBG_LVL_ALL, + "Wake up already attempted.. ignoring\n"); } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing Low Power Mode Abort pattern to the Device\n"); Adapter->bTriedToWakeUpFromlowPowerMode = TRUE; InterfaceAbortIdlemode(Adapter, Adapter->usIdleModePattern); @@ -237,30 +242,33 @@ void InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter) INT Status = 0; int bytes; - if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { + if (Adapter->ulPowerSaveMode == + DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { /* clear idlemode interrupt. */ uiRegVal = 0; - Status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegVal, sizeof(uiRegVal)); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,"WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Failed with err :%d", Status); + Status = wrmalt(Adapter, + DEBUG_INTERRUPT_GENERATOR_REGISTOR, + &uiRegVal, sizeof(uiRegVal)); + if (Status) return; - } } - else { + else { - /* clear Interrupt EP registers. */ - bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal)); +/* clear Interrupt EP registers. 
*/ + bytes = rdmalt(Adapter, + DEVICE_INT_OUT_EP_REG0, + &uiRegVal, sizeof(uiRegVal)); if (bytes < 0) { Status = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status); return; } - bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal)); + bytes = rdmalt(Adapter, + DEVICE_INT_OUT_EP_REG1, + &uiRegVal, sizeof(uiRegVal)); if (bytes < 0) { Status = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status); return; } } diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c index 3acdb58a10f58d..94f32728f7c8c3 100644 --- a/drivers/staging/bcm/InterfaceInit.c +++ b/drivers/staging/bcm/InterfaceInit.c @@ -69,7 +69,7 @@ static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter) static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter) { - unsigned long ulReg = 0; + u32 ulReg; int bytes; /* Program EP2 MAX_PKT_SIZE */ @@ -96,7 +96,7 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE); /* Program TX EP as interrupt(Alternate Setting) */ - bytes = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32)); + bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32)); if (bytes < 0) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "reading of Tx EP failed\n"); @@ -119,18 +119,18 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter * Update EEPROM Version. * Read 4 bytes from 508 and modify 511 and 510. */ - ReadBeceemEEPROM(Adapter, 0x1FC, (PUINT)&ulReg); + ReadBeceemEEPROM(Adapter, 0x1FC, &ulReg); ulReg &= 0x0101FFFF; BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE); /* Update length field if required. Also make the string NULL terminated. 
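+	 * Bits 16..23 of the word read back hold the length; values above
+	 * 0x30 are clamped to 0x30 before the write-back below.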
*/ - ReadBeceemEEPROM(Adapter, 0xA8, (PUINT)&ulReg); + ReadBeceemEEPROM(Adapter, 0xA8, &ulReg); if ((ulReg&0x00FF0000)>>16 > 0x30) { ulReg = (ulReg&0xFF00FFFF)|(0x30<<16); BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE); } - ReadBeceemEEPROM(Adapter, 0x148, (PUINT)&ulReg); + ReadBeceemEEPROM(Adapter, 0x148, &ulReg); if ((ulReg&0x00FF0000)>>16 > 0x30) { ulReg = (ulReg&0xFF00FFFF)|(0x30<<16); BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE); diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c index f2973f5e503a45..11008173f915e0 100644 --- a/drivers/staging/bcm/InterfaceRx.c +++ b/drivers/staging/bcm/InterfaceRx.c @@ -1,11 +1,11 @@ #include "headers.h" -static int SearchVcid(struct bcm_mini_adapter *Adapter,unsigned short usVcid) +static int SearchVcid(struct bcm_mini_adapter *Adapter, unsigned short usVcid) { - int iIndex=0; + int iIndex = 0; - for(iIndex=(NO_OF_QUEUES-1);iIndex>=0;iIndex--) - if(Adapter->PackInfo[iIndex].usVCID_Value == usVcid) + for (iIndex = (NO_OF_QUEUES-1); iIndex >= 0; iIndex--) + if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid) return iIndex; return NO_OF_QUEUES+1; @@ -18,15 +18,14 @@ GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter) struct bcm_usb_rcb *pRcb = NULL; UINT index = 0; - if((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) && - (psIntfAdapter->psAdapter->StopAllXaction == false)) - { + if ((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) && + (psIntfAdapter->psAdapter->StopAllXaction == false)) { index = atomic_read(&psIntfAdapter->uCurrRcb); pRcb = &psIntfAdapter->asUsbRcb[index]; pRcb->bUsed = TRUE; - pRcb->psIntfAdapter= psIntfAdapter; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Got Rx desc %d used %d", - index, atomic_read(&psIntfAdapter->uNumRcbUsed)); + pRcb->psIntfAdapter = psIntfAdapter; + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Got Rx desc %d used %d", + index, atomic_read(&psIntfAdapter->uNumRcbUsed)); index = (index + 1) % MAXIMUM_USB_RCB; atomic_set(&psIntfAdapter->uCurrRcb, index); atomic_inc(&psIntfAdapter->uNumRcbUsed); @@ -40,9 +39,8 @@ static void read_bulk_callback(struct urb *urb) struct sk_buff *skb = NULL; bool bHeaderSupressionEnabled = false; int QueueIndex = NO_OF_QUEUES + 1; - UINT uiIndex=0; + UINT uiIndex = 0; int process_done = 1; - //int idleflag = 0 ; struct bcm_usb_rcb *pRcb = (struct bcm_usb_rcb *)urb->context; struct bcm_interface_adapter *psIntfAdapter = pRcb->psIntfAdapter; struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter; @@ -52,49 +50,40 @@ static void read_bulk_callback(struct urb *urb) pr_info(PFX "%s: rx urb status %d length %d\n", Adapter->dev->name, urb->status, urb->actual_length); - if((Adapter->device_removed == TRUE) || - (TRUE == Adapter->bEndPointHalted) || - (0 == urb->actual_length) - ) - { - pRcb->bUsed = false; - atomic_dec(&psIntfAdapter->uNumRcbUsed); + if ((Adapter->device_removed == TRUE) || + (TRUE == Adapter->bEndPointHalted) || + (0 == urb->actual_length)) { + pRcb->bUsed = false; + atomic_dec(&psIntfAdapter->uNumRcbUsed); return; } - if(urb->status != STATUS_SUCCESS) - { - if(urb->status == -EPIPE) - { - Adapter->bEndPointHalted = TRUE ; + if (urb->status != STATUS_SUCCESS) { + if (urb->status == -EPIPE) { + Adapter->bEndPointHalted = TRUE; wake_up(&Adapter->tx_packet_wait_queue); - } - else - { - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"Rx URB has got cancelled. 
status :%d", urb->status); + } else { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Rx URB has got cancelled. status :%d", urb->status); } pRcb->bUsed = false; - atomic_dec(&psIntfAdapter->uNumRcbUsed); - urb->status = STATUS_SUCCESS ; - return ; + atomic_dec(&psIntfAdapter->uNumRcbUsed); + urb->status = STATUS_SUCCESS; + return; } - if(Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode)) - { - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"device is going in low power mode while PMU option selected..hence rx packet should not be process"); - return ; + if (Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode)) { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "device is going in low power mode while PMU option selected..hence rx packet should not be process"); + return; } - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength); - if(!pLeader->PLength) - { - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0"); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength); + if (!pLeader->PLength) { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0"); atomic_dec(&psIntfAdapter->uNumRcbUsed); return; } - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status,pLeader->PLength,pLeader->Vcid); - if(MAX_CNTL_PKT_SIZE < pLeader->PLength) - { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status, pLeader->PLength, pLeader->Vcid); + if (MAX_CNTL_PKT_SIZE < pLeader->PLength) { if (netif_msg_rx_err(Adapter)) pr_info(PFX "%s: corrupted leader length...%d\n", Adapter->dev->name, pLeader->PLength); @@ -103,65 +92,58 @@ static void read_bulk_callback(struct urb *urb) return; } - QueueIndex = SearchVcid( Adapter,pLeader->Vcid); - if(QueueIndex < NO_OF_QUEUES) - { + QueueIndex = SearchVcid(Adapter, pLeader->Vcid); + if (QueueIndex < NO_OF_QUEUES) { bHeaderSupressionEnabled = Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled; bHeaderSupressionEnabled = bHeaderSupressionEnabled & Adapter->bPHSEnabled; } - skb = dev_alloc_skb (pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER);//2 //2 for allignment - if(!skb) - { - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! Dropping the Packet"); + skb = dev_alloc_skb(pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER); + if (!skb) { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! 
Dropping the Packet"); atomic_dec(&psIntfAdapter->uNumRcbUsed); return; } - /* If it is a control Packet, then call handle_bcm_packet ()*/ - if((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) || - (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F))) - { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt..."); + /* If it is a control Packet, then call handle_bcm_packet ()*/ + if ((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) || + (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F))) { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt..."); *(PUSHORT)skb->data = pLeader->Status; - memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer + - (sizeof(struct bcm_leader)), pLeader->PLength); + memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer + + (sizeof(struct bcm_leader)), pLeader->PLength); skb->len = pLeader->PLength + sizeof(USHORT); spin_lock(&Adapter->control_queue_lock); - ENQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail,skb); + ENQUEUEPACKET(Adapter->RxControlHead, Adapter->RxControlTail, skb); spin_unlock(&Adapter->control_queue_lock); atomic_inc(&Adapter->cntrlpktCnt); wake_up(&Adapter->process_rx_cntrlpkt); - } - else - { + } else { /* - * Data Packet, Format a proper Ethernet Header - * and give it to the stack - */ - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt..."); + * Data Packet, Format a proper Ethernet Header + * and give it to the stack + */ + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt..."); skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES); memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer + sizeof(struct bcm_leader), pLeader->PLength); skb->dev = Adapter->dev; /* currently skb->len has extra ETH_HLEN bytes in the beginning */ - skb_put (skb, pLeader->PLength + ETH_HLEN); - Adapter->PackInfo[QueueIndex].uiTotalRxBytes+=pLeader->PLength; - Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes+= pLeader->PLength; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength); + skb_put(skb, pLeader->PLength + ETH_HLEN); + Adapter->PackInfo[QueueIndex].uiTotalRxBytes += pLeader->PLength; + Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes += pLeader->PLength; + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength); - if(netif_running(Adapter->dev)) - { + if (netif_running(Adapter->dev)) { /* Moving ahead by ETH_HLEN to the data ptr as received from FW */ skb_pull(skb, ETH_HLEN); PHSReceive(Adapter, pLeader->Vcid, skb, &skb->len, - NULL,bHeaderSupressionEnabled); + NULL, bHeaderSupressionEnabled); - if(!Adapter->PackInfo[QueueIndex].bEthCSSupport) - { + if (!Adapter->PackInfo[QueueIndex].bEthCSSupport) { skb_push(skb, ETH_HLEN); memcpy(skb->data, skb->dev->dev_addr, 6); @@ -169,29 +151,26 @@ static void read_bulk_callback(struct urb *urb) (*(skb->data+11))++; *(skb->data+12) = 0x08; *(skb->data+13) = 0x00; - pLeader->PLength+=ETH_HLEN; + pLeader->PLength += ETH_HLEN; } skb->protocol = eth_type_trans(skb, Adapter->dev); process_done = netif_rx(skb); - } - else - { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB..."); + } else { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB..."); dev_kfree_skb(skb); } 
++Adapter->dev->stats.rx_packets; Adapter->dev->stats.rx_bytes += pLeader->PLength; - for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++) - { - if((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) - && (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex))) + for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) { + if ((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) && + (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex))) Adapter->aRxPktSizeHist[uiIndex]++; } } - Adapter->PrevNumRecvDescs++; + Adapter->PrevNumRecvDescs++; pRcb->bUsed = false; atomic_dec(&psIntfAdapter->uNumRcbUsed); } @@ -201,23 +180,18 @@ static int ReceiveRcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_us struct urb *urb = pRcb->urb; int retval = 0; - usb_fill_bulk_urb(urb, psIntfAdapter->udev, usb_rcvbulkpipe( - psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_endpointAddr), - urb->transfer_buffer, BCM_USB_MAX_READ_LENGTH, read_bulk_callback, - pRcb); - if(false == psIntfAdapter->psAdapter->device_removed && - false == psIntfAdapter->psAdapter->bEndPointHalted && - false == psIntfAdapter->bSuspended && - false == psIntfAdapter->bPreparingForBusSuspend) - { + usb_fill_bulk_urb(urb, psIntfAdapter->udev, usb_rcvbulkpipe(psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_endpointAddr), + urb->transfer_buffer, BCM_USB_MAX_READ_LENGTH, read_bulk_callback, pRcb); + if (false == psIntfAdapter->psAdapter->device_removed && + false == psIntfAdapter->psAdapter->bEndPointHalted && + false == psIntfAdapter->bSuspended && + false == psIntfAdapter->bPreparingForBusSuspend) { retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval) - { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "failed submitting read urb, error %d", retval); - //if this return value is because of pipe halt. need to clear this. - if(retval == -EPIPE) - { - psIntfAdapter->psAdapter->bEndPointHalted = TRUE ; + if (retval) { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "failed submitting read urb, error %d", retval); + /* if this return value is because of pipe halt. need to clear this. */ + if (retval == -EPIPE) { + psIntfAdapter->psAdapter->bEndPointHalted = TRUE; wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue); } @@ -240,25 +214,20 @@ Return: TRUE - If Rx was successful. Other - If an error occurred. 
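+	(InterfaceRx keeps the bulk-IN pipeline full: for each of the
+	NUM_RX_DESC receive descriptors not currently in use it takes a
+	free RCB and submits a read URB for it through ReceiveRcb().)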
*/ -bool InterfaceRx (struct bcm_interface_adapter *psIntfAdapter) +bool InterfaceRx(struct bcm_interface_adapter *psIntfAdapter) { USHORT RxDescCount = NUM_RX_DESC - atomic_read(&psIntfAdapter->uNumRcbUsed); struct bcm_usb_rcb *pRcb = NULL; -// RxDescCount = psIntfAdapter->psAdapter->CurrNumRecvDescs - -// psIntfAdapter->psAdapter->PrevNumRecvDescs; - while(RxDescCount) - { + while (RxDescCount) { pRcb = GetBulkInRcb(psIntfAdapter); - if(pRcb == NULL) - { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Unable to get Rcb pointer"); + if (pRcb == NULL) { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Unable to get Rcb pointer"); return false; } - //atomic_inc(&psIntfAdapter->uNumRcbUsed); ReceiveRcb(psIntfAdapter, pRcb); RxDescCount--; - } + } return TRUE; } diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c index b9c2784e9811bb..ea7707b8e60e2c 100644 --- a/drivers/staging/bcm/InterfaceTx.c +++ b/drivers/staging/bcm/InterfaceTx.c @@ -3,26 +3,22 @@ /*this is transmit call-back(BULK OUT)*/ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/) { - struct bcm_usb_tcb *pTcb= (struct bcm_usb_tcb *)urb->context; + struct bcm_usb_tcb *pTcb = (struct bcm_usb_tcb *)urb->context; struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter; struct bcm_link_request *pControlMsg = (struct bcm_link_request *)urb->transfer_buffer; - struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter ; - bool bpowerDownMsg = false ; + struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter; + bool bpowerDownMsg = false; struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - if (unlikely(netif_msg_tx_done(Adapter))) - pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status); + if (unlikely(netif_msg_tx_done(Adapter))) + pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status); - if(urb->status != STATUS_SUCCESS) - { - if(urb->status == -EPIPE) - { - psIntfAdapter->psAdapter->bEndPointHalted = TRUE ; + if (urb->status != STATUS_SUCCESS) { + if (urb->status == -EPIPE) { + psIntfAdapter->psAdapter->bEndPointHalted = TRUE; wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue); - } - else - { - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Tx URB has got cancelled. status :%d", urb->status); + } else { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Tx URB has got cancelled. status :%d", urb->status); } } @@ -31,69 +27,59 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/) - if(TRUE == psAdapter->bPreparingForLowPowerMode) - { - - if(((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) && - (pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE))) + if (TRUE == psAdapter->bPreparingForLowPowerMode) { - { - bpowerDownMsg = TRUE ; - //This covers the bus err while Idle Request msg sent down. - if(urb->status != STATUS_SUCCESS) - { - psAdapter->bPreparingForLowPowerMode = false ; - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Idle Mode Request msg failed to reach to Modem"); - //Signalling the cntrl pkt path in Ioctl + if (((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) && + (pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE))) { + bpowerDownMsg = TRUE; + /* This covers the bus err while Idle Request msg sent down. 
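+					 * On a bus error the transition is abandoned:
+					 * bPreparingForLowPowerMode is cleared and the
+					 * control packet path waiting on
+					 * lowpower_mode_wait_queue is woken up.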
*/ + if (urb->status != STATUS_SUCCESS) { + psAdapter->bPreparingForLowPowerMode = false; + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Idle Mode Request msg failed to reach to Modem"); + /* Signalling the cntrl pkt path in Ioctl */ wake_up(&psAdapter->lowpower_mode_wait_queue); StartInterruptUrb(psIntfAdapter); goto err_exit; } - if(psAdapter->bDoSuspend == false) - { + if (psAdapter->bDoSuspend == false) { psAdapter->IdleMode = TRUE; - //since going in Idle mode completed hence making this var false; - psAdapter->bPreparingForLowPowerMode = false ; + /* since going in Idle mode completed hence making this var false */ + psAdapter->bPreparingForLowPowerMode = false; - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State..."); - //Signalling the cntrl pkt path in Ioctl + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State..."); + /* Signalling the cntrl pkt path in Ioctl*/ wake_up(&psAdapter->lowpower_mode_wait_queue); } - } - else if((pControlMsg->Leader.Status == LINK_UP_CONTROL_REQ) && + } else if ((pControlMsg->Leader.Status == LINK_UP_CONTROL_REQ) && (pControlMsg->szData[0] == LINK_UP_ACK) && (pControlMsg->szData[1] == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) && - (pControlMsg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER)) - { - //This covers the bus err while shutdown Request msg sent down. - if(urb->status != STATUS_SUCCESS) - { - psAdapter->bPreparingForLowPowerMode = false ; - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Shutdown Request Msg failed to reach to Modem"); - //Signalling the cntrl pkt path in Ioctl + (pControlMsg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER)) { + /* This covers the bus err while shutdown Request msg sent down. */ + if (urb->status != STATUS_SUCCESS) { + psAdapter->bPreparingForLowPowerMode = false; + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Shutdown Request Msg failed to reach to Modem"); + /* Signalling the cntrl pkt path in Ioctl */ wake_up(&psAdapter->lowpower_mode_wait_queue); StartInterruptUrb(psIntfAdapter); goto err_exit; } - bpowerDownMsg = TRUE ; - if(psAdapter->bDoSuspend == false) - { + bpowerDownMsg = TRUE; + if (psAdapter->bDoSuspend == false) { psAdapter->bShutStatus = TRUE; - //since going in shutdown mode completed hence making this var false; - psAdapter->bPreparingForLowPowerMode = false ; - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Host Entered in shutdown Mode State..."); - //Signalling the cntrl pkt path in Ioctl + /* since going in shutdown mode completed hence making this var false */ + psAdapter->bPreparingForLowPowerMode = false; + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in shutdown Mode State..."); + /* Signalling the cntrl pkt path in Ioctl */ wake_up(&psAdapter->lowpower_mode_wait_queue); } } - if(psAdapter->bDoSuspend && bpowerDownMsg) - { - //issuing bus suspend request - BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Issuing the Bus suspend request to USB stack"); + if (psAdapter->bDoSuspend && bpowerDownMsg) { + /* issuing bus suspend request */ + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Issuing the Bus suspend request to USB stack"); psIntfAdapter->bPreparingForBusSuspend = TRUE; schedule_work(&psIntfAdapter->usbSuspendWork); @@ -101,9 +87,9 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/) } -err_exit : +err_exit: usb_free_coherent(urb->dev, urb->transfer_buffer_length, - 
urb->transfer_buffer, urb->transfer_dma); + urb->transfer_buffer, urb->transfer_dma); } @@ -112,14 +98,13 @@ static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAda struct bcm_usb_tcb *pTcb = NULL; UINT index = 0; - if((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) && - (psIntfAdapter->psAdapter->StopAllXaction ==false)) - { + if ((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) && + (psIntfAdapter->psAdapter->StopAllXaction == false)) { index = atomic_read(&psIntfAdapter->uCurrTcb); pTcb = &psIntfAdapter->asUsbTcb[index]; pTcb->bUsed = TRUE; - pTcb->psIntfAdapter= psIntfAdapter; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got Tx desc %d used %d", + pTcb->psIntfAdapter = psIntfAdapter; + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got Tx desc %d used %d", index, atomic_read(&psIntfAdapter->uNumTcbUsed)); index = (index + 1) % MAXIMUM_USB_TCB; atomic_set(&psIntfAdapter->uCurrTcb, index); @@ -135,44 +120,37 @@ static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_u int retval = 0; urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len, - GFP_ATOMIC, &urb->transfer_dma); - if (!urb->transfer_buffer) - { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n"); + GFP_ATOMIC, &urb->transfer_dma); + if (!urb->transfer_buffer) { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n"); return -ENOMEM; } memcpy(urb->transfer_buffer, data, len); urb->transfer_buffer_length = len; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending Bulk out packet\n"); - //For T3B,INT OUT end point will be used as bulk out end point - if((psIntfAdapter->psAdapter->chip_id == T3B) && (psIntfAdapter->bHighSpeedDevice == TRUE)) - { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending Bulk out packet\n"); + /* For T3B,INT OUT end point will be used as bulk out end point */ + if ((psIntfAdapter->psAdapter->chip_id == T3B) && (psIntfAdapter->bHighSpeedDevice == TRUE)) { usb_fill_int_urb(urb, psIntfAdapter->udev, - psIntfAdapter->sBulkOut.bulk_out_pipe, + psIntfAdapter->sBulkOut.bulk_out_pipe, urb->transfer_buffer, len, write_bulk_callback, pTcb, psIntfAdapter->sBulkOut.int_out_interval); - } - else - { + } else { usb_fill_bulk_urb(urb, psIntfAdapter->udev, psIntfAdapter->sBulkOut.bulk_out_pipe, urb->transfer_buffer, len, write_bulk_callback, pTcb); } urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */ - if(false == psIntfAdapter->psAdapter->device_removed && + if (false == psIntfAdapter->psAdapter->device_removed && false == psIntfAdapter->psAdapter->bEndPointHalted && false == psIntfAdapter->bSuspended && - false == psIntfAdapter->bPreparingForBusSuspend) - { + false == psIntfAdapter->bPreparingForBusSuspend) { retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval) - { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "failed submitting write urb, error %d", retval); - if(retval == -EPIPE) - { - psIntfAdapter->psAdapter->bEndPointHalted = TRUE ; + if (retval) { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "failed submitting write urb, error %d", retval); + if (retval == -EPIPE) { + psIntfAdapter->psAdapter->bEndPointHalted = TRUE; wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue); } } @@ -182,13 +160,12 @@ static int 
TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_u int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len) { - struct bcm_usb_tcb *pTcb= NULL; + struct bcm_usb_tcb *pTcb = NULL; struct bcm_interface_adapter *psIntfAdapter = arg; - pTcb= GetBulkOutTcb(psIntfAdapter); - if(pTcb == NULL) - { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "No URB to transmit packet, dropping packet"); + pTcb = GetBulkOutTcb(psIntfAdapter); + if (pTcb == NULL) { + BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "No URB to transmit packet, dropping packet"); return -EFAULT; } return TransmitTcb(psIntfAdapter, pTcb, data, len); diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c index 892ebc65cdd396..afc7bcc3e54b5b 100644 --- a/drivers/staging/bcm/PHSModule.c +++ b/drivers/staging/bcm/PHSModule.c @@ -1280,11 +1280,11 @@ static int phs_decompress(unsigned char *in_buf, if (bit == SUPPRESS) { *out_buf = *phsf; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d phsf %d ouput %d", + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d phsf %d output %d", phss, *phsf, *out_buf); } else { *out_buf = *in_buf; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d input %d ouput %d", + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d input %d output %d", phss, *in_buf, *out_buf); in_buf++; size++; diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c index 1609a2bdc522a8..0727599bf5fa8e 100644 --- a/drivers/staging/bcm/Qos.c +++ b/drivers/staging/bcm/Qos.c @@ -24,7 +24,7 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex); * * Returns - TRUE(If address matches) else FAIL . *********************************************************************/ -bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP) +static bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP) { UCHAR ucLoopIndex = 0; @@ -58,7 +58,7 @@ bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSr * * Returns - TRUE(If address matches) else FAIL . *********************************************************************/ -bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP) +static bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP) { UCHAR ucLoopIndex = 0; struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); @@ -91,7 +91,7 @@ bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulD * * Returns - TRUE(If address matches) else FAIL. **************************************************************************/ -bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService) +static bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService) { struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c index 9e5f955a1a0852..fca164f51f4b11 100644 --- a/drivers/staging/bcm/nvm.c +++ b/drivers/staging/bcm/nvm.c @@ -1354,67 +1354,6 @@ static int BeceemFlashBulkWriteStatus(struct bcm_mini_adapter *Adapter, return Status; } -/* - * Procedure: PropagateCalParamsFromEEPROMToMemory - * - * Description: Dumps the calibration section of EEPROM to DDR. 
- * - * Arguments: - * Adapter - ptr to Adapter object instance - * Returns: - * OSAL_STATUS_CODE - * - */ - -int PropagateCalParamsFromEEPROMToMemory(struct bcm_mini_adapter *Adapter) -{ - PCHAR pBuff = kmalloc(BUFFER_4K, GFP_KERNEL); - unsigned int uiEepromSize = 0; - unsigned int uiIndex = 0; - unsigned int uiBytesToCopy = 0; - unsigned int uiCalStartAddr = EEPROM_CALPARAM_START; - unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC; - unsigned int value; - int Status = 0; - - if (!pBuff) - return -ENOMEM; - - if (0 != BeceemEEPROMBulkRead(Adapter, &uiEepromSize, EEPROM_SIZE_OFFSET, 4)) { - kfree(pBuff); - return -1; - } - - uiEepromSize >>= 16; - if (uiEepromSize > 1024 * 1024) { - kfree(pBuff); - return -1; - } - - uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize); - - while (uiBytesToCopy) { - if (0 != BeceemEEPROMBulkRead(Adapter, (PUINT)pBuff, uiCalStartAddr, uiBytesToCopy)) { - Status = -1; - break; - } - wrm(Adapter, uiMemoryLoc, (PCHAR)(((PULONG)pBuff) + uiIndex), uiBytesToCopy); - uiMemoryLoc += uiBytesToCopy; - uiEepromSize -= uiBytesToCopy; - uiCalStartAddr += uiBytesToCopy; - uiIndex += uiBytesToCopy / 4; - uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize); - - } - value = 0xbeadbead; - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value)); - value = 0xbeadbead; - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value)); - kfree(pBuff); - - return Status; -} - /* * Procedure: PropagateCalParamsFromFlashToMemory * @@ -2873,7 +2812,7 @@ int BcmFlash2xBulkRead(struct bcm_mini_adapter *Adapter, SectionStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal); if (SectionStartOffset == STATUS_FAILURE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exixt in Flash 2.x Map ", eFlash2xSectionVal); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash 2.x Map ", eFlash2xSectionVal); return -EINVAL; } @@ -2936,7 +2875,7 @@ int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter, FlashSectValStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectVal); if (FlashSectValStartOffset == STATUS_FAILURE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exixt in Flash Map 2.x", eFlash2xSectVal); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash Map 2.x", eFlash2xSectVal); return -EINVAL; } @@ -3911,7 +3850,7 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2 uiNumOfBytes = psFlash2xReadWrite->numOfBytes; if (IsSectionExistInFlash(Adapter, psFlash2xReadWrite->Section) != TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exixt in Flash", psFlash2xReadWrite->Section); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exist in Flash", psFlash2xReadWrite->Section); return false; } uiSectStartOffset = BcmGetSectionValStartOffset(Adapter, psFlash2xReadWrite->Section); @@ -3944,6 +3883,15 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "End offset :%x\n", uiSectEndOffset); + /* psFlash2xReadWrite->offset and uiNumOfBytes are user controlled and can lead to integer overflows */ + if (psFlash2xReadWrite->offset > uiSectEndOffset) { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request...."); + return false; + } + if (uiNumOfBytes > uiSectEndOffset) { + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid 
Request...."); + return false; + } /* Checking the boundary condition */ if ((uiSectStartOffset + psFlash2xReadWrite->offset + uiNumOfBytes) <= uiSectEndOffset) return TRUE; @@ -4530,13 +4478,13 @@ int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section int Status = false; if (IsSectionExistInFlash(Adapter, Section) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exixt", Section); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exist", Section); return false; } offset = BcmGetSectionValStartOffset(Adapter, Section); if (offset == INVALID_OFFSET) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exixt", Section); + BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exist", Section); return false; } diff --git a/drivers/staging/btmtk_usb/Kconfig b/drivers/staging/btmtk_usb/Kconfig deleted file mode 100644 index a425ebda6c7a34..00000000000000 --- a/drivers/staging/btmtk_usb/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -config USB_BTMTK - tristate "Mediatek Bluetooth support" - depends on USB && BT && m - ---help--- - Say Y here if you wish to control a MTK USB Bluetooth. - - This option depends on 'USB' support being enabled - - To compile this driver as a module, choose M here: the - module will be called btmtk_usb. - diff --git a/drivers/staging/btmtk_usb/Makefile b/drivers/staging/btmtk_usb/Makefile deleted file mode 100644 index 4d6c9d764621cf..00000000000000 --- a/drivers/staging/btmtk_usb/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_USB_BTMTK) += btmtk_usb.o diff --git a/drivers/staging/btmtk_usb/README b/drivers/staging/btmtk_usb/README deleted file mode 100644 index c046c8e96b2d4b..00000000000000 --- a/drivers/staging/btmtk_usb/README +++ /dev/null @@ -1,14 +0,0 @@ --build driver modules - make - --install driver modules - make install - --remove driver modules - make clean - --dynamic debug message - turn on CONFIG_DYNAMIC_DEBUG compiler flag for current kernel - mount -t debugfs none /sys/kernel/debug/ - echo "module module_name +p" > /sys/kernel/debug/dynamic_debug/control(turn on debug messages, module name such as btmtk_usb) - echo "module module_name -p" > /sys/kernel/debug/dynamic_debug/control(turn off debug messages, module name such as btmtk_usb) diff --git a/drivers/staging/btmtk_usb/TODO b/drivers/staging/btmtk_usb/TODO deleted file mode 100644 index a71d1297942dd8..00000000000000 --- a/drivers/staging/btmtk_usb/TODO +++ /dev/null @@ -1,10 +0,0 @@ -TODO: - - checkpatch.pl clean - - determine if the driver should not be using a duplicate - version of the usb-bluetooth interface code, but should - be merged into the drivers/bluetooth/ directory and - infrastructure instead. - - review by the bluetooth developer community - -Please send any patches for this driver to Yu-Chen, Cho and -jay.hung@mediatek.com diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c deleted file mode 100644 index 9a5ebd6cc51235..00000000000000 --- a/drivers/staging/btmtk_usb/btmtk_usb.c +++ /dev/null @@ -1,1810 +0,0 @@ -/* - * MediaTek Bluetooth USB Driver - * - * Copyright (C) 2013, MediaTek co. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * or on the worldwide web at - * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "btmtk_usb.h" - -#define VERSION "1.0.4" -#define MT7650_FIRMWARE "mt7650.bin" -#define MT7662_FIRMWARE "mt7662.bin" - -static struct usb_driver btmtk_usb_driver; - - -static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *); -static int btmtk_usb_load_fw(struct btmtk_usb_data *); - -static void hex_dump(char *str, u8 *src_buf, u32 src_buf_len) -{ - unsigned char *pt; - int x; - - pt = src_buf; - - BT_DBG("%s: %p, len = %d\n", str, src_buf, src_buf_len); - - for (x = 0; x < src_buf_len; x++) { - if (x % 16 == 0) - BT_DBG("0x%04x : ", x); - BT_DBG("%02x ", ((unsigned char)pt[x])); - if (x % 16 == 15) - BT_DBG("\n"); - } - - BT_DBG("\n"); -} - -static int btmtk_usb_reset(struct usb_device *udev) -{ - int ret; - - BT_DBG("%s\n", __func__); - - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, - DEVICE_VENDOR_REQUEST_OUT, 0x01, 0x00, - NULL, 0x00, CONTROL_TIMEOUT_JIFFIES); - - if (ret < 0) { - BT_ERR("%s error(%d)\n", __func__, ret); - return ret; - } - - if (ret > 0) - ret = 0; - - return ret; -} - -static int btmtk_usb_io_read32(struct btmtk_usb_data *data, u32 reg, u32 *val) -{ - u8 request = data->r_request; - struct usb_device *udev = data->udev; - int ret; - __le32 val_le; - - ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, - DEVICE_VENDOR_REQUEST_IN, 0x0, reg, data->io_buf, - 4, CONTROL_TIMEOUT_JIFFIES); - - if (ret < 0) { - *val = 0xffffffff; - BT_ERR("%s error(%d), reg=%x, value=%x\n", - __func__, ret, reg, *val); - return ret; - } - - memmove(&val_le, data->io_buf, 4); - - *val = le32_to_cpu(val_le); - - if (ret > 0) - ret = 0; - - return ret; -} - -static int btmtk_usb_io_write32(struct btmtk_usb_data *data, u32 reg, u32 val) -{ - u16 value, index; - u8 request = data->w_request; - struct usb_device *udev = data->udev; - int ret; - - index = (u16)reg; - value = val & 0x0000ffff; - - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, - DEVICE_VENDOR_REQUEST_OUT, value, index, - NULL, 0, CONTROL_TIMEOUT_JIFFIES); - - if (ret < 0) { - BT_ERR("%s error(%d), reg=%x, value=%x\n", - __func__, ret, reg, val); - return ret; - } - - index = (u16)(reg + 2); - value = (val & 0xffff0000) >> 16; - - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), - request, DEVICE_VENDOR_REQUEST_OUT, - value, index, NULL, 0, CONTROL_TIMEOUT_JIFFIES); - - if (ret < 0) { - BT_ERR("%s error(%d), reg=%x, value=%x\n", - __func__, ret, reg, val); - return ret; - } - - if (ret > 0) - ret = 0; - - return ret; -} - -static int btmtk_usb_switch_iobase(struct btmtk_usb_data *data, int base) -{ - int ret = 0; - - switch (base) { - case SYSCTL: - data->w_request = 0x42; - data->r_request = 0x47; - break; - case WLAN: - data->w_request = 0x02; - data->r_request = 0x07; - break; - - default: - return -EINVAL; - } - - return ret; -} - -static void btmtk_usb_cap_init(struct btmtk_usb_data 
*data) -{ - const struct firmware *firmware; - struct usb_device *udev = data->udev; - int ret; - - btmtk_usb_io_read32(data, 0x00, &data->chip_id); - - BT_DBG("chip id = %x\n", data->chip_id); - - if (is_mt7630(data) || is_mt7650(data)) { - data->need_load_fw = 1; - data->need_load_rom_patch = 0; - ret = request_firmware(&firmware, MT7650_FIRMWARE, &udev->dev); - if (ret < 0) { - if (ret == -ENOENT) { - BT_ERR("Firmware file \"%s\" not found\n", - MT7650_FIRMWARE); - } else { - BT_ERR("Firmware file \"%s\" request failed (err=%d)\n", - MT7650_FIRMWARE, ret); - } - } else { - BT_DBG("Firmware file \"%s\" Found\n", - MT7650_FIRMWARE); - /* load firmware here */ - data->firmware = firmware; - btmtk_usb_load_fw(data); - } - release_firmware(firmware); - } else if (is_mt7632(data) || is_mt7662(data)) { - data->need_load_fw = 0; - data->need_load_rom_patch = 1; - data->rom_patch_offset = 0x90000; - ret = request_firmware(&firmware, MT7662_FIRMWARE, &udev->dev); - if (ret < 0) { - if (ret == -ENOENT) { - BT_ERR("Firmware file \"%s\" not found\n", - MT7662_FIRMWARE); - } else { - BT_ERR("Firmware file \"%s\" request failed (err=%d)\n", - MT7662_FIRMWARE, ret); - } - } else { - BT_DBG("Firmware file \"%s\" Found\n", MT7662_FIRMWARE); - /* load rom patch here */ - data->firmware = firmware; - data->rom_patch_len = firmware->size; - btmtk_usb_load_rom_patch(data); - } - release_firmware(firmware); - } else { - BT_ERR("unknow chip(%x)\n", data->chip_id); - } -} - -static u16 checksume16(u8 *pData, int len) -{ - int sum = 0; - - while (len > 1) { - sum += *((u16 *)pData); - - pData = pData + 2; - - if (sum & 0x80000000) - sum = (sum & 0xFFFF) + (sum >> 16); - - len -= 2; - } - - if (len) - sum += *((u8 *)pData); - - while (sum >> 16) - sum = (sum & 0xFFFF) + (sum >> 16); - - return ~sum; -} - -static int btmtk_usb_chk_crc(struct btmtk_usb_data *data, u32 checksum_len) -{ - int ret = 0; - struct usb_device *udev = data->udev; - - BT_DBG("%s\n", __func__); - - memmove(data->io_buf, &data->rom_patch_offset, 4); - memmove(&data->io_buf[4], &checksum_len, 4); - - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x1, - DEVICE_VENDOR_REQUEST_IN, 0x20, 0x00, data->io_buf, - 8, CONTROL_TIMEOUT_JIFFIES); - - if (ret < 0) - BT_ERR("%s error(%d)\n", __func__, ret); - - return ret; -} - -static u16 btmtk_usb_get_crc(struct btmtk_usb_data *data) -{ - int ret = 0; - struct usb_device *udev = data->udev; - u16 crc, count = 0; - __le16 crc_le; - - BT_DBG("%s\n", __func__); - - while (1) { - ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - 0x01, DEVICE_VENDOR_REQUEST_IN, - 0x21, 0x00, data->io_buf, 2, - CONTROL_TIMEOUT_JIFFIES); - - if (ret < 0) { - crc = 0xFFFF; - BT_ERR("%s error(%d)\n", __func__, ret); - } - - memmove(&crc_le, data->io_buf, 2); - - crc = le16_to_cpu(crc_le); - - if (crc != 0xFFFF) - break; - - mdelay(100); - - if (count++ > 100) { - BT_ERR("Query CRC over %d times\n", count); - break; - } - } - - return crc; -} - -static int btmtk_usb_reset_wmt(struct btmtk_usb_data *data) -{ - int ret = 0; - - /* reset command */ - u8 cmd[8] = {0x6F, 0xFC, 0x05, 0x01, 0x07, 0x01, 0x00, 0x04}; - - memmove(data->io_buf, cmd, 8); - - BT_DBG("%s\n", __func__); - - ret = usb_control_msg(data->udev, usb_sndctrlpipe(data->udev, 0), 0x01, - DEVICE_CLASS_REQUEST_OUT, 0x12, 0x00, - data->io_buf, 8, CONTROL_TIMEOUT_JIFFIES); - - if (ret) - BT_ERR("%s:(%d)\n", __func__, ret); - - return ret; -} - -static void load_rom_patch_complete(struct urb *urb) -{ - - struct completion *sent_to_mcu_done = (struct completion 
*)urb->context; - - complete(sent_to_mcu_done); -} - -static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *data) -{ - u32 loop = 0; - u32 value; - s32 sent_len; - int ret = 0, total_checksum = 0; - struct urb *urb; - u32 patch_len = 0; - u32 cur_len = 0; - dma_addr_t data_dma; - struct completion sent_to_mcu_done; - int first_block = 1; - unsigned char phase; - void *buf; - char *pos; - unsigned int pipe; - pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); - - if (!data->firmware) { - BT_ERR("%s:please assign a rom patch\n", __func__); - return -1; - } - -load_patch_protect: - btmtk_usb_switch_iobase(data, WLAN); - btmtk_usb_io_read32(data, SEMAPHORE_03, &value); - loop++; - - if (((value & 0x01) == 0x00) && (loop < 600)) { - mdelay(1); - goto load_patch_protect; - } - - btmtk_usb_io_write32(data, 0x1004, 0x2c); - - btmtk_usb_switch_iobase(data, SYSCTL); - - btmtk_usb_io_write32(data, 0x1c, 0x30); - - /* Enable USB_DMA_CFG */ - btmtk_usb_io_write32(data, 0x9018, 0x00c00020); - - btmtk_usb_switch_iobase(data, WLAN); - - /* check ROM patch if upgrade */ - btmtk_usb_io_read32(data, COM_REG0, &value); - - if ((value & 0x02) == 0x02) - goto error0; - - urb = usb_alloc_urb(0, GFP_ATOMIC); - - if (!urb) { - ret = -ENOMEM; - goto error0; - } - - buf = usb_alloc_coherent(data->udev, UPLOAD_PATCH_UNIT, - GFP_ATOMIC, &data_dma); - - if (!buf) { - ret = -ENOMEM; - goto error1; - } - - pos = buf; - BT_DBG("loading rom patch"); - - init_completion(&sent_to_mcu_done); - - cur_len = 0x00; - patch_len = data->rom_patch_len - PATCH_INFO_SIZE; - - /* loading rom patch */ - while (1) { - s32 sent_len_max = UPLOAD_PATCH_UNIT - PATCH_HEADER_SIZE; - sent_len = min_t(s32, (patch_len - cur_len), sent_len_max); - - BT_DBG("patch_len = %d\n", patch_len); - BT_DBG("cur_len = %d\n", cur_len); - BT_DBG("sent_len = %d\n", sent_len); - - if (sent_len <= 0) - break; - - if (first_block == 1) { - if (sent_len < sent_len_max) - phase = PATCH_PHASE3; - else - phase = PATCH_PHASE1; - first_block = 0; - } else if (sent_len == sent_len_max) { - phase = PATCH_PHASE2; - } else { - phase = PATCH_PHASE3; - } - - /* prepare HCI header */ - pos[0] = 0x6F; - pos[1] = 0xFC; - pos[2] = (sent_len + 5) & 0xFF; - pos[3] = ((sent_len + 5) >> 8) & 0xFF; - - /* prepare WMT header */ - pos[4] = 0x01; - pos[5] = 0x01; - pos[6] = (sent_len + 1) & 0xFF; - pos[7] = ((sent_len + 1) >> 8) & 0xFF; - - pos[8] = phase; - - memcpy(&pos[9], - data->firmware->data + PATCH_INFO_SIZE + cur_len, - sent_len); - - BT_DBG("sent_len + PATCH_HEADER_SIZE = %d, phase = %d\n", - sent_len + PATCH_HEADER_SIZE, phase); - - usb_fill_bulk_urb(urb, - data->udev, - pipe, - buf, - sent_len + PATCH_HEADER_SIZE, - load_rom_patch_complete, - &sent_to_mcu_done); - - urb->transfer_dma = data_dma; - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - - ret = usb_submit_urb(urb, GFP_ATOMIC); - - if (ret) - goto error2; - - if (!wait_for_completion_timeout(&sent_to_mcu_done, - msecs_to_jiffies(1000))) { - usb_kill_urb(urb); - BT_ERR("upload rom_patch timeout\n"); - goto error2; - } - - BT_DBG("."); - - mdelay(200); - - cur_len += sent_len; - - } - - total_checksum = checksume16( - (u8 *)data->firmware->data + PATCH_INFO_SIZE, - patch_len); - - BT_DBG("Send checksum req..\n"); - - btmtk_usb_chk_crc(data, patch_len); - - mdelay(20); - - if (total_checksum != btmtk_usb_get_crc(data)) { - BT_ERR("checksum fail!, local(0x%x) <> fw(0x%x)\n", - total_checksum, btmtk_usb_get_crc(data)); - ret = -1; - goto error2; - } - - mdelay(20); - - ret = 
btmtk_usb_reset_wmt(data); - - mdelay(20); - -error2: - usb_free_coherent(data->udev, UPLOAD_PATCH_UNIT, buf, data_dma); -error1: - usb_free_urb(urb); -error0: - btmtk_usb_io_write32(data, SEMAPHORE_03, 0x1); - return ret; -} - - -static int load_fw_iv(struct btmtk_usb_data *data) -{ - int ret; - struct usb_device *udev = data->udev; - char *buf = kmalloc(64, GFP_ATOMIC); - - memmove(buf, data->firmware->data + 32, 64); - - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, - DEVICE_VENDOR_REQUEST_OUT, 0x12, 0x0, buf, 64, - CONTROL_TIMEOUT_JIFFIES); - - if (ret < 0) { - BT_ERR("%s error(%d) step4\n", __func__, ret); - kfree(buf); - return ret; - } - - if (ret > 0) - ret = 0; - - kfree(buf); - - return ret; -} - -static void load_fw_complete(struct urb *urb) -{ - - struct completion *sent_to_mcu_done = (struct completion *)urb->context; - - complete(sent_to_mcu_done); -} - -static int btmtk_usb_load_fw(struct btmtk_usb_data *data) -{ - struct usb_device *udev = data->udev; - struct urb *urb; - void *buf; - u32 cur_len = 0; - u32 packet_header = 0; - __le32 packet_header_le; - u32 value; - u32 ilm_len = 0, dlm_len = 0; - u16 fw_ver, build_ver; - u32 loop = 0; - dma_addr_t data_dma; - int ret = 0, sent_len; - struct completion sent_to_mcu_done; - unsigned int pipe; - pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); - - if (!data->firmware) { - BT_ERR("%s:please assign a fw\n", __func__); - return -1; - } - - BT_DBG("bulk_tx_ep = %x\n", data->bulk_tx_ep->bEndpointAddress); - -loadfw_protect: - btmtk_usb_switch_iobase(data, WLAN); - btmtk_usb_io_read32(data, SEMAPHORE_00, &value); - loop++; - - if (((value & 0x1) == 0) && (loop < 10000)) - goto loadfw_protect; - - /* check MCU if ready */ - btmtk_usb_io_read32(data, COM_REG0, &value); - - if ((value & 0x01) == 0x01) - goto error0; - - /* Enable MPDMA TX and EP2 load FW mode */ - btmtk_usb_io_write32(data, 0x238, 0x1c000000); - - btmtk_usb_reset(udev); - mdelay(100); - - ilm_len = (*(data->firmware->data + 3) << 24) - | (*(data->firmware->data + 2) << 16) - | (*(data->firmware->data + 1) << 8) - | (*data->firmware->data); - - dlm_len = (*(data->firmware->data + 7) << 24) - | (*(data->firmware->data + 6) << 16) - | (*(data->firmware->data + 5) << 8) - | (*(data->firmware->data + 4)); - - fw_ver = (*(data->firmware->data + 11) << 8) | - (*(data->firmware->data + 10)); - - build_ver = (*(data->firmware->data + 9) << 8) | - (*(data->firmware->data + 8)); - - BT_DBG("fw version:%d.%d.%02d ", - (fw_ver & 0xf000) >> 8, - (fw_ver & 0x0f00) >> 8, - (fw_ver & 0x00ff)); - - BT_DBG("build:%x\n", build_ver); - - BT_DBG("build Time ="); - - for (loop = 0; loop < 16; loop++) - BT_DBG("%c", *(data->firmware->data + 16 + loop)); - - BT_DBG("\n"); - - BT_DBG("ILM length = %d(bytes)\n", ilm_len); - BT_DBG("DLM length = %d(bytes)\n", dlm_len); - - btmtk_usb_switch_iobase(data, SYSCTL); - - /* U2M_PDMA rx_ring_base_ptr */ - btmtk_usb_io_write32(data, 0x790, 0x400230); - - /* U2M_PDMA rx_ring_max_cnt */ - btmtk_usb_io_write32(data, 0x794, 0x1); - - /* U2M_PDMA cpu_idx */ - btmtk_usb_io_write32(data, 0x798, 0x1); - - /* U2M_PDMA enable */ - btmtk_usb_io_write32(data, 0x704, 0x44); - - urb = usb_alloc_urb(0, GFP_ATOMIC); - - if (!urb) { - ret = -ENOMEM; - goto error1; - } - - buf = usb_alloc_coherent(udev, 14592, GFP_ATOMIC, &data_dma); - - if (!buf) { - ret = -ENOMEM; - goto error2; - } - - BT_DBG("loading fw"); - - init_completion(&sent_to_mcu_done); - - btmtk_usb_switch_iobase(data, SYSCTL); - - cur_len = 0x40; - - /* Loading ILM */ - 
while (1) { - sent_len = min_t(s32, (ilm_len - cur_len), 14336); - - if (sent_len > 0) { - packet_header &= ~(0xffffffff); - packet_header |= (sent_len << 16); - packet_header_le = cpu_to_le32(packet_header); - - memmove(buf, &packet_header_le, 4); - memmove(buf + 4, data->firmware->data + 32 + cur_len, - sent_len); - - /* U2M_PDMA descriptor */ - btmtk_usb_io_write32(data, 0x230, cur_len); - - while ((sent_len % 4) != 0) - sent_len++; - - /* U2M_PDMA length */ - btmtk_usb_io_write32(data, 0x234, sent_len << 16); - - usb_fill_bulk_urb(urb, - udev, - pipe, - buf, - sent_len + 4, - load_fw_complete, - &sent_to_mcu_done); - - urb->transfer_dma = data_dma; - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - - ret = usb_submit_urb(urb, GFP_ATOMIC); - - if (ret) - goto error3; - - if (!wait_for_completion_timeout(&sent_to_mcu_done, - msecs_to_jiffies(1000))) { - usb_kill_urb(urb); - BT_ERR("upload ilm fw timeout\n"); - goto error3; - } - - BT_DBG("."); - - mdelay(200); - - cur_len += sent_len; - } else { - break; - } - } - - init_completion(&sent_to_mcu_done); - cur_len = 0x00; - - /* Loading DLM */ - while (1) { - sent_len = min_t(s32, (dlm_len - cur_len), 14336); - - if (sent_len <= 0) - break; - - packet_header &= ~(0xffffffff); - packet_header |= (sent_len << 16); - packet_header_le = cpu_to_le32(packet_header); - - memmove(buf, &packet_header_le, 4); - memmove(buf + 4, - data->firmware->data + 32 + ilm_len + cur_len, - sent_len); - - /* U2M_PDMA descriptor */ - btmtk_usb_io_write32(data, 0x230, 0x80000 + cur_len); - - while ((sent_len % 4) != 0) { - BT_DBG("sent_len is not divided by 4\n"); - sent_len++; - } - - /* U2M_PDMA length */ - btmtk_usb_io_write32(data, 0x234, sent_len << 16); - - usb_fill_bulk_urb(urb, - udev, - pipe, - buf, - sent_len + 4, - load_fw_complete, - &sent_to_mcu_done); - - urb->transfer_dma = data_dma; - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - - ret = usb_submit_urb(urb, GFP_ATOMIC); - - if (ret) - goto error3; - - if (!wait_for_completion_timeout(&sent_to_mcu_done, - msecs_to_jiffies(1000))) { - usb_kill_urb(urb); - BT_ERR("upload dlm fw timeout\n"); - goto error3; - } - - BT_DBG("."); - - mdelay(500); - - cur_len += sent_len; - - } - - /* upload 64bytes interrupt vector */ - ret = load_fw_iv(data); - mdelay(100); - - btmtk_usb_switch_iobase(data, WLAN); - - /* check MCU if ready */ - loop = 0; - - do { - btmtk_usb_io_read32(data, COM_REG0, &value); - - if (value == 0x01) - break; - - mdelay(10); - loop++; - } while (loop <= 100); - - if (loop > 1000) { - BT_ERR("wait for 100 times\n"); - ret = -ENODEV; - } - -error3: - usb_free_coherent(udev, 14592, buf, data_dma); -error2: - usb_free_urb(urb); -error1: - /* Disbale load fw mode */ - btmtk_usb_io_read32(data, 0x238, &value); - value = value & ~(0x10000000); - btmtk_usb_io_write32(data, 0x238, value); -error0: - btmtk_usb_io_write32(data, SEMAPHORE_00, 0x1); - return ret; -} - -static int inc_tx(struct btmtk_usb_data *data) -{ - unsigned long flags; - int rv; - - spin_lock_irqsave(&data->txlock, flags); - rv = test_bit(BTUSB_SUSPENDING, &data->flags); - if (!rv) - data->tx_in_flight++; - spin_unlock_irqrestore(&data->txlock, flags); - - return rv; -} - -static void btmtk_usb_intr_complete(struct urb *urb) -{ - struct hci_dev *hdev = urb->context; - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - int err; - - BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name, - urb, urb->status, urb->actual_length); - - if (!test_bit(HCI_RUNNING, &hdev->flags)) - return; - - if (urb->status == 0) { - 
hdev->stat.byte_rx += urb->actual_length; - - hex_dump("hci event", urb->transfer_buffer, urb->actual_length); - - if (hci_recv_fragment(hdev, HCI_EVENT_PKT, - urb->transfer_buffer, - urb->actual_length) < 0) { - BT_ERR("%s corrupted event packet", hdev->name); - hdev->stat.err_rx++; - } - } - - if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) - return; - - usb_mark_last_busy(data->udev); - usb_anchor_urb(urb, &data->intr_anchor); - - err = usb_submit_urb(urb, GFP_ATOMIC); - - if (err < 0) { - /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p failed to resubmit (%d)", - hdev->name, urb, -err); - usb_unanchor_urb(urb); - } -} - -static int btmtk_usb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - struct urb *urb; - unsigned char *buf; - unsigned int pipe; - int err, size; - - BT_DBG("%s\n", __func__); - - if (!data->intr_ep) - return -ENODEV; - - urb = usb_alloc_urb(0, mem_flags); - if (!urb) - return -ENOMEM; - - size = le16_to_cpu(data->intr_ep->wMaxPacketSize); - - buf = kmalloc(size, mem_flags); - if (!buf) { - usb_free_urb(urb); - return -ENOMEM; - } - - pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); - - usb_fill_int_urb(urb, data->udev, pipe, buf, size, - btmtk_usb_intr_complete, hdev, - data->intr_ep->bInterval); - - urb->transfer_flags |= URB_FREE_BUFFER; - - usb_anchor_urb(urb, &data->intr_anchor); - - err = usb_submit_urb(urb, mem_flags); - if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); - usb_unanchor_urb(urb); - } - - usb_free_urb(urb); - - return err; - -} - -static void btmtk_usb_bulk_in_complete(struct urb *urb) -{ - struct hci_dev *hdev = urb->context; - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - int err; - - BT_DBG("%s:%s urb %p status %d count %d", __func__, hdev->name, - urb, urb->status, urb->actual_length); - - if (!test_bit(HCI_RUNNING, &hdev->flags)) - return; - - if (urb->status == 0) { - hdev->stat.byte_rx += urb->actual_length; - - if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, - urb->transfer_buffer, - urb->actual_length) < 0) { - BT_ERR("%s corrupted ACL packet", hdev->name); - hdev->stat.err_rx++; - } - } - - if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) - return; - - usb_anchor_urb(urb, &data->bulk_anchor); - usb_mark_last_busy(data->udev); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) { - /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p failed to resubmit (%d)", - hdev->name, urb, -err); - usb_unanchor_urb(urb); - } -} - -static int btmtk_usb_submit_bulk_in_urb(struct hci_dev *hdev, gfp_t mem_flags) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - struct urb *urb; - unsigned char *buf; - unsigned int pipe; - int err, size = HCI_MAX_FRAME_SIZE; - - BT_DBG("%s:%s\n", __func__, hdev->name); - - if (!data->bulk_rx_ep) - return -ENODEV; - - urb = usb_alloc_urb(0, mem_flags); - if (!urb) - return -ENOMEM; - - buf = kmalloc(size, mem_flags); - if (!buf) { - usb_free_urb(urb); - return -ENOMEM; - } - - pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); - - usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, - btmtk_usb_bulk_in_complete, hdev); - - urb->transfer_flags |= URB_FREE_BUFFER; - - usb_mark_last_busy(data->udev); - usb_anchor_urb(urb, &data->bulk_anchor); - - err = 
usb_submit_urb(urb, mem_flags); - if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); - usb_unanchor_urb(urb); - } - - usb_free_urb(urb); - - return err; -} - -static void btmtk_usb_isoc_in_complete(struct urb *urb) - -{ - struct hci_dev *hdev = urb->context; - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - int i, err; - - BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name, - urb, urb->status, urb->actual_length); - - if (!test_bit(HCI_RUNNING, &hdev->flags)) - return; - - if (urb->status == 0) { - for (i = 0; i < urb->number_of_packets; i++) { - unsigned int offset = urb->iso_frame_desc[i].offset; - unsigned int length; - length = urb->iso_frame_desc[i].actual_length; - - if (urb->iso_frame_desc[i].status) - continue; - - hdev->stat.byte_rx += length; - - if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, - urb->transfer_buffer + offset, - length) < 0) { - BT_ERR("%s corrupted SCO packet", hdev->name); - hdev->stat.err_rx++; - } - } - } - - if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) - return; - - usb_anchor_urb(urb, &data->isoc_anchor); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) { - /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p failed to resubmit (%d)", - hdev->name, urb, -err); - usb_unanchor_urb(urb); - } -} - -static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu) -{ - int i, offset = 0; - - BT_DBG("len %d mtu %d", len, mtu); - - for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; - i++, offset += mtu, len -= mtu) { - urb->iso_frame_desc[i].offset = offset; - urb->iso_frame_desc[i].length = mtu; - } - - if (len && i < BTUSB_MAX_ISOC_FRAMES) { - urb->iso_frame_desc[i].offset = offset; - urb->iso_frame_desc[i].length = len; - i++; - } - - urb->number_of_packets = i; -} - -static int btmtk_usb_submit_isoc_in_urb(struct hci_dev *hdev, gfp_t mem_flags) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - struct urb *urb; - unsigned char *buf; - unsigned int pipe; - int err, size; - - BT_DBG("%s\n", __func__); - - if (!data->isoc_rx_ep) - return -ENODEV; - - urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags); - if (!urb) - return -ENOMEM; - - size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * - BTUSB_MAX_ISOC_FRAMES; - - buf = kmalloc(size, mem_flags); - if (!buf) { - usb_free_urb(urb); - return -ENOMEM; - } - - pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); - - usb_fill_int_urb(urb, data->udev, pipe, buf, size, - btmtk_usb_isoc_in_complete, hdev, - data->isoc_rx_ep->bInterval); - - urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; - - __fill_isoc_descriptor(urb, size, - le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); - - usb_anchor_urb(urb, &data->isoc_anchor); - - err = usb_submit_urb(urb, mem_flags); - if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); - usb_unanchor_urb(urb); - } - - usb_free_urb(urb); - - return err; -} - -static int btmtk_usb_open(struct hci_dev *hdev) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - int err; - - BT_DBG("%s\n", __func__); - - err = usb_autopm_get_interface(data->intf); - if (err < 0) - return err; - - data->intf->needs_remote_wakeup = 1; - - if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) - goto done; - - if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) - goto done; - - err = 
btmtk_usb_submit_intr_urb(hdev, GFP_KERNEL); - if (err < 0) - goto failed; - - err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL); - if (err < 0) { - usb_kill_anchored_urbs(&data->intr_anchor); - goto failed; - } - - set_bit(BTUSB_BULK_RUNNING, &data->flags); - btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL); - -done: - usb_autopm_put_interface(data->intf); - return 0; - -failed: - clear_bit(BTUSB_INTR_RUNNING, &data->flags); - clear_bit(HCI_RUNNING, &hdev->flags); - usb_autopm_put_interface(data->intf); - return err; -} - -static void btmtk_usb_stop_traffic(struct btmtk_usb_data *data) -{ - BT_DBG("%s\n", __func__); - - usb_kill_anchored_urbs(&data->intr_anchor); - usb_kill_anchored_urbs(&data->bulk_anchor); - usb_kill_anchored_urbs(&data->isoc_anchor); -} - -static int btmtk_usb_close(struct hci_dev *hdev) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - int err; - - BT_DBG("%s\n", __func__); - - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) - return 0; - - cancel_work_sync(&data->work); - cancel_work_sync(&data->waker); - - clear_bit(BTUSB_ISOC_RUNNING, &data->flags); - clear_bit(BTUSB_BULK_RUNNING, &data->flags); - clear_bit(BTUSB_INTR_RUNNING, &data->flags); - - btmtk_usb_stop_traffic(data); - - err = usb_autopm_get_interface(data->intf); - if (err < 0) - goto failed; - - data->intf->needs_remote_wakeup = 0; - usb_autopm_put_interface(data->intf); - -failed: - usb_scuttle_anchored_urbs(&data->deferred); - return 0; -} - -static int btmtk_usb_flush(struct hci_dev *hdev) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - - BT_DBG("%s\n", __func__); - - usb_kill_anchored_urbs(&data->tx_anchor); - - return 0; -} - -static void btmtk_usb_tx_complete(struct urb *urb) -{ - struct sk_buff *skb = urb->context; - struct hci_dev *hdev = (struct hci_dev *)skb->dev; - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - - BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name, - urb, urb->status, urb->actual_length); - - if (!test_bit(HCI_RUNNING, &hdev->flags)) - goto done; - - if (!urb->status) - hdev->stat.byte_tx += urb->transfer_buffer_length; - else - hdev->stat.err_tx++; - -done: - spin_lock(&data->txlock); - data->tx_in_flight--; - spin_unlock(&data->txlock); - - kfree(urb->setup_packet); - - kfree_skb(skb); -} - -static void btmtk_usb_isoc_tx_complete(struct urb *urb) -{ - struct sk_buff *skb = urb->context; - struct hci_dev *hdev = (struct hci_dev *) skb->dev; - - BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name, - urb, urb->status, urb->actual_length); - - if (!test_bit(HCI_RUNNING, &hdev->flags)) - goto done; - - if (!urb->status) - hdev->stat.byte_tx += urb->transfer_buffer_length; - else - hdev->stat.err_tx++; - -done: - kfree(urb->setup_packet); - - kfree_skb(skb); -} - -static int btmtk_usb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - struct usb_ctrlrequest *dr; - struct urb *urb; - unsigned int pipe; - int err; - - BT_DBG("%s\n", __func__); - - if (!test_bit(HCI_RUNNING, &hdev->flags)) - return -EBUSY; - - switch (bt_cb(skb)->pkt_type) { - case HCI_COMMAND_PKT: - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) - return -ENOMEM; - - dr = kmalloc(sizeof(*dr), GFP_ATOMIC); - if (!dr) { - usb_free_urb(urb); - return -ENOMEM; - } - - dr->bRequestType = data->cmdreq_type; - dr->bRequest = 0; - dr->wIndex = 0; - dr->wValue = 0; - dr->wLength = __cpu_to_le16(skb->len); - - pipe = usb_sndctrlpipe(data->udev, 0x00); - - if (test_bit(HCI_RUNNING, &hdev->flags)) { - u16 
op_code; - memcpy(&op_code, skb->data, 2); - BT_DBG("ogf = %x\n", (op_code & 0xfc00) >> 10); - BT_DBG("ocf = %x\n", op_code & 0x03ff); - hex_dump("hci command", skb->data, skb->len); - - } - - usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, - skb->data, skb->len, - btmtk_usb_tx_complete, skb); - - hdev->stat.cmd_tx++; - break; - - case HCI_ACLDATA_PKT: - if (!data->bulk_tx_ep) - return -ENODEV; - - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) - return -ENOMEM; - - pipe = usb_sndbulkpipe(data->udev, - data->bulk_tx_ep->bEndpointAddress); - - usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, - skb->len, btmtk_usb_tx_complete, skb); - - hdev->stat.acl_tx++; - BT_DBG("HCI_ACLDATA_PKT:\n"); - break; - - case HCI_SCODATA_PKT: - if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1) - return -ENODEV; - - urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); - if (!urb) - return -ENOMEM; - - pipe = usb_sndisocpipe(data->udev, - data->isoc_tx_ep->bEndpointAddress); - - usb_fill_int_urb(urb, data->udev, pipe, - skb->data, skb->len, btmtk_usb_isoc_tx_complete, - skb, data->isoc_tx_ep->bInterval); - - urb->transfer_flags = URB_ISO_ASAP; - - __fill_isoc_descriptor(urb, skb->len, - le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); - - hdev->stat.sco_tx++; - BT_DBG("HCI_SCODATA_PKT:\n"); - goto skip_waking; - - default: - return -EILSEQ; - } - - err = inc_tx(data); - - if (err) { - usb_anchor_urb(urb, &data->deferred); - schedule_work(&data->waker); - err = 0; - goto done; - } - -skip_waking: - usb_anchor_urb(urb, &data->tx_anchor); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); - kfree(urb->setup_packet); - usb_unanchor_urb(urb); - } else { - usb_mark_last_busy(data->udev); - } - -done: - usb_free_urb(urb); - return err; -} - -static void btmtk_usb_notify(struct hci_dev *hdev, unsigned int evt) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - - BT_DBG("%s evt %d", hdev->name, evt); - - if (hdev->conn_hash.sco_num != data->sco_num) { - data->sco_num = hdev->conn_hash.sco_num; - schedule_work(&data->work); - } -} - -static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) -{ - struct btmtk_usb_data *data = hci_get_drvdata(hdev); - struct usb_interface *intf = data->isoc; - struct usb_endpoint_descriptor *ep_desc; - int i, err; - - if (!data->isoc) - return -ENODEV; - - err = usb_set_interface(data->udev, 1, altsetting); - if (err < 0) { - BT_ERR("%s setting interface failed (%d)", hdev->name, -err); - return err; - } - - data->isoc_altsetting = altsetting; - - data->isoc_tx_ep = NULL; - data->isoc_rx_ep = NULL; - - for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { - ep_desc = &intf->cur_altsetting->endpoint[i].desc; - - if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { - data->isoc_tx_ep = ep_desc; - continue; - } - - if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { - data->isoc_rx_ep = ep_desc; - continue; - } - } - - if (!data->isoc_tx_ep || !data->isoc_rx_ep) { - BT_ERR("%s invalid SCO descriptors", hdev->name); - return -ENODEV; - } - - return 0; -} - -static void btmtk_usb_work(struct work_struct *work) -{ - struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data, - work); - struct hci_dev *hdev = data->hdev; - int new_alts; - int err; - - BT_DBG("%s\n", __func__); - - if (hdev->conn_hash.sco_num > 0) { - if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { - err = 
usb_autopm_get_interface(data->isoc ? - data->isoc : data->intf); - if (err < 0) { - clear_bit(BTUSB_ISOC_RUNNING, &data->flags); - usb_kill_anchored_urbs(&data->isoc_anchor); - return; - } - - set_bit(BTUSB_DID_ISO_RESUME, &data->flags); - } - - if (hdev->voice_setting & 0x0020) { - static const int alts[3] = { 2, 4, 5 }; - new_alts = alts[hdev->conn_hash.sco_num - 1]; - } else { - new_alts = hdev->conn_hash.sco_num; - } - - if (data->isoc_altsetting != new_alts) { - clear_bit(BTUSB_ISOC_RUNNING, &data->flags); - usb_kill_anchored_urbs(&data->isoc_anchor); - - if (__set_isoc_interface(hdev, new_alts) < 0) - return; - } - - if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { - if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL) < 0) - clear_bit(BTUSB_ISOC_RUNNING, &data->flags); - else - btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL); - } - } else { - clear_bit(BTUSB_ISOC_RUNNING, &data->flags); - usb_kill_anchored_urbs(&data->isoc_anchor); - - __set_isoc_interface(hdev, 0); - - if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) - usb_autopm_put_interface(data->isoc ? - data->isoc : data->intf); - } -} - -static void btmtk_usb_waker(struct work_struct *work) -{ - struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data, - waker); - int err; - - err = usb_autopm_get_interface(data->intf); - - if (err < 0) - return; - - usb_autopm_put_interface(data->intf); -} - -static int btmtk_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct btmtk_usb_data *data; - struct usb_endpoint_descriptor *ep_desc; - int i, err; - struct hci_dev *hdev; - - /* interface numbers are hardcoded in the spec */ - if (intf->cur_altsetting->desc.bInterfaceNumber != 0) - return -ENODEV; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - - if (!data) - return -ENOMEM; - - for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { - ep_desc = &intf->cur_altsetting->endpoint[i].desc; - - if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) { - data->intr_ep = ep_desc; - continue; - } - - if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { - data->bulk_tx_ep = ep_desc; - continue; - } - - if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { - data->bulk_rx_ep = ep_desc; - continue; - } - } - - if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) { - kfree(data); - return -ENODEV; - } - - data->cmdreq_type = USB_TYPE_CLASS; - - data->udev = interface_to_usbdev(intf); - data->intf = intf; - - spin_lock_init(&data->lock); - INIT_WORK(&data->work, btmtk_usb_work); - INIT_WORK(&data->waker, btmtk_usb_waker); - spin_lock_init(&data->txlock); - - init_usb_anchor(&data->tx_anchor); - init_usb_anchor(&data->intr_anchor); - init_usb_anchor(&data->bulk_anchor); - init_usb_anchor(&data->isoc_anchor); - init_usb_anchor(&data->deferred); - - hdev = hci_alloc_dev(); - if (!hdev) { - kfree(data); - return -ENOMEM; - } - - hdev->bus = HCI_USB; - - hci_set_drvdata(hdev, data); - - data->hdev = hdev; - - SET_HCIDEV_DEV(hdev, &intf->dev); - - hdev->open = btmtk_usb_open; - hdev->close = btmtk_usb_close; - hdev->flush = btmtk_usb_flush; - hdev->send = btmtk_usb_send_frame; - hdev->notify = btmtk_usb_notify; - - /* Interface numbers are hardcoded in the specification */ - data->isoc = usb_ifnum_to_if(data->udev, 1); - - if (data->isoc) { - err = usb_driver_claim_interface(&btmtk_usb_driver, - data->isoc, data); - if (err < 0) { - hci_free_dev(hdev); - kfree(data); - return err; - } - } - - data->io_buf = kmalloc(256, GFP_KERNEL); - if (!data->io_buf) { - 
hci_free_dev(hdev); - kfree(data); - return -ENOMEM; - } - - btmtk_usb_switch_iobase(data, WLAN); - - btmtk_usb_cap_init(data); - - err = hci_register_dev(hdev); - if (err < 0) { - hci_free_dev(hdev); - kfree(data); - return err; - } - - usb_set_intfdata(intf, data); - - return 0; -} - -static void btmtk_usb_disconnect(struct usb_interface *intf) -{ - struct btmtk_usb_data *data = usb_get_intfdata(intf); - struct hci_dev *hdev; - - BT_DBG("%s\n", __func__); - - if (!data) - return; - - hdev = data->hdev; - usb_set_intfdata(data->intf, NULL); - - if (data->isoc) - usb_set_intfdata(data->isoc, NULL); - - hci_unregister_dev(hdev); - - if (intf == data->isoc) - usb_driver_release_interface(&btmtk_usb_driver, data->intf); - else if (data->isoc) - usb_driver_release_interface(&btmtk_usb_driver, data->isoc); - - hci_free_dev(hdev); - - kfree(data->io_buf); - - kfree(data); -} - -#ifdef CONFIG_PM -static int btmtk_usb_suspend(struct usb_interface *intf, pm_message_t message) -{ - struct btmtk_usb_data *data = usb_get_intfdata(intf); - - BT_DBG("%s\n", __func__); - - if (data->suspend_count++) - return 0; - - spin_lock_irq(&data->txlock); - if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) { - set_bit(BTUSB_SUSPENDING, &data->flags); - spin_unlock_irq(&data->txlock); - } else { - spin_unlock_irq(&data->txlock); - data->suspend_count--; - return -EBUSY; - } - - cancel_work_sync(&data->work); - - btmtk_usb_stop_traffic(data); - usb_kill_anchored_urbs(&data->tx_anchor); - - return 0; -} - -static void play_deferred(struct btmtk_usb_data *data) -{ - struct urb *urb; - int err; - - while ((urb = usb_get_from_anchor(&data->deferred))) { - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) - break; - - data->tx_in_flight++; - } - - usb_scuttle_anchored_urbs(&data->deferred); -} - -static int btmtk_usb_resume(struct usb_interface *intf) -{ - struct btmtk_usb_data *data = usb_get_intfdata(intf); - struct hci_dev *hdev = data->hdev; - int err = 0; - - BT_DBG("%s\n", __func__); - - if (--data->suspend_count) - return 0; - - if (!test_bit(HCI_RUNNING, &hdev->flags)) - goto done; - - if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { - err = btmtk_usb_submit_intr_urb(hdev, GFP_NOIO); - if (err < 0) { - clear_bit(BTUSB_INTR_RUNNING, &data->flags); - goto failed; - } - } - - if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) { - err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO); - if (err < 0) { - clear_bit(BTUSB_BULK_RUNNING, &data->flags); - goto failed; - } - - btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO); - } - - if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { - if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO) < 0) - clear_bit(BTUSB_ISOC_RUNNING, &data->flags); - else - btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO); - } - - spin_lock_irq(&data->txlock); - play_deferred(data); - clear_bit(BTUSB_SUSPENDING, &data->flags); - spin_unlock_irq(&data->txlock); - schedule_work(&data->work); - - return 0; - -failed: - usb_scuttle_anchored_urbs(&data->deferred); -done: - spin_lock_irq(&data->txlock); - clear_bit(BTUSB_SUSPENDING, &data->flags); - spin_unlock_irq(&data->txlock); - - return err; -} -#endif - -static struct usb_device_id btmtk_usb_table[] = { - /* Mediatek MT7650 */ - { USB_DEVICE(0x0e8d, 0x7650) }, - { USB_DEVICE(0x0e8d, 0x7630) }, - { USB_DEVICE(0x0e8d, 0x763e) }, - /* Mediatek MT662 */ - { USB_DEVICE(0x0e8d, 0x7662) }, - { USB_DEVICE(0x0e8d, 0x7632) }, - { } /* Terminating entry */ -}; - -static struct usb_driver btmtk_usb_driver = { - .name = "btmtk_usb", - .probe = btmtk_usb_probe, - .disconnect = 
btmtk_usb_disconnect, -#ifdef CONFIG_PM - .suspend = btmtk_usb_suspend, - .resume = btmtk_usb_resume, -#endif - .id_table = btmtk_usb_table, - .supports_autosuspend = 1, - .disable_hub_initiated_lpm = 1, -}; - -module_usb_driver(btmtk_usb_driver); - -MODULE_DESCRIPTION("Mediatek Bluetooth USB driver ver " VERSION); -MODULE_VERSION(VERSION); -MODULE_LICENSE("GPL"); -MODULE_FIRMWARE(MT7650_FIRMWARE); -MODULE_FIRMWARE(MT7662_FIRMWARE); diff --git a/drivers/staging/btmtk_usb/btmtk_usb.h b/drivers/staging/btmtk_usb/btmtk_usb.h deleted file mode 100644 index 12f0d3b27bfe98..00000000000000 --- a/drivers/staging/btmtk_usb/btmtk_usb.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * MediaTek Bluetooth USB Driver - * - * Copyright (C) 2013, MediaTek co. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * or on the worldwide web at - * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - * - */ - -#ifndef __BTMTK_USB_H__ -#define __BTMTK_USB_H_ - -/* Memory map for MTK BT */ - -/* SYS Control */ -#define SYSCTL 0x400000 - -/* WLAN */ -#define WLAN 0x410000 - -/* MCUCTL */ -#define INT_LEVEL 0x0718 -#define COM_REG0 0x0730 -#define SEMAPHORE_00 0x07B0 -#define SEMAPHORE_01 0x07B4 -#define SEMAPHORE_02 0x07B8 -#define SEMAPHORE_03 0x07BC - -/* Chip definition */ - -#define CONTROL_TIMEOUT_JIFFIES ((300 * HZ) / 100) -#define DEVICE_VENDOR_REQUEST_OUT 0x40 -#define DEVICE_VENDOR_REQUEST_IN 0xc0 -#define DEVICE_CLASS_REQUEST_OUT 0x20 - -#define BTUSB_MAX_ISOC_FRAMES 10 -#define BTUSB_INTR_RUNNING 0 -#define BTUSB_BULK_RUNNING 1 -#define BTUSB_ISOC_RUNNING 2 -#define BTUSB_SUSPENDING 3 -#define BTUSB_DID_ISO_RESUME 4 - -/* ROM Patch */ -#define PATCH_HCI_HEADER_SIZE 4 -#define PATCH_WMT_HEADER_SIZE 5 -#define PATCH_HEADER_SIZE (PATCH_HCI_HEADER_SIZE + PATCH_WMT_HEADER_SIZE) -#define UPLOAD_PATCH_UNIT 2048 -#define PATCH_INFO_SIZE 30 -#define PATCH_PHASE1 1 -#define PATCH_PHASE2 2 -#define PATCH_PHASE3 3 - -struct btmtk_usb_data { - struct hci_dev *hdev; - struct usb_device *udev; - struct usb_interface *intf; - struct usb_interface *isoc; - - spinlock_t lock; - - unsigned long flags; - struct work_struct work; - struct work_struct waker; - - struct usb_anchor tx_anchor; - struct usb_anchor intr_anchor; - struct usb_anchor bulk_anchor; - struct usb_anchor isoc_anchor; - struct usb_anchor deferred; - int tx_in_flight; - spinlock_t txlock; - - struct usb_endpoint_descriptor *intr_ep; - struct usb_endpoint_descriptor *bulk_tx_ep; - struct usb_endpoint_descriptor *bulk_rx_ep; - struct usb_endpoint_descriptor *isoc_tx_ep; - struct usb_endpoint_descriptor *isoc_rx_ep; - - __u8 cmdreq_type; - - unsigned int sco_num; - int isoc_altsetting; - int suspend_count; - - /* request for different io operation */ - u8 w_request; - u8 r_request; - - /* io buffer for usb control transfer */ - char *io_buf; - - struct semaphore fw_upload_sem; - - /* unsigned char 
*fw_image; */ - /* unsigned char *rom_patch; */ - const struct firmware *firmware; - u32 chip_id; - u8 need_load_fw; - u8 need_load_rom_patch; - u32 rom_patch_offset; - u32 rom_patch_len; -}; - -static inline int is_mt7630(struct btmtk_usb_data *data) -{ - return ((data->chip_id & 0xffff0000) == 0x76300000); -} - -static inline int is_mt7650(struct btmtk_usb_data *data) -{ - return ((data->chip_id & 0xffff0000) == 0x76500000); -} - -static inline int is_mt7632(struct btmtk_usb_data *data) -{ - return ((data->chip_id & 0xffff0000) == 0x76320000); -} - -static inline int is_mt7662(struct btmtk_usb_data *data) -{ - return ((data->chip_id & 0xffff0000) == 0x76620000); -} - -#endif diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c index 62efd74b8c0474..bf532b1bd34536 100644 --- a/drivers/staging/ced1401/ced_ioc.c +++ b/drivers/staging/ced1401/ced_ioc.c @@ -19,7 +19,6 @@ */ #include #include -#include #include #include #include @@ -630,7 +629,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea) } spin_unlock_irq(&pdx->stagedLock); - if (pPages) { /* if we decided to release the memory */ + if (pPages) { /* if we decided to release the memory */ /* Now we must undo the pinning down of the pages. We will assume the worst and mark */ /* all the pages as dirty. Don't be tempted to move this up above as you must not be */ /* holding a spin lock to do this stuff as it is not atomic. */ diff --git a/drivers/staging/ced1401/usb1401.c b/drivers/staging/ced1401/usb1401.c index 97c55f9e5151e9..efc310ca789eb7 100644 --- a/drivers/staging/ced1401/usb1401.c +++ b/drivers/staging/ced1401/usb1401.c @@ -89,7 +89,6 @@ synchronous non-Urb based transfers. #include #include #include -#include #include #include #include diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index bfa27e7fc0169f..89e25b4203ad25 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -884,6 +884,12 @@ config COMEDI_GSC_HPDI To compile this driver as a module, choose M here: the module will be called gsc_hpdi. +config COMEDI_MF6X4 + tristate "Humusoft MF634 and MF624 DAQ Card support" + ---help--- + This driver supports both Humusoft MF634 and MF624 Data acquisition + cards. The legacy Humusoft MF614 card is not supported. 
+ config COMEDI_ICP_MULTI tristate "Inova ICP_MULTI support" ---help--- diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile index e6dfc98f8c8ed8..fae2d909000684 100644 --- a/drivers/staging/comedi/Makefile +++ b/drivers/staging/comedi/Makefile @@ -1,3 +1,5 @@ +ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG + comedi-y := comedi_fops.o range.o drivers.o \ comedi_buf.o comedi-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c index 4e26bd7fc84f35..924fce977985b2 100644 --- a/drivers/staging/comedi/comedi_buf.c +++ b/drivers/staging/comedi/comedi_buf.c @@ -16,6 +16,7 @@ */ #include +#include #include "comedidev.h" #include "comedi_internal.h" @@ -26,31 +27,21 @@ #define COMEDI_PAGE_PROTECTION PAGE_KERNEL #endif -static void __comedi_buf_free(struct comedi_device *dev, - struct comedi_subdevice *s, - unsigned n_pages) +static void comedi_buf_map_kref_release(struct kref *kref) { - struct comedi_async *async = s->async; + struct comedi_buf_map *bm = + container_of(kref, struct comedi_buf_map, refcount); struct comedi_buf_page *buf; - unsigned i; - - if (async->prealloc_buf) { - vunmap(async->prealloc_buf); - async->prealloc_buf = NULL; - async->prealloc_bufsz = 0; - } + unsigned int i; - if (!async->buf_page_list) - return; - - for (i = 0; i < n_pages; ++i) { - buf = &async->buf_page_list[i]; - if (buf->virt_addr) { + if (bm->page_list) { + for (i = 0; i < bm->n_pages; i++) { + buf = &bm->page_list[i]; clear_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags)); - if (s->async_dma_dir != DMA_NONE) { + if (bm->dma_dir != DMA_NONE) { #ifdef CONFIG_HAS_DMA - dma_free_coherent(dev->hw_dev, + dma_free_coherent(bm->dma_hw_dev, PAGE_SIZE, buf->virt_addr, buf->dma_addr); @@ -59,10 +50,26 @@ static void __comedi_buf_free(struct comedi_device *dev, free_page((unsigned long)buf->virt_addr); } } + vfree(bm->page_list); } - vfree(async->buf_page_list); - async->buf_page_list = NULL; - async->n_buf_pages = 0; + if (bm->dma_dir != DMA_NONE) + put_device(bm->dma_hw_dev); + kfree(bm); +} + +static void __comedi_buf_free(struct comedi_device *dev, + struct comedi_subdevice *s) +{ + struct comedi_async *async = s->async; + + if (async->prealloc_buf) { + vunmap(async->prealloc_buf); + async->prealloc_buf = NULL; + async->prealloc_bufsz = 0; + } + + comedi_buf_map_put(async->buf_map); + async->buf_map = NULL; } static void __comedi_buf_alloc(struct comedi_device *dev, @@ -71,6 +78,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev, { struct comedi_async *async = s->async; struct page **pages = NULL; + struct comedi_buf_map *bm; struct comedi_buf_page *buf; unsigned i; @@ -80,18 +88,29 @@ static void __comedi_buf_alloc(struct comedi_device *dev, return; } - async->buf_page_list = vzalloc(sizeof(*buf) * n_pages); - if (async->buf_page_list) + bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL); + if (!bm) + return; + + async->buf_map = bm; + kref_init(&bm->refcount); + bm->dma_dir = s->async_dma_dir; + if (bm->dma_dir != DMA_NONE) + /* Need ref to hardware device to free buffer later. 
*/ + bm->dma_hw_dev = get_device(dev->hw_dev); + + bm->page_list = vzalloc(sizeof(*buf) * n_pages); + if (bm->page_list) pages = vmalloc(sizeof(struct page *) * n_pages); if (!pages) return; for (i = 0; i < n_pages; i++) { - buf = &async->buf_page_list[i]; - if (s->async_dma_dir != DMA_NONE) + buf = &bm->page_list[i]; + if (bm->dma_dir != DMA_NONE) #ifdef CONFIG_HAS_DMA - buf->virt_addr = dma_alloc_coherent(dev->hw_dev, + buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev, PAGE_SIZE, &buf->dma_addr, GFP_KERNEL | @@ -108,6 +127,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev, pages[i] = virt_to_page(buf->virt_addr); } + bm->n_pages = i; /* vmap the prealloc_buf if all the pages were allocated */ if (i == n_pages) @@ -117,6 +137,26 @@ static void __comedi_buf_alloc(struct comedi_device *dev, vfree(pages); } +void comedi_buf_map_get(struct comedi_buf_map *bm) +{ + if (bm) + kref_get(&bm->refcount); +} + +int comedi_buf_map_put(struct comedi_buf_map *bm) +{ + if (bm) + return kref_put(&bm->refcount, comedi_buf_map_kref_release); + return 1; +} + +bool comedi_buf_is_mmapped(struct comedi_async *async) +{ + struct comedi_buf_map *bm = async->buf_map; + + return bm && (atomic_read(&bm->refcount.refcount) > 1); +} + int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long new_size) { @@ -130,7 +170,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s, return 0; /* deallocate old buffer */ - __comedi_buf_free(dev, s, async->n_buf_pages); + __comedi_buf_free(dev, s); /* allocate new buffer */ if (new_size) { @@ -140,10 +180,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s, if (!async->prealloc_buf) { /* allocation failed */ - __comedi_buf_free(dev, s, n_pages); + __comedi_buf_free(dev, s); return -ENOMEM; } - async->n_buf_pages = n_pages; } async->prealloc_bufsz = new_size; diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index f3d59e2a1152b5..c22c617b0da1fd 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -16,8 +16,6 @@ GNU General Public License for more details. 
*/ -#undef DEBUG - #include "comedi_compat32.h" #include @@ -47,15 +45,6 @@ #define COMEDI_NUM_SUBDEVICE_MINORS \ (COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS) -#ifdef CONFIG_COMEDI_DEBUG -int comedi_debug; -EXPORT_SYMBOL_GPL(comedi_debug); -module_param(comedi_debug, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(comedi_debug, - "enable comedi core and driver debugging if non-zero (default 0)" - ); -#endif - static int comedi_num_legacy_minors; module_param(comedi_num_legacy_minors, int, S_IRUGO); MODULE_PARM_DESC(comedi_num_legacy_minors, @@ -89,11 +78,38 @@ static struct cdev comedi_cdev; static void comedi_device_init(struct comedi_device *dev) { + kref_init(&dev->refcount); spin_lock_init(&dev->spinlock); mutex_init(&dev->mutex); + init_rwsem(&dev->attach_lock); dev->minor = -1; } +static void comedi_dev_kref_release(struct kref *kref) +{ + struct comedi_device *dev = + container_of(kref, struct comedi_device, refcount); + + mutex_destroy(&dev->mutex); + put_device(dev->class_dev); + kfree(dev); +} + +int comedi_dev_put(struct comedi_device *dev) +{ + if (dev) + return kref_put(&dev->refcount, comedi_dev_kref_release); + return 1; +} +EXPORT_SYMBOL_GPL(comedi_dev_put); + +static struct comedi_device *comedi_dev_get(struct comedi_device *dev) +{ + if (dev) + kref_get(&dev->refcount); + return dev; +} + static void comedi_device_cleanup(struct comedi_device *dev) { struct module *driver_module = NULL; @@ -104,14 +120,9 @@ static void comedi_device_cleanup(struct comedi_device *dev) if (dev->attached) driver_module = dev->driver->module; comedi_device_detach(dev); - while (dev->use_count > 0) { - if (driver_module) - module_put(driver_module); - module_put(THIS_MODULE); - dev->use_count--; - } + if (driver_module && dev->use_count) + module_put(driver_module); mutex_unlock(&dev->mutex); - mutex_destroy(&dev->mutex); } static bool comedi_clear_board_dev(struct comedi_device *dev) @@ -142,17 +153,17 @@ static struct comedi_device *comedi_clear_board_minor(unsigned minor) static void comedi_free_board_dev(struct comedi_device *dev) { if (dev) { + comedi_device_cleanup(dev); if (dev->class_dev) { device_destroy(comedi_class, MKDEV(COMEDI_MAJOR, dev->minor)); } - comedi_device_cleanup(dev); - kfree(dev); + comedi_dev_put(dev); } } static struct comedi_subdevice -*comedi_subdevice_from_minor(unsigned minor) +*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor) { struct comedi_subdevice *s; unsigned int i = minor - COMEDI_NUM_BOARD_MINORS; @@ -160,37 +171,45 @@ static struct comedi_subdevice BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS); mutex_lock(&comedi_subdevice_minor_table_lock); s = comedi_subdevice_minor_table[i]; + if (s && s->device != dev) + s = NULL; mutex_unlock(&comedi_subdevice_minor_table_lock); return s; } -static struct comedi_device *comedi_dev_from_board_minor(unsigned minor) +static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor) { struct comedi_device *dev; BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS); mutex_lock(&comedi_board_minor_table_lock); - dev = comedi_board_minor_table[minor]; + dev = comedi_dev_get(comedi_board_minor_table[minor]); mutex_unlock(&comedi_board_minor_table_lock); return dev; } -static struct comedi_device *comedi_dev_from_subdevice_minor(unsigned minor) +static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor) { + struct comedi_device *dev; struct comedi_subdevice *s; + unsigned int i = minor - COMEDI_NUM_BOARD_MINORS; - s = comedi_subdevice_from_minor(minor); - return s ? 
s->device : NULL; + BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS); + mutex_lock(&comedi_subdevice_minor_table_lock); + s = comedi_subdevice_minor_table[i]; + dev = comedi_dev_get(s ? s->device : NULL); + mutex_unlock(&comedi_subdevice_minor_table_lock); + return dev; } -struct comedi_device *comedi_dev_from_minor(unsigned minor) +struct comedi_device *comedi_dev_get_from_minor(unsigned minor) { if (minor < COMEDI_NUM_BOARD_MINORS) - return comedi_dev_from_board_minor(minor); + return comedi_dev_get_from_board_minor(minor); else - return comedi_dev_from_subdevice_minor(minor); + return comedi_dev_get_from_subdevice_minor(minor); } -EXPORT_SYMBOL_GPL(comedi_dev_from_minor); +EXPORT_SYMBOL_GPL(comedi_dev_get_from_minor); static struct comedi_subdevice * comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor) @@ -198,10 +217,8 @@ comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor) struct comedi_subdevice *s; if (minor >= COMEDI_NUM_BOARD_MINORS) { - s = comedi_subdevice_from_minor(minor); - if (!s || s->device != dev) - return NULL; - if (s->subdev_flags & SDF_CMD_READ) + s = comedi_subdevice_from_minor(dev, minor); + if (s == NULL || (s->subdev_flags & SDF_CMD_READ)) return s; } return dev->read_subdev; @@ -213,10 +230,8 @@ comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor) struct comedi_subdevice *s; if (minor >= COMEDI_NUM_BOARD_MINORS) { - s = comedi_subdevice_from_minor(minor); - if (!s || s->device != dev) - return NULL; - if (s->subdev_flags & SDF_CMD_WRITE) + s = comedi_subdevice_from_minor(dev, minor); + if (s == NULL || (s->subdev_flags & SDF_CMD_WRITE)) return s; } return dev->write_subdev; @@ -232,11 +247,13 @@ static int resize_async_buffer(struct comedi_device *dev, return -EPERM; if (s->busy) { - DPRINTK("subdevice is busy, cannot resize buffer\n"); + dev_dbg(dev->class_dev, + "subdevice is busy, cannot resize buffer\n"); return -EBUSY; } - if (async->mmap_count) { - DPRINTK("subdevice is mmapped, cannot resize buffer\n"); + if (comedi_buf_is_mmapped(async)) { + dev_dbg(dev->class_dev, + "subdevice is mmapped, cannot resize buffer\n"); return -EBUSY; } @@ -254,8 +271,8 @@ static int resize_async_buffer(struct comedi_device *dev, return retval; } - DPRINTK("comedi%i subd %d buffer resized to %i bytes\n", - dev->minor, s->index, async->prealloc_bufsz); + dev_dbg(dev->class_dev, "subd %d buffer resized to %i bytes\n", + s->index, async->prealloc_bufsz); return 0; } @@ -269,7 +286,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev, struct comedi_subdevice *s; unsigned int size = 0; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -279,6 +296,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev, size = s->async->max_bufsize / 1024; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return snprintf(buf, PAGE_SIZE, "%i\n", size); } @@ -299,7 +317,7 @@ static ssize_t max_read_buffer_kb_store(struct device *csdev, return -EINVAL; size *= 1024; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -311,6 +329,7 @@ static ssize_t max_read_buffer_kb_store(struct device *csdev, err = -EINVAL; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return err ? 
err : count; } static DEVICE_ATTR_RW(max_read_buffer_kb); @@ -323,7 +342,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev, struct comedi_subdevice *s; unsigned int size = 0; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -333,6 +352,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev, size = s->async->prealloc_bufsz / 1024; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return snprintf(buf, PAGE_SIZE, "%i\n", size); } @@ -353,7 +373,7 @@ static ssize_t read_buffer_kb_store(struct device *csdev, return -EINVAL; size *= 1024; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -365,6 +385,7 @@ static ssize_t read_buffer_kb_store(struct device *csdev, err = -EINVAL; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(read_buffer_kb); @@ -378,7 +399,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev, struct comedi_subdevice *s; unsigned int size = 0; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -388,6 +409,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev, size = s->async->max_bufsize / 1024; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return snprintf(buf, PAGE_SIZE, "%i\n", size); } @@ -408,7 +430,7 @@ static ssize_t max_write_buffer_kb_store(struct device *csdev, return -EINVAL; size *= 1024; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -420,6 +442,7 @@ static ssize_t max_write_buffer_kb_store(struct device *csdev, err = -EINVAL; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(max_write_buffer_kb); @@ -432,7 +455,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev, struct comedi_subdevice *s; unsigned int size = 0; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -442,6 +465,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev, size = s->async->prealloc_bufsz / 1024; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return snprintf(buf, PAGE_SIZE, "%i\n", size); } @@ -462,7 +486,7 @@ static ssize_t write_buffer_kb_store(struct device *csdev, return -EINVAL; size *= 1024; - dev = comedi_dev_from_minor(minor); + dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; @@ -474,6 +498,7 @@ static ssize_t write_buffer_kb_store(struct device *csdev, err = -EINVAL; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(write_buffer_kb); @@ -562,12 +587,13 @@ static void do_become_nonbusy(struct comedi_device *dev, async->inttrig = NULL; kfree(async->cmd.chanlist); async->cmd.chanlist = NULL; + s->busy = NULL; + wake_up_interruptible_all(&s->async->wait_head); } else { dev_err(dev->class_dev, "BUG: (?) 
do_become_nonbusy called with async=NULL\n"); + s->busy = NULL; } - - s->busy = NULL; } static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s) @@ -582,6 +608,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s) return ret; } +void comedi_device_cancel_all(struct comedi_device *dev) +{ + struct comedi_subdevice *s; + int i; + + if (!dev->attached) + return; + + for (i = 0; i < dev->n_subdevices; i++) { + s = &dev->subdevices[i]; + if (s->async) + do_cancel(dev, s); + } +} + static int is_device_busy(struct comedi_device *dev) { struct comedi_subdevice *s; @@ -594,7 +635,7 @@ static int is_device_busy(struct comedi_device *dev) s = &dev->subdevices[i]; if (s->busy) return 1; - if (s->async && s->async->mmap_count) + if (s->async && comedi_buf_is_mmapped(s->async)) return 1; } @@ -684,7 +725,8 @@ static int do_bufconfig_ioctl(struct comedi_device *dev, async = s->async; if (!async) { - DPRINTK("subdevice does not have async capability\n"); + dev_dbg(dev->class_dev, + "subdevice does not have async capability\n"); bc.size = 0; bc.maximum_size = 0; goto copyback; @@ -931,7 +973,8 @@ static int do_bufinfo_ioctl(struct comedi_device *dev, async = s->async; if (!async) { - DPRINTK("subdevice does not have async capability\n"); + dev_dbg(dev->class_dev, + "subdevice does not have async capability\n"); bi.buf_write_ptr = 0; bi.buf_read_ptr = 0; bi.buf_write_count = 0; @@ -1083,19 +1126,20 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, break; } if (insn->subdev >= dev->n_subdevices) { - DPRINTK("%d not usable subdevice\n", + dev_dbg(dev->class_dev, + "%d not usable subdevice\n", insn->subdev); ret = -EINVAL; break; } s = &dev->subdevices[insn->subdev]; if (!s->async) { - DPRINTK("no async\n"); + dev_dbg(dev->class_dev, "no async\n"); ret = -EINVAL; break; } if (!s->async->inttrig) { - DPRINTK("no inttrig\n"); + dev_dbg(dev->class_dev, "no inttrig\n"); ret = -EAGAIN; break; } @@ -1104,7 +1148,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, ret = 1; break; default: - DPRINTK("invalid insn\n"); + dev_dbg(dev->class_dev, "invalid insn\n"); ret = -EINVAL; break; } @@ -1113,21 +1157,23 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, unsigned int maxdata; if (insn->subdev >= dev->n_subdevices) { - DPRINTK("subdevice %d out of range\n", insn->subdev); + dev_dbg(dev->class_dev, "subdevice %d out of range\n", + insn->subdev); ret = -EINVAL; goto out; } s = &dev->subdevices[insn->subdev]; if (s->type == COMEDI_SUBD_UNUSED) { - DPRINTK("%d not usable subdevice\n", insn->subdev); + dev_dbg(dev->class_dev, "%d not usable subdevice\n", + insn->subdev); ret = -EIO; goto out; } /* are we locked? 
(ioctl lock) */ if (s->lock && s->lock != file) { - DPRINTK("device locked\n"); + dev_dbg(dev->class_dev, "device locked\n"); ret = -EACCES; goto out; } @@ -1135,7 +1181,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, ret = comedi_check_chanlist(s, 1, &insn->chanspec); if (ret < 0) { ret = -EINVAL; - DPRINTK("bad chanspec\n"); + dev_dbg(dev->class_dev, "bad chanspec\n"); goto out; } @@ -1156,7 +1202,8 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, for (i = 0; i < insn->n; ++i) { if (data[i] > maxdata) { ret = -EINVAL; - DPRINTK("bad data value(s)\n"); + dev_dbg(dev->class_dev, + "bad data value(s)\n"); break; } } @@ -1238,35 +1285,35 @@ static int do_insnlist_ioctl(struct comedi_device *dev, data = kmalloc(sizeof(unsigned int) * MAX_SAMPLES, GFP_KERNEL); if (!data) { - DPRINTK("kmalloc failed\n"); ret = -ENOMEM; goto error; } insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) { - DPRINTK("kmalloc failed\n"); ret = -ENOMEM; goto error; } if (copy_from_user(insns, insnlist.insns, sizeof(*insns) * insnlist.n_insns)) { - DPRINTK("copy_from_user failed\n"); + dev_dbg(dev->class_dev, "copy_from_user failed\n"); ret = -EFAULT; goto error; } for (i = 0; i < insnlist.n_insns; i++) { if (insns[i].n > MAX_SAMPLES) { - DPRINTK("number of samples too large\n"); + dev_dbg(dev->class_dev, + "number of samples too large\n"); ret = -EINVAL; goto error; } if (insns[i].insn & INSN_MASK_WRITE) { if (copy_from_user(data, insns[i].data, insns[i].n * sizeof(unsigned int))) { - DPRINTK("copy_from_user failed\n"); + dev_dbg(dev->class_dev, + "copy_from_user failed\n"); ret = -EFAULT; goto error; } @@ -1277,7 +1324,8 @@ static int do_insnlist_ioctl(struct comedi_device *dev, if (insns[i].insn & INSN_MASK_READ) { if (copy_to_user(insns[i].data, data, insns[i].n * sizeof(unsigned int))) { - DPRINTK("copy_to_user failed\n"); + dev_dbg(dev->class_dev, + "copy_to_user failed\n"); ret = -EFAULT; goto error; } @@ -1367,14 +1415,14 @@ static int do_cmd_ioctl(struct comedi_device *dev, unsigned int __user *user_chanlist; if (copy_from_user(&cmd, arg, sizeof(cmd))) { - DPRINTK("bad cmd address\n"); + dev_dbg(dev->class_dev, "bad cmd address\n"); return -EFAULT; } /* save user's chanlist pointer so it can be restored later */ user_chanlist = (unsigned int __user *)cmd.chanlist; if (cmd.subdev >= dev->n_subdevices) { - DPRINTK("%d no such subdevice\n", cmd.subdev); + dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev); return -ENODEV; } @@ -1382,38 +1430,38 @@ static int do_cmd_ioctl(struct comedi_device *dev, async = s->async; if (s->type == COMEDI_SUBD_UNUSED) { - DPRINTK("%d not valid subdevice\n", cmd.subdev); + dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev); return -EIO; } if (!s->do_cmd || !s->do_cmdtest || !s->async) { - DPRINTK("subdevice %i does not support commands\n", - cmd.subdev); + dev_dbg(dev->class_dev, + "subdevice %i does not support commands\n", cmd.subdev); return -EIO; } /* are we locked? (ioctl lock) */ if (s->lock && s->lock != file) { - DPRINTK("subdevice locked\n"); + dev_dbg(dev->class_dev, "subdevice locked\n"); return -EACCES; } /* are we busy? 
*/ if (s->busy) { - DPRINTK("subdevice busy\n"); + dev_dbg(dev->class_dev, "subdevice busy\n"); return -EBUSY; } /* make sure channel/gain list isn't too long */ if (cmd.chanlist_len > s->len_chanlist) { - DPRINTK("channel/gain list too long %u > %d\n", + dev_dbg(dev->class_dev, "channel/gain list too long %u > %d\n", cmd.chanlist_len, s->len_chanlist); return -EINVAL; } /* make sure channel/gain list isn't too short */ if (cmd.chanlist_len < 1) { - DPRINTK("channel/gain list too short %u < 1\n", + dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n", cmd.chanlist_len); return -EINVAL; } @@ -1425,7 +1473,9 @@ static int do_cmd_ioctl(struct comedi_device *dev, async->cmd.chanlist_len * sizeof(int)); if (IS_ERR(async->cmd.chanlist)) { ret = PTR_ERR(async->cmd.chanlist); - DPRINTK("memdup_user failed with code %d\n", ret); + async->cmd.chanlist = NULL; + dev_dbg(dev->class_dev, "memdup_user failed with code %d\n", + ret); goto cleanup; } @@ -1434,20 +1484,20 @@ static int do_cmd_ioctl(struct comedi_device *dev, async->cmd.chanlist_len, async->cmd.chanlist); if (ret < 0) { - DPRINTK("bad chanlist\n"); + dev_dbg(dev->class_dev, "bad chanlist\n"); goto cleanup; } ret = s->do_cmdtest(dev, s, &async->cmd); if (async->cmd.flags & TRIG_BOGUS || ret) { - DPRINTK("test returned %d\n", ret); + dev_dbg(dev->class_dev, "test returned %d\n", ret); cmd = async->cmd; /* restore chanlist pointer before copying back */ cmd.chanlist = (unsigned int __force *)user_chanlist; cmd.data = NULL; if (copy_to_user(arg, &cmd, sizeof(cmd))) { - DPRINTK("fault writing cmd\n"); + dev_dbg(dev->class_dev, "fault writing cmd\n"); ret = -EFAULT; goto cleanup; } @@ -1457,7 +1507,7 @@ static int do_cmd_ioctl(struct comedi_device *dev, if (!async->prealloc_bufsz) { ret = -ENOMEM; - DPRINTK("no buffer (?)\n"); + dev_dbg(dev->class_dev, "no buffer (?)\n"); goto cleanup; } @@ -1469,8 +1519,7 @@ static int do_cmd_ioctl(struct comedi_device *dev, if (async->cmd.flags & TRIG_WAKE_EOS) async->cb_mask |= COMEDI_CB_EOS; - comedi_set_subdevice_runflags(s, SRF_USER | SRF_ERROR | SRF_RUNNING, - SRF_USER | SRF_RUNNING); + comedi_set_subdevice_runflags(s, SRF_ERROR | SRF_RUNNING, SRF_RUNNING); /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with * comedi_read() or comedi_write() */ @@ -1510,32 +1559,32 @@ static int do_cmdtest_ioctl(struct comedi_device *dev, unsigned int __user *user_chanlist; if (copy_from_user(&cmd, arg, sizeof(cmd))) { - DPRINTK("bad cmd address\n"); + dev_dbg(dev->class_dev, "bad cmd address\n"); return -EFAULT; } /* save user's chanlist pointer so it can be restored later */ user_chanlist = (unsigned int __user *)cmd.chanlist; if (cmd.subdev >= dev->n_subdevices) { - DPRINTK("%d no such subdevice\n", cmd.subdev); + dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev); return -ENODEV; } s = &dev->subdevices[cmd.subdev]; if (s->type == COMEDI_SUBD_UNUSED) { - DPRINTK("%d not valid subdevice\n", cmd.subdev); + dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev); return -EIO; } if (!s->do_cmd || !s->do_cmdtest) { - DPRINTK("subdevice %i does not support commands\n", - cmd.subdev); + dev_dbg(dev->class_dev, + "subdevice %i does not support commands\n", cmd.subdev); return -EIO; } /* make sure channel/gain list isn't too long */ if (cmd.chanlist_len > s->len_chanlist) { - DPRINTK("channel/gain list too long %d > %d\n", + dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n", cmd.chanlist_len, s->len_chanlist); ret = -EINVAL; goto cleanup; @@ -1547,14 +1596,16 @@ static int 
do_cmdtest_ioctl(struct comedi_device *dev, cmd.chanlist_len * sizeof(int)); if (IS_ERR(chanlist)) { ret = PTR_ERR(chanlist); - DPRINTK("memdup_user exited with code %d", ret); + chanlist = NULL; + dev_dbg(dev->class_dev, + "memdup_user exited with code %d", ret); goto cleanup; } /* make sure each element in channel/gain list is valid */ ret = comedi_check_chanlist(s, cmd.chanlist_len, chanlist); if (ret < 0) { - DPRINTK("bad chanlist\n"); + dev_dbg(dev->class_dev, "bad chanlist\n"); goto cleanup; } @@ -1567,7 +1618,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev, cmd.chanlist = (unsigned int __force *)user_chanlist; if (copy_to_user(arg, &cmd, sizeof(cmd))) { - DPRINTK("bad cmd address\n"); + dev_dbg(dev->class_dev, "bad cmd address\n"); ret = -EFAULT; goto cleanup; } @@ -1700,8 +1751,6 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, return -EBUSY; ret = do_cancel(dev, s); - if (comedi_get_subdevice_runflags(s) & SRF_USER) - wake_up_interruptible(&s->async->wait_head); return ret; } @@ -1748,12 +1797,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { const unsigned minor = iminor(file_inode(file)); - struct comedi_device *dev = comedi_dev_from_minor(minor); + struct comedi_device *dev = file->private_data; int rc; - if (!dev) - return -ENODEV; - mutex_lock(&dev->mutex); /* Device config is special, because it must work on @@ -1782,7 +1828,7 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, } if (!dev->attached) { - DPRINTK("no driver configured on /dev/comedi%i\n", dev->minor); + dev_dbg(dev->class_dev, "no driver attached\n"); rc = -ENODEV; goto done; } @@ -1852,28 +1898,18 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, static void comedi_vm_open(struct vm_area_struct *area) { - struct comedi_async *async; - struct comedi_device *dev; - - async = area->vm_private_data; - dev = async->subdevice->device; + struct comedi_buf_map *bm; - mutex_lock(&dev->mutex); - async->mmap_count++; - mutex_unlock(&dev->mutex); + bm = area->vm_private_data; + comedi_buf_map_get(bm); } static void comedi_vm_close(struct vm_area_struct *area) { - struct comedi_async *async; - struct comedi_device *dev; - - async = area->vm_private_data; - dev = async->subdevice->device; + struct comedi_buf_map *bm; - mutex_lock(&dev->mutex); - async->mmap_count--; - mutex_unlock(&dev->mutex); + bm = area->vm_private_data; + comedi_buf_map_put(bm); } static struct vm_operations_struct comedi_vm_ops = { @@ -1884,22 +1920,20 @@ static struct vm_operations_struct comedi_vm_ops = { static int comedi_mmap(struct file *file, struct vm_area_struct *vma) { const unsigned minor = iminor(file_inode(file)); - struct comedi_device *dev = comedi_dev_from_minor(minor); + struct comedi_device *dev = file->private_data; struct comedi_subdevice *s; struct comedi_async *async; + struct comedi_buf_map *bm; unsigned long start = vma->vm_start; unsigned long size; int n_pages; int i; int retval; - if (!dev) - return -ENODEV; - mutex_lock(&dev->mutex); if (!dev->attached) { - DPRINTK("no driver configured on comedi%i\n", dev->minor); + dev_dbg(dev->class_dev, "no driver attached\n"); retval = -ENODEV; goto done; } @@ -1920,7 +1954,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma) } if (vma->vm_pgoff != 0) { - DPRINTK("comedi: mmap() offset must be 0.\n"); + dev_dbg(dev->class_dev, "mmap() offset must be 0.\n"); retval = -EINVAL; goto done; } @@ -1936,8 +1970,13 @@ static int 
comedi_mmap(struct file *file, struct vm_area_struct *vma) } n_pages = size >> PAGE_SHIFT; + bm = async->buf_map; + if (!bm || n_pages > bm->n_pages) { + retval = -EINVAL; + goto done; + } for (i = 0; i < n_pages; ++i) { - struct comedi_buf_page *buf = &async->buf_page_list[i]; + struct comedi_buf_page *buf = &bm->page_list[i]; if (remap_pfn_range(vma, start, page_to_pfn(virt_to_page(buf->virt_addr)), @@ -1949,9 +1988,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma) } vma->vm_ops = &comedi_vm_ops; - vma->vm_private_data = async; + vma->vm_private_data = bm; - async->mmap_count++; + vma->vm_ops->open(vma); retval = 0; done: @@ -1963,16 +2002,13 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; const unsigned minor = iminor(file_inode(file)); - struct comedi_device *dev = comedi_dev_from_minor(minor); + struct comedi_device *dev = file->private_data; struct comedi_subdevice *s; - if (!dev) - return -ENODEV; - mutex_lock(&dev->mutex); if (!dev->attached) { - DPRINTK("no driver configured on comedi%i\n", dev->minor); + dev_dbg(dev->class_dev, "no driver attached\n"); goto done; } @@ -2008,39 +2044,75 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, int n, m, count = 0, retval = 0; DECLARE_WAITQUEUE(wait, current); const unsigned minor = iminor(file_inode(file)); - struct comedi_device *dev = comedi_dev_from_minor(minor); + struct comedi_device *dev = file->private_data; + bool on_wait_queue = false; + bool attach_locked; + unsigned int old_detach_count; - if (!dev) - return -ENODEV; + /* Protect against device detachment during operation. */ + down_read(&dev->attach_lock); + attach_locked = true; + old_detach_count = dev->detach_count; if (!dev->attached) { - DPRINTK("no driver configured on comedi%i\n", dev->minor); - return -ENODEV; + dev_dbg(dev->class_dev, "no driver attached\n"); + retval = -ENODEV; + goto out; } s = comedi_write_subdevice(dev, minor); - if (!s || !s->async) - return -EIO; + if (!s || !s->async) { + retval = -EIO; + goto out; + } async = s->async; if (!s->busy || !nbytes) - return 0; - if (s->busy != file) - return -EACCES; + goto out; + if (s->busy != file) { + retval = -EACCES; + goto out; + } add_wait_queue(&async->wait_head, &wait); + on_wait_queue = true; while (nbytes > 0 && !retval) { set_current_state(TASK_INTERRUPTIBLE); if (!comedi_is_subdevice_running(s)) { if (count == 0) { - mutex_lock(&dev->mutex); + struct comedi_subdevice *new_s; + if (comedi_is_subdevice_in_error(s)) retval = -EPIPE; else retval = 0; - do_become_nonbusy(dev, s); + /* + * To avoid deadlock, cannot acquire dev->mutex + * while dev->attach_lock is held. Need to + * remove task from the async wait queue before + * releasing dev->attach_lock, as it might not + * be valid afterwards. + */ + remove_wait_queue(&async->wait_head, &wait); + on_wait_queue = false; + up_read(&dev->attach_lock); + attach_locked = false; + mutex_lock(&dev->mutex); + /* + * Become non-busy unless things have changed + * behind our back. Checking dev->detach_count + * is unchanged ought to be sufficient (unless + * there have been 2**32 detaches in the + * meantime!), but check the subdevice pointer + * as well just in case. 
+ */ + new_s = comedi_write_subdevice(dev, minor); + if (dev->attached && + old_detach_count == dev->detach_count && + s == new_s && new_s->async == async) + do_become_nonbusy(dev, s); mutex_unlock(&dev->mutex); } break; @@ -2090,8 +2162,12 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, buf += n; break; /* makes device work like a pipe */ } +out: + if (on_wait_queue) + remove_wait_queue(&async->wait_head, &wait); set_current_state(TASK_RUNNING); - remove_wait_queue(&async->wait_head, &wait); + if (attach_locked) + up_read(&dev->attach_lock); return count ? count : retval; } @@ -2104,25 +2180,35 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, int n, m, count = 0, retval = 0; DECLARE_WAITQUEUE(wait, current); const unsigned minor = iminor(file_inode(file)); - struct comedi_device *dev = comedi_dev_from_minor(minor); + struct comedi_device *dev = file->private_data; + unsigned int old_detach_count; + bool become_nonbusy = false; + bool attach_locked; - if (!dev) - return -ENODEV; + /* Protect against device detachment during operation. */ + down_read(&dev->attach_lock); + attach_locked = true; + old_detach_count = dev->detach_count; if (!dev->attached) { - DPRINTK("no driver configured on comedi%i\n", dev->minor); - return -ENODEV; + dev_dbg(dev->class_dev, "no driver attached\n"); + retval = -ENODEV; + goto out; } s = comedi_read_subdevice(dev, minor); - if (!s || !s->async) - return -EIO; + if (!s || !s->async) { + retval = -EIO; + goto out; + } async = s->async; if (!s->busy || !nbytes) - return 0; - if (s->busy != file) - return -EACCES; + goto out; + if (s->busy != file) { + retval = -EACCES; + goto out; + } add_wait_queue(&async->wait_head, &wait); while (nbytes > 0 && !retval) { @@ -2140,13 +2226,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, if (n == 0) { if (!comedi_is_subdevice_running(s)) { - mutex_lock(&dev->mutex); - do_become_nonbusy(dev, s); if (comedi_is_subdevice_in_error(s)) retval = -EPIPE; else retval = 0; - mutex_unlock(&dev->mutex); + become_nonbusy = true; break; } if (file->f_flags & O_NONBLOCK) { @@ -2184,14 +2268,37 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, buf += n; break; /* makes device work like a pipe */ } - if (comedi_is_subdevice_idle(s)) { + remove_wait_queue(&async->wait_head, &wait); + set_current_state(TASK_RUNNING); + if (become_nonbusy || comedi_is_subdevice_idle(s)) { + struct comedi_subdevice *new_s; + + /* + * To avoid deadlock, cannot acquire dev->mutex + * while dev->attach_lock is held. + */ + up_read(&dev->attach_lock); + attach_locked = false; mutex_lock(&dev->mutex); - if (async->buf_read_count - async->buf_write_count == 0) - do_become_nonbusy(dev, s); + /* + * Check device hasn't become detached behind our back. + * Checking dev->detach_count is unchanged ought to be + * sufficient (unless there have been 2**32 detaches in the + * meantime!), but check the subdevice pointer as well just in + * case. + */ + new_s = comedi_read_subdevice(dev, minor); + if (dev->attached && old_detach_count == dev->detach_count && + s == new_s && new_s->async == async) { + if (become_nonbusy || + async->buf_read_count - async->buf_write_count == 0) + do_become_nonbusy(dev, s); + } mutex_unlock(&dev->mutex); } - set_current_state(TASK_RUNNING); - remove_wait_queue(&async->wait_head, &wait); +out: + if (attach_locked) + up_read(&dev->attach_lock); return count ? 
count : retval; } @@ -2199,10 +2306,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, static int comedi_open(struct inode *inode, struct file *file) { const unsigned minor = iminor(inode); - struct comedi_device *dev = comedi_dev_from_minor(minor); + struct comedi_device *dev = comedi_dev_get_from_minor(minor); + int rc; if (!dev) { - DPRINTK("invalid minor number\n"); + pr_debug("invalid minor number\n"); return -ENODEV; } @@ -2223,9 +2331,9 @@ static int comedi_open(struct inode *inode, struct file *file) if (dev->attached) goto ok; if (!capable(CAP_NET_ADMIN) && dev->in_request_module) { - DPRINTK("in request module\n"); - mutex_unlock(&dev->mutex); - return -ENODEV; + dev_dbg(dev->class_dev, "in request module\n"); + rc = -ENODEV; + goto out; } if (capable(CAP_NET_ADMIN) && dev->in_request_module) goto ok; @@ -2241,59 +2349,49 @@ static int comedi_open(struct inode *inode, struct file *file) dev->in_request_module = false; if (!dev->attached && !capable(CAP_NET_ADMIN)) { - DPRINTK("not attached and not CAP_NET_ADMIN\n"); - mutex_unlock(&dev->mutex); - return -ENODEV; + dev_dbg(dev->class_dev, "not attached and not CAP_NET_ADMIN\n"); + rc = -ENODEV; + goto out; } ok: - __module_get(THIS_MODULE); - - if (dev->attached) { + if (dev->attached && dev->use_count == 0) { if (!try_module_get(dev->driver->module)) { - module_put(THIS_MODULE); - mutex_unlock(&dev->mutex); - return -ENOSYS; + rc = -ENOSYS; + goto out; } - } - - if (dev->attached && dev->use_count == 0 && dev->open) { - int rc = dev->open(dev); - if (rc < 0) { - module_put(dev->driver->module); - module_put(THIS_MODULE); - mutex_unlock(&dev->mutex); - return rc; + if (dev->open) { + rc = dev->open(dev); + if (rc < 0) { + module_put(dev->driver->module); + goto out; + } } } dev->use_count++; + file->private_data = dev; + rc = 0; +out: mutex_unlock(&dev->mutex); - - return 0; + if (rc) + comedi_dev_put(dev); + return rc; } static int comedi_fasync(int fd, struct file *file, int on) { - const unsigned minor = iminor(file_inode(file)); - struct comedi_device *dev = comedi_dev_from_minor(minor); - - if (!dev) - return -ENODEV; + struct comedi_device *dev = file->private_data; return fasync_helper(fd, file, on, &dev->async_queue); } static int comedi_close(struct inode *inode, struct file *file) { - const unsigned minor = iminor(inode); - struct comedi_device *dev = comedi_dev_from_minor(minor); + struct comedi_device *dev = file->private_data; struct comedi_subdevice *s = NULL; int i; - if (!dev) - return -ENODEV; - mutex_lock(&dev->mutex); if (dev->subdevices) { @@ -2306,16 +2404,16 @@ static int comedi_close(struct inode *inode, struct file *file) s->lock = NULL; } } - if (dev->attached && dev->use_count == 1 && dev->close) - dev->close(dev); - - module_put(THIS_MODULE); - if (dev->attached) + if (dev->attached && dev->use_count == 1) { + if (dev->close) + dev->close(dev); module_put(dev->driver->module); + } dev->use_count--; mutex_unlock(&dev->mutex); + comedi_dev_put(dev); return 0; } @@ -2346,8 +2444,6 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s) unsigned runflags = 0; unsigned runflags_mask = 0; - /* DPRINTK("comedi_event 0x%x\n",mask); */ - if (!comedi_is_subdevice_running(s)) return; @@ -2368,16 +2464,11 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s) } if (async->cb_mask & s->async->events) { - if (comedi_get_subdevice_runflags(s) & SRF_USER) { - wake_up_interruptible(&async->wait_head); - if (s->subdev_flags & SDF_CMD_READ) - 
kill_fasync(&dev->async_queue, SIGIO, POLL_IN); - if (s->subdev_flags & SDF_CMD_WRITE) - kill_fasync(&dev->async_queue, SIGIO, POLL_OUT); - } else { - if (async->cb_func) - async->cb_func(s->async->events, async->cb_arg); - } + wake_up_interruptible(&async->wait_head); + if (s->subdev_flags & SDF_CMD_READ) + kill_fasync(&dev->async_queue, SIGIO, POLL_IN); + if (s->subdev_flags & SDF_CMD_WRITE) + kill_fasync(&dev->async_queue, SIGIO, POLL_OUT); } s->async->events = 0; } @@ -2408,7 +2499,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device) if (i == COMEDI_NUM_BOARD_MINORS) { mutex_unlock(&dev->mutex); comedi_device_cleanup(dev); - kfree(dev); + comedi_dev_put(dev); pr_err("comedi: error: ran out of minor numbers for board device files.\n"); return ERR_PTR(-EBUSY); } @@ -2416,7 +2507,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device) csdev = device_create(comedi_class, hardware_device, MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i); if (!IS_ERR(csdev)) - dev->class_dev = csdev; + dev->class_dev = get_device(csdev); /* Note: dev->mutex needs to be unlocked by the caller. */ return dev; diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h index fda1a7ba0e16bb..9a746570f1619a 100644 --- a/drivers/staging/comedi/comedi_internal.h +++ b/drivers/staging/comedi/comedi_internal.h @@ -16,7 +16,11 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s); int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long new_size); void comedi_buf_reset(struct comedi_async *async); +bool comedi_buf_is_mmapped(struct comedi_async *async); +void comedi_buf_map_get(struct comedi_buf_map *bm); +int comedi_buf_map_put(struct comedi_buf_map *bm); unsigned int comedi_buf_write_n_allocated(struct comedi_async *async); +void comedi_device_cancel_all(struct comedi_device *dev); extern unsigned int comedi_default_buf_size_kb; extern unsigned int comedi_default_buf_maxsize_kb; diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index 143be8076a2e72..f82bd4256d51a5 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h @@ -20,14 +20,13 @@ #define _COMEDIDEV_H #include +#include +#include +#include +#include #include "comedi.h" -#define DPRINTK(format, args...) 
do { \ - if (comedi_debug) \ - pr_debug("comedi: " format, ## args); \ -} while (0) - #define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) #define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \ COMEDI_MINORVERSION, COMEDI_MICROVERSION) @@ -100,18 +99,22 @@ struct comedi_buf_page { dma_addr_t dma_addr; }; +struct comedi_buf_map { + struct device *dma_hw_dev; + struct comedi_buf_page *page_list; + unsigned int n_pages; + enum dma_data_direction dma_dir; + struct kref refcount; +}; + struct comedi_async { struct comedi_subdevice *subdevice; void *prealloc_buf; /* pre-allocated buffer */ unsigned int prealloc_bufsz; /* buffer size, in bytes */ - /* virtual and dma address of each page */ - struct comedi_buf_page *buf_page_list; - unsigned n_buf_pages; /* num elements in buf_page_list */ + struct comedi_buf_map *buf_map; /* map of buffer pages */ unsigned int max_bufsize; /* maximum buffer size, bytes */ - /* current number of mmaps of prealloc_buf */ - unsigned int mmap_count; /* byte count for writer (write completed) */ unsigned int buf_write_count; @@ -141,10 +144,7 @@ struct comedi_async { wait_queue_head_t wait_head; - /* callback stuff */ unsigned int cb_mask; - int (*cb_func) (unsigned int flags, void *); - void *cb_arg; int (*inttrig) (struct comedi_device *dev, struct comedi_subdevice *s, unsigned int x); @@ -173,6 +173,7 @@ struct comedi_device { struct device *class_dev; int minor; + unsigned int detach_count; /* hw_dev is passed to dma_alloc_coherent when allocating async buffers * for subdevices that have async_dma_dir set to something other than * DMA_NONE */ @@ -185,6 +186,8 @@ struct comedi_device { bool ioenabled:1; spinlock_t spinlock; struct mutex mutex; + struct rw_semaphore attach_lock; + struct kref refcount; int n_subdevices; struct comedi_subdevice *subdevices; @@ -208,12 +211,6 @@ static inline const void *comedi_board(const struct comedi_device *dev) return dev->board_ptr; } -#ifdef CONFIG_COMEDI_DEBUG -extern int comedi_debug; -#else -static const int comedi_debug; -#endif - /* * function prototypes */ @@ -231,7 +228,8 @@ enum comedi_minor_bits { static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4; static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1; -struct comedi_device *comedi_dev_from_minor(unsigned minor); +struct comedi_device *comedi_dev_get_from_minor(unsigned minor); +int comedi_dev_put(struct comedi_device *dev); void init_polling(void); void cleanup_polling(void); @@ -240,7 +238,6 @@ void stop_polling(struct comedi_device *); /* subdevice runflags */ enum subdevice_runflags { - SRF_USER = 0x00000001, SRF_RT = 0x00000002, /* indicates an COMEDI_CB_ERROR event has occurred since the last * command was started */ @@ -410,6 +407,7 @@ void comedi_driver_unregister(struct comedi_driver *); #define PCI_VENDOR_ID_IOTECH 0x1616 #define PCI_VENDOR_ID_CONTEC 0x1221 #define PCI_VENDOR_ID_RTD 0x1435 +#define PCI_VENDOR_ID_HUMUSOFT 0x186c struct pci_dev; struct pci_driver; diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 4964d2a2fc7d54..246080316c9014 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -95,7 +95,7 @@ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices) } EXPORT_SYMBOL_GPL(comedi_alloc_subdevices); -static void cleanup_device(struct comedi_device *dev) +static void comedi_device_detach_cleanup(struct comedi_device *dev) { int i; struct comedi_subdevice *s; @@ -133,10 +133,14 @@ static void cleanup_device(struct comedi_device *dev) 
void comedi_device_detach(struct comedi_device *dev) { + comedi_device_cancel_all(dev); + down_write(&dev->attach_lock); dev->attached = false; + dev->detach_count++; if (dev->driver) dev->driver->detach(dev); - cleanup_device(dev); + comedi_device_detach_cleanup(dev); + up_write(&dev->attach_lock); } static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s) @@ -355,8 +359,9 @@ static int comedi_device_postconfig(struct comedi_device *dev) ret = __comedi_device_postconfig(dev); if (ret < 0) return ret; - smp_wmb(); + down_write(&dev->attach_lock); dev->attached = true; + up_write(&dev->attach_lock); return 0; } @@ -598,8 +603,12 @@ int comedi_auto_config(struct device *hardware_device, } dev = comedi_alloc_board_minor(hardware_device); - if (IS_ERR(dev)) + if (IS_ERR(dev)) { + dev_warn(hardware_device, + "driver '%s' could not create device.\n", + driver->driver_name); return PTR_ERR(dev); + } /* Note: comedi_alloc_board_minor() locked dev->mutex. */ dev->driver = driver; @@ -611,8 +620,20 @@ int comedi_auto_config(struct device *hardware_device, comedi_device_detach(dev); mutex_unlock(&dev->mutex); - if (ret < 0) + if (ret < 0) { + dev_warn(hardware_device, + "driver '%s' failed to auto-configure device.\n", + driver->driver_name); comedi_release_hardware_device(hardware_device); + } else { + /* + * class_dev should be set properly here + * after a successful auto config + */ + dev_info(dev->class_dev, + "driver '%s' has successfully auto-configured '%s'.\n", + driver->driver_name, dev->board_name); + } return ret; } EXPORT_SYMBOL_GPL(comedi_auto_config); @@ -657,7 +678,7 @@ void comedi_driver_unregister(struct comedi_driver *driver) /* check for devices using this driver */ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) { - struct comedi_device *dev = comedi_dev_from_minor(i); + struct comedi_device *dev = comedi_dev_get_from_minor(i); if (!dev) continue; @@ -671,6 +692,7 @@ void comedi_driver_unregister(struct comedi_driver *driver) comedi_device_detach(dev); } mutex_unlock(&dev->mutex); + comedi_dev_put(dev); } } EXPORT_SYMBOL_GPL(comedi_driver_unregister); diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c index b4009e863414ed..48817f087d9758 100644 --- a/drivers/staging/comedi/drivers/8255.c +++ b/drivers/staging/comedi/drivers/8255.c @@ -94,7 +94,7 @@ I/O port base address can be found in the output of 'lspci -v'. 
struct subdev_8255_private { unsigned long iobase; - int (*io) (int, int, int, unsigned long); + int (*io)(int, int, int, unsigned long); }; static int subdev_8255_io(int dir, int port, int data, unsigned long iobase) @@ -262,7 +262,7 @@ static int subdev_8255_cancel(struct comedi_device *dev, } int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io) (int, int, int, unsigned long), + int (*io)(int, int, int, unsigned long), unsigned long iobase) { struct subdev_8255_private *spriv; @@ -289,7 +289,7 @@ int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s, EXPORT_SYMBOL_GPL(subdev_8255_init); int subdev_8255_init_irq(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io) (int, int, int, unsigned long), + int (*io)(int, int, int, unsigned long), unsigned long iobase) { int ret; diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c index c55f234b29e605..8a57c3c1ade0ff 100644 --- a/drivers/staging/comedi/drivers/8255_pci.c +++ b/drivers/staging/comedi/drivers/8255_pci.c @@ -263,7 +263,7 @@ static int pci_8255_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &pci_8255_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = { +static const struct pci_device_id pci_8255_pci_table[] = { { PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 }, { PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 }, { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile index 94cbd2618fc8b8..2706f583d8f0f6 100644 --- a/drivers/staging/comedi/drivers/Makefile +++ b/drivers/staging/comedi/drivers/Makefile @@ -1,5 +1,6 @@ # Makefile for individual comedi drivers # +ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG # Comedi "helper" modules @@ -110,6 +111,7 @@ obj-$(CONFIG_COMEDI_NI_PCIMIO) += ni_pcimio.o obj-$(CONFIG_COMEDI_RTD520) += rtd520.o obj-$(CONFIG_COMEDI_S626) += s626.o obj-$(CONFIG_COMEDI_SSV_DNP) += ssv_dnp.o +obj-$(CONFIG_COMEDI_MF6X4) += mf6x4.o # Comedi PCMCIA drivers obj-$(CONFIG_COMEDI_CB_DAS16_CS) += cb_das16_cs.o diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c index 3c9eec84f0eb55..bd05857b82f281 100644 --- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c +++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c @@ -1414,7 +1414,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d) { struct comedi_device *dev = d; struct addi_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; unsigned int next_dma_buf, samplesinbuf; unsigned long low_word, high_word, var; unsigned int ui_Tmp; @@ -1568,8 +1568,8 @@ static void v_APCI3120_InterruptDma(int irq, void *d) static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev) { struct addi_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; int n_chan, i; - struct comedi_subdevice *s = &dev->subdevices[0]; int err = 1; n_chan = devpriv->ui_AiNbrofChannels; @@ -1593,11 +1593,11 @@ static void v_APCI3120_Interrupt(int irq, void *d) { struct comedi_device *dev = d; struct addi_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; unsigned short int_daq; unsigned int int_amcc, ui_Check, i; unsigned short us_TmpValue; unsigned char b_DummyRead; - struct comedi_subdevice *s = 
&dev->subdevices[0]; ui_Check = 1; diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c index dc73d4d348ed4f..8c85a09d1c661f 100644 --- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c +++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c @@ -2590,8 +2590,8 @@ static int i_APCI3200_CommandAnalogInput(struct comedi_device *dev, static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev) { struct addi_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; unsigned int ui_StatusRegister = 0; - struct comedi_subdevice *s = &dev->subdevices[0]; /* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */ /* comedi_async *async = s->async; */ diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c index 8d229b2f09738c..ccd49211ea17d0 100644 --- a/drivers/staging/comedi/drivers/addi_apci_035.c +++ b/drivers/staging/comedi/drivers/addi_apci_035.c @@ -58,7 +58,7 @@ static int apci035_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci035_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci035_pci_table) = { +static const struct pci_device_id apci035_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x0300) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 34ab0679e99281..0daa0ea63b5ef1 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -325,8 +325,8 @@ static int apci1032_auto_attach(struct comedi_device *dev, s = &dev->subdevices[1]; if (dev->irq) { dev->read_subdev = s; - s->type = COMEDI_SUBD_DI | SDF_CMD_READ; - s->subdev_flags = SDF_READABLE; + s->type = COMEDI_SUBD_DI; + s->subdev_flags = SDF_READABLE | SDF_CMD_READ; s->n_chan = 1; s->maxdata = 1; s->range_table = &range_digital; @@ -364,7 +364,7 @@ static int apci1032_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci1032_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci1032_pci_table) = { +static const struct pci_device_id apci1032_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1003) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index ae9ded63dcecd0..74f7ace8adbcb6 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -57,7 +57,7 @@ static int apci1500_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci1500_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci1500_pci_table) = { +static const struct pci_device_id apci1500_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80fc) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c index 9d1b1425c60b7c..e9c5291c77cd43 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1516.c +++ b/drivers/staging/comedi/drivers/addi_apci_1516.c @@ -206,7 +206,7 @@ static int apci1516_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci1516_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci1516_pci_table) = { +static const struct pci_device_id apci1516_pci_table[] = { { PCI_VDEVICE(ADDIDATA, 0x1000), BOARD_APCI1016 }, { PCI_VDEVICE(ADDIDATA, 0x1001), BOARD_APCI1516 }, { PCI_VDEVICE(ADDIDATA, 0x1002), BOARD_APCI2016 }, diff 
--git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c index c5717d63e16a7b..6248284caaf5c8 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1564.c +++ b/drivers/staging/comedi/drivers/addi_apci_1564.c @@ -55,7 +55,7 @@ static int apci1564_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci1564_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci1564_pci_table) = { +static const struct pci_device_id apci1564_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1006) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c index 5ee204bcbeef3a..28df4b50b87adb 100644 --- a/drivers/staging/comedi/drivers/addi_apci_16xx.c +++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c @@ -168,7 +168,7 @@ static int apci16xx_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci16xx_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci16xx_pci_table) = { +static const struct pci_device_id apci16xx_pci_table[] = { { PCI_VDEVICE(ADDIDATA, 0x1009), BOARD_APCI1648 }, { PCI_VDEVICE(ADDIDATA, 0x100a), BOARD_APCI1696 }, { 0 } diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c index c77ee8732d38fe..c9b933cb5987c0 100644 --- a/drivers/staging/comedi/drivers/addi_apci_2032.c +++ b/drivers/staging/comedi/drivers/addi_apci_2032.c @@ -359,7 +359,7 @@ static int apci2032_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci2032_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci2032_pci_table) = { +static const struct pci_device_id apci2032_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1004) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c index 7fb32e778d8ba5..e1a916546d18a3 100644 --- a/drivers/staging/comedi/drivers/addi_apci_2200.c +++ b/drivers/staging/comedi/drivers/addi_apci_2200.c @@ -134,7 +134,7 @@ static int apci2200_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci2200_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci2200_pci_table) = { +static const struct pci_device_id apci2200_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1005) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c index 67d09e8afb2efd..1e6fa56c516e8b 100644 --- a/drivers/staging/comedi/drivers/addi_apci_3120.c +++ b/drivers/staging/comedi/drivers/addi_apci_3120.c @@ -230,7 +230,7 @@ static int apci3120_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci3120_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci3120_pci_table) = { +static const struct pci_device_id apci3120_pci_table[] = { { PCI_VDEVICE(AMCC, 0x818d), BOARD_APCI3120 }, { PCI_VDEVICE(AMCC, 0x828d), BOARD_APCI3001 }, { 0 } diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c index 1213d5aa6bea9b..9868a5369aff31 100644 --- a/drivers/staging/comedi/drivers/addi_apci_3200.c +++ b/drivers/staging/comedi/drivers/addi_apci_3200.c @@ -109,7 +109,7 @@ static int apci3200_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci3200_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci3200_pci_table) = { +static const struct pci_device_id apci3200_pci_table[] = { { PCI_VDEVICE(ADDIDATA, 0x3000), 
BOARD_APCI3200 }, { PCI_VDEVICE(ADDIDATA, 0x3007), BOARD_APCI3300 }, { 0 } diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c index 6138440b919e86..4cb69ec54c9bc6 100644 --- a/drivers/staging/comedi/drivers/addi_apci_3501.c +++ b/drivers/staging/comedi/drivers/addi_apci_3501.c @@ -428,7 +428,7 @@ static int apci3501_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci3501_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci3501_pci_table) = { +static const struct pci_device_id apci3501_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c index 761cbf8f964b3c..ceadf8eff47f57 100644 --- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c +++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c @@ -915,7 +915,7 @@ static int apci3xxx_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &apci3xxx_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(apci3xxx_pci_table) = { +static const struct pci_device_id apci3xxx_pci_table[] = { { PCI_VDEVICE(ADDIDATA, 0x3010), BOARD_APCI3000_16 }, { PCI_VDEVICE(ADDIDATA, 0x300f), BOARD_APCI3000_8 }, { PCI_VDEVICE(ADDIDATA, 0x300e), BOARD_APCI3000_4 }, diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c index dd092c7954a9ef..5c1413abc52de1 100644 --- a/drivers/staging/comedi/drivers/adl_pci6208.c +++ b/drivers/staging/comedi/drivers/adl_pci6208.c @@ -242,7 +242,7 @@ static int adl_pci6208_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(adl_pci6208_pci_table) = { +static const struct pci_device_id adl_pci6208_pci_table[] = { { PCI_VDEVICE(ADLINK, 0x6208), BOARD_PCI6208 }, { PCI_VDEVICE(ADLINK, 0x6216), BOARD_PCI6216 }, { 0 } diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c index 5617f5ca384a0b..6f622b4de69860 100644 --- a/drivers/staging/comedi/drivers/adl_pci7x3x.c +++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c @@ -259,7 +259,7 @@ static int adl_pci7x3x_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(adl_pci7x3x_pci_table) = { +static const struct pci_device_id adl_pci7x3x_pci_table[] = { { PCI_VDEVICE(ADLINK, 0x7230), BOARD_PCI7230 }, { PCI_VDEVICE(ADLINK, 0x7233), BOARD_PCI7233 }, { PCI_VDEVICE(ADLINK, 0x7234), BOARD_PCI7234 }, diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c index b3d009285ed4ce..300df55a2802d2 100644 --- a/drivers/staging/comedi/drivers/adl_pci8164.c +++ b/drivers/staging/comedi/drivers/adl_pci8164.c @@ -145,7 +145,7 @@ static int adl_pci8164_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = { +static const struct pci_device_id adl_pci8164_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x8164) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c index eab8da2c3d6691..363f2e42a27f0c 100644 --- a/drivers/staging/comedi/drivers/adl_pci9111.c +++ b/drivers/staging/comedi/drivers/adl_pci9111.c @@ -125,8 +125,7 @@ a multiple of chanlist_len*convert_arg. 
PLX9052_INTCSR_LI2STAT) static const struct comedi_lrange pci9111_ai_range = { - 5, - { + 5, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), @@ -470,11 +469,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev, struct pci9111_private_data *dev_private = dev->private; struct comedi_cmd *async_cmd = &s->async->cmd; - if (!dev->irq) { - comedi_error(dev, - "no irq assigned for PCI9111, cannot do hardware conversion"); - return -1; - } /* Set channel scan limit */ /* PCI9111 allows only scanning from channel 0 to channel n */ /* TODO: handle the case of an external multiplexer */ @@ -858,12 +852,11 @@ static int pci9111_auto_attach(struct comedi_device *dev, pci9111_reset(dev); - if (pcidev->irq > 0) { - ret = request_irq(dev->irq, pci9111_interrupt, + if (pcidev->irq) { + ret = request_irq(pcidev->irq, pci9111_interrupt, IRQF_SHARED, dev->board_name, dev); - if (ret) - return ret; - dev->irq = pcidev->irq; + if (ret == 0) + dev->irq = pcidev->irq; } ret = comedi_alloc_subdevices(dev, 4); @@ -871,18 +864,21 @@ static int pci9111_auto_attach(struct comedi_device *dev, return ret; s = &dev->subdevices[0]; - dev->read_subdev = s; s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_COMMON; s->n_chan = 16; s->maxdata = 0xffff; - s->len_chanlist = 16; s->range_table = &pci9111_ai_range; - s->cancel = pci9111_ai_cancel; s->insn_read = pci9111_ai_insn_read; - s->do_cmdtest = pci9111_ai_do_cmd_test; - s->do_cmd = pci9111_ai_do_cmd; - s->munge = pci9111_ai_munge; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = s->n_chan; + s->do_cmdtest = pci9111_ai_do_cmd_test; + s->do_cmd = pci9111_ai_do_cmd; + s->cancel = pci9111_ai_cancel; + s->munge = pci9111_ai_munge; + } s = &dev->subdevices[1]; s->type = COMEDI_SUBD_AO; @@ -938,7 +934,7 @@ static int pci9111_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = { +static const struct pci_device_id pci9111_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID) }, /* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */ { 0 } diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c index 986489641ed774..4bdd9720e9eb7f 100644 --- a/drivers/staging/comedi/drivers/adl_pci9118.c +++ b/drivers/staging/comedi/drivers/adl_pci9118.c @@ -194,28 +194,30 @@ Configuration options: #define EXTTRG_AI 0 /* ext trg is used by AI */ -static const struct comedi_lrange range_pci9118dg_hr = { 8, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25) - } +static const struct comedi_lrange range_pci9118dg_hr = { + 8, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; -static const struct comedi_lrange range_pci9118hg = { 8, { - BIP_RANGE(5), - BIP_RANGE(0.5), - BIP_RANGE(0.05), - BIP_RANGE(0.005), - UNI_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(0.1), - UNI_RANGE(0.01) - } +static const struct comedi_lrange range_pci9118hg = { + 8, { + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.005), + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.01) + } }; #define PCI9118_BIPOLAR_RANGES 4 /* @@ -1126,7 +1128,7 @@ static irqreturn_t interrupt_pci9118(int irq, void *d) } } - (devpriv->int_ai_func) (dev, &dev->subdevices[0], 
int_adstat, + (devpriv->int_ai_func) (dev, dev->read_subdev, int_adstat, int_amcc, int_daq); } @@ -1965,7 +1967,6 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq, struct pci_dev *pcidev = comedi_to_pci_dev(dev); struct comedi_subdevice *s; int ret, pages, i; - unsigned int irq; u16 u16w; dev->board_name = this_board->name; @@ -2036,12 +2037,18 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq, pci_write_config_word(pcidev, PCI_COMMAND, u16w | 64); /* Enable parity check for parity error */ + if (!disable_irq && pcidev->irq) { + ret = request_irq(pcidev->irq, interrupt_pci9118, IRQF_SHARED, + dev->board_name, dev); + if (ret == 0) + dev->irq = pcidev->irq; + } + ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; s = &dev->subdevices[0]; - dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; if (devpriv->usemux) @@ -2050,11 +2057,17 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq, s->n_chan = this_board->n_aichan; s->maxdata = this_board->ai_maxdata; - s->len_chanlist = this_board->n_aichanlist; s->range_table = this_board->rangelist_ai; - s->cancel = pci9118_ai_cancel; s->insn_read = pci9118_insn_read_ai; - s->munge = pci9118_ai_munge; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = this_board->n_aichanlist; + s->do_cmdtest = pci9118_ai_cmdtest; + s->do_cmd = pci9118_ai_cmd; + s->cancel = pci9118_ai_cancel; + s->munge = pci9118_ai_munge; + } s = &dev->subdevices[1]; s->type = COMEDI_SUBD_AO; @@ -2100,27 +2113,7 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq, break; } - if (disable_irq) - irq = 0; - else - irq = pcidev->irq; - if (irq > 0) { - if (request_irq(irq, interrupt_pci9118, IRQF_SHARED, - dev->board_name, dev)) { - dev_warn(dev->class_dev, - "unable to allocate IRQ %u, DISABLING IT\n", - irq); - } else { - dev->irq = irq; - /* Enable AI commands */ - s = &dev->subdevices[0]; - s->subdev_flags |= SDF_CMD_READ; - s->do_cmdtest = pci9118_ai_cmdtest; - s->do_cmd = pci9118_ai_cmd; - } - } - - pci9118_report_attach(dev, irq); + pci9118_report_attach(dev, dev->irq); return 0; } @@ -2217,7 +2210,7 @@ static int adl_pci9118_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(adl_pci9118_pci_table) = { +static const struct pci_device_id adl_pci9118_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80d9) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c index 8150a67cd1fbf9..3190ef7d285eb1 100644 --- a/drivers/staging/comedi/drivers/adq12b.c +++ b/drivers/staging/comedi/drivers/adq12b.c @@ -97,21 +97,22 @@ If you do not specify any options, they will default to #define TIMEOUT 20 /* available ranges through the PGA gains */ -static const struct comedi_lrange range_adq12b_ai_bipolar = { 4, { - BIP_RANGE(5), - BIP_RANGE(2), - BIP_RANGE(1), - BIP_RANGE(0.5) - } +static const struct comedi_lrange range_adq12b_ai_bipolar = { + 4, { + BIP_RANGE(5), + BIP_RANGE(2), + BIP_RANGE(1), + BIP_RANGE(0.5) + } }; -static const struct comedi_lrange range_adq12b_ai_unipolar = { 4, { - UNI_RANGE(5), - UNI_RANGE(2), - UNI_RANGE(1), - UNI_RANGE - (0.5) - } +static const struct comedi_lrange range_adq12b_ai_unipolar = { + 4, { + UNI_RANGE(5), + UNI_RANGE(2), + UNI_RANGE(1), + UNI_RANGE(0.5) + } }; struct adq12b_private { @@ -162,8 +163,6 @@ static int adq12b_ai_rinsn(struct comedi_device 
*dev, hi = inb(dev->iobase + ADQ12B_ADHIG); lo = inb(dev->iobase + ADQ12B_ADLOW); - /* printk("debug: chan=%d range=%d status=%d hi=%d lo=%d\n", - channel, range, status, hi, lo); */ data[n] = (hi << 8) | lo; } diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index c3fdcabe9aec6a..593676cf706a2c 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -115,65 +115,70 @@ Configuration options: /* D/A synchronized control (PCI1720_SYNCONT) */ #define Syncont_SC0 1 /* set synchronous output mode */ -static const struct comedi_lrange range_pci1710_3 = { 9, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - BIP_RANGE(10), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25) - } +static const struct comedi_lrange range_pci1710_3 = { + 9, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + BIP_RANGE(10), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; static const char range_codes_pci1710_3[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x10, 0x11, 0x12, 0x13 }; -static const struct comedi_lrange range_pci1710hg = { 12, { - BIP_RANGE(5), - BIP_RANGE(0.5), - BIP_RANGE(0.05), - BIP_RANGE(0.005), - BIP_RANGE(10), - BIP_RANGE(1), - BIP_RANGE(0.1), - BIP_RANGE(0.01), - UNI_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(0.1), - UNI_RANGE(0.01) - } +static const struct comedi_lrange range_pci1710hg = { + 12, { + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.005), + BIP_RANGE(10), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.01), + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.01) + } }; static const char range_codes_pci1710hg[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13 }; -static const struct comedi_lrange range_pci17x1 = { 5, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625) - } +static const struct comedi_lrange range_pci17x1 = { + 5, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) + } }; static const char range_codes_pci17x1[] = { 0x00, 0x01, 0x02, 0x03, 0x04 }; -static const struct comedi_lrange range_pci1720 = { 4, { - UNI_RANGE(5), - UNI_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(10) - } +static const struct comedi_lrange range_pci1720 = { + 4, { + UNI_RANGE(5), + UNI_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(10) + } }; -static const struct comedi_lrange range_pci171x_da = { 2, { - UNI_RANGE(5), - UNI_RANGE(10), - } +static const struct comedi_lrange range_pci171x_da = { + 2, { + UNI_RANGE(5), + UNI_RANGE(10) + } }; enum pci1710_boardid { @@ -731,7 +736,7 @@ static void interrupt_pci1710_every_sample(void *d) { struct comedi_device *dev = d; struct pci1710_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; int m; #ifdef PCI171x_PARANOIDCHECK const struct boardtype *this_board = comedi_board(dev); @@ -740,16 +745,15 @@ static void interrupt_pci1710_every_sample(void *d) m = inw(dev->iobase + PCI171x_STATUS); if (m & Status_FE) { - printk("comedi%d: A/D FIFO empty (%4x)\n", dev->minor, m); + dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", m); pci171x_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return; } if (m & Status_FF) { - printk - ("comedi%d: A/D FIFO Full status (Fatal Error!) 
(%4x)\n", - dev->minor, m); + dev_dbg(dev->class_dev, + "A/D FIFO Full status (Fatal Error!) (%4x)\n", m); pci171x_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); @@ -825,12 +829,12 @@ static int move_block_from_fifo(struct comedi_device *dev, sampl = inw(dev->iobase + PCI171x_AD_DATA); if (this_board->cardtype != TYPE_PCI1713) if ((sampl & 0xf000) != devpriv->act_chanlist[j]) { - printk - ("comedi%d: A/D FIFO data dropout: received data from channel %d, expected %d! (%d/%d/%d/%d/%d/%4x)\n", - dev->minor, (sampl & 0xf000) >> 12, - (devpriv->act_chanlist[j] & 0xf000) >> 12, - i, j, devpriv->ai_act_scan, n, turn, - sampl); + dev_dbg(dev->class_dev, + "A/D FIFO data dropout: received data from channel %d, expected %d! (%d/%d/%d/%d/%d/%4x)\n", + (sampl & 0xf000) >> 12, + (devpriv->act_chanlist[j] & 0xf000) >> 12, + i, j, devpriv->ai_act_scan, n, turn, + sampl); pci171x_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; @@ -860,22 +864,20 @@ static void interrupt_pci1710_half_fifo(void *d) struct comedi_device *dev = d; const struct boardtype *this_board = comedi_board(dev); struct pci1710_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; int m, samplesinbuf; m = inw(dev->iobase + PCI171x_STATUS); if (!(m & Status_FH)) { - printk("comedi%d: A/D FIFO not half full! (%4x)\n", - dev->minor, m); + dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m); pci171x_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return; } if (m & Status_FF) { - printk - ("comedi%d: A/D FIFO Full status (Fatal Error!) (%4x)\n", - dev->minor, m); + dev_dbg(dev->class_dev, + "A/D FIFO Full status (Fatal Error!) 
(%4x)\n", m); pci171x_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); @@ -1267,21 +1269,21 @@ static int pci1710_auto_attach(struct comedi_device *dev, if (this_board->n_aichan) { s = &dev->subdevices[subdev]; - dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND; if (this_board->n_aichand) s->subdev_flags |= SDF_DIFF; s->n_chan = this_board->n_aichan; s->maxdata = this_board->ai_maxdata; - s->len_chanlist = this_board->n_aichan; s->range_table = this_board->rangelist_ai; - s->cancel = pci171x_ai_cancel; s->insn_read = pci171x_insn_read_ai; if (dev->irq) { + dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = s->n_chan; s->do_cmdtest = pci171x_ai_cmdtest; s->do_cmd = pci171x_ai_cmd; + s->cancel = pci171x_ai_cancel; } devpriv->i8254_osc_base = I8254_OSC_BASE_10MHZ; subdev++; @@ -1374,7 +1376,7 @@ static int adv_pci1710_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(adv_pci1710_pci_table) = { +static const struct pci_device_id adv_pci1710_pci_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710, PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050), diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c index bd4f781b4b24a0..72394267ddfeb3 100644 --- a/drivers/staging/comedi/drivers/adv_pci1723.c +++ b/drivers/staging/comedi/drivers/adv_pci1723.c @@ -306,7 +306,7 @@ static int adv_pci1723_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(adv_pci1723_pci_table) = { +static const struct pci_device_id adv_pci1723_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c index 009a3039fc4fef..af670acb03d835 100644 --- a/drivers/staging/comedi/drivers/adv_pci1724.c +++ b/drivers/staging/comedi/drivers/adv_pci1724.c @@ -116,8 +116,8 @@ enum board_id_contents { BOARD_ID_MASK = 0xf }; -static const struct comedi_lrange ao_ranges_1724 = { 4, - { +static const struct comedi_lrange ao_ranges_1724 = { + 4, { BIP_RANGE(10), RANGE_mA(0, 20), RANGE_mA(4, 20), @@ -381,7 +381,7 @@ static int adv_pci1724_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(adv_pci1724_pci_table) = { +static const struct pci_device_id adv_pci1724_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1724) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c index 6bac665261f847..d4bd61d84dafc0 100644 --- a/drivers/staging/comedi/drivers/adv_pci_dio.c +++ b/drivers/staging/comedi/drivers/adv_pci_dio.c @@ -1188,7 +1188,7 @@ static int adv_pci_dio_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &adv_pci_dio_driver, cardtype); } -static DEFINE_PCI_DEVICE_TABLE(adv_pci_dio_pci_table) = { +static const struct pci_device_id adv_pci_dio_pci_table[] = { { PCI_VDEVICE(ADVANTECH, 0x1730), TYPE_PCI1730 }, { PCI_VDEVICE(ADVANTECH, 0x1733), TYPE_PCI1733 }, { PCI_VDEVICE(ADVANTECH, 0x1734), TYPE_PCI1734 }, diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c index abb28498b58c3f..68c3fb3226ca3b 100644 --- a/drivers/staging/comedi/drivers/aio_aio12_8.c +++ b/drivers/staging/comedi/drivers/aio_aio12_8.c @@ -181,13 +181,12 @@ static int aio_aio12_8_ao_write(struct comedi_device *dev, } static const struct comedi_lrange 
range_aio_aio12_8 = { - 4, - { - UNI_RANGE(5), - BIP_RANGE(5), - UNI_RANGE(10), - BIP_RANGE(10), - } + 4, { + UNI_RANGE(5), + BIP_RANGE(5), + UNI_RANGE(10), + BIP_RANGE(10) + } }; static int aio_aio12_8_attach(struct comedi_device *dev, diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h index b810d5f3d97174..2ba736444610fc 100644 --- a/drivers/staging/comedi/drivers/amcc_s5933.h +++ b/drivers/staging/comedi/drivers/amcc_s5933.h @@ -145,12 +145,12 @@ #define AINT_READ_COMPL 0x00008000 #define AINT_WRITE_COMPL 0x00004000 -#define AINT_OMB_ENABLE 0x00001000 -#define AINT_OMB_SELECT 0x00000c00 +#define AINT_OMB_ENABLE 0x00001000 +#define AINT_OMB_SELECT 0x00000c00 #define AINT_OMB_BYTE 0x00000300 -#define AINT_IMB_ENABLE 0x00000010 -#define AINT_IMB_SELECT 0x0000000c +#define AINT_IMB_ENABLE 0x00000010 +#define AINT_IMB_SELECT 0x0000000c #define AINT_IMB_BYTE 0x00000003 /* these are bits from various different registers, needs cleanup XXX */ diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c index 2e4bf284d52c08..9c9559ffc643d5 100644 --- a/drivers/staging/comedi/drivers/amplc_dio200_common.c +++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c @@ -513,7 +513,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev, int event = 0; spin_lock_irqsave(&subpriv->spinlock, flags); - subpriv->active = 1; + subpriv->active = true; /* Set up end of acquisition. */ switch (cmd->stop_src) { diff --git a/drivers/staging/comedi/drivers/amplc_dio200_pci.c b/drivers/staging/comedi/drivers/amplc_dio200_pci.c index a810a241644328..e0367380b37aa3 100644 --- a/drivers/staging/comedi/drivers/amplc_dio200_pci.c +++ b/drivers/staging/comedi/drivers/amplc_dio200_pci.c @@ -439,7 +439,7 @@ static struct comedi_driver dio200_pci_comedi_driver = { .detach = dio200_pci_detach, }; -static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = { +static const struct pci_device_id dio200_pci_table[] = { { PCI_VDEVICE(AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215), pci215_model diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c index 98075f999c9fdb..31734e1142fd31 100644 --- a/drivers/staging/comedi/drivers/amplc_pc236.c +++ b/drivers/staging/comedi/drivers/amplc_pc236.c @@ -356,7 +356,7 @@ static int pc236_intr_cancel(struct comedi_device *dev, static irqreturn_t pc236_interrupt(int irq, void *d) { struct comedi_device *dev = d; - struct comedi_subdevice *s = &dev->subdevices[1]; + struct comedi_subdevice *s = dev->read_subdev; int handled; handled = pc236_intr_check(dev); @@ -567,7 +567,7 @@ static struct comedi_driver amplc_pc236_driver = { }; #if DO_PCI -static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = { +static const struct pci_device_id pc236_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236) }, {0} }; diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c index 810e397d8fd700..ae4048a624fa2b 100644 --- a/drivers/staging/comedi/drivers/amplc_pci224.c +++ b/drivers/staging/comedi/drivers/amplc_pci224.c @@ -267,17 +267,16 @@ Passing a zero for an option is the same as leaving it unspecified. /* The software selectable internal ranges for PCI224 (option[2] == 0). 
*/ static const struct comedi_lrange range_pci224_internal = { - 8, - { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - } + 8, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; static const unsigned short hwrange_pci224_internal[8] = { @@ -293,11 +292,10 @@ static const unsigned short hwrange_pci224_internal[8] = { /* The software selectable external ranges for PCI224 (option[2] == 1). */ static const struct comedi_lrange range_pci224_external = { - 2, - { - RANGE_ext(-1, 1), /* bipolar [-Vref,+Vref] */ - RANGE_ext(0, 1), /* unipolar [0,+Vref] */ - } + 2, { + RANGE_ext(-1, 1), /* bipolar [-Vref,+Vref] */ + RANGE_ext(0, 1) /* unipolar [0,+Vref] */ + } }; static const unsigned short hwrange_pci224_external[2] = { @@ -308,19 +306,17 @@ static const unsigned short hwrange_pci224_external[2] = { /* The hardware selectable Vref*2 external range for PCI234 * (option[2] == 1, option[3+n] == 0). */ static const struct comedi_lrange range_pci234_ext2 = { - 1, - { - RANGE_ext(-2, 2), - } + 1, { + RANGE_ext(-2, 2) + } }; /* The hardware selectable Vref external range for PCI234 * (option[2] == 1, option[3+n] == 1). */ static const struct comedi_lrange range_pci234_ext = { - 1, - { - RANGE_ext(-1, 1), - } + 1, { + RANGE_ext(-1, 1) + } }; /* This serves for all the PCI234 ranges. */ @@ -909,16 +905,14 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, } if (errors) { if (errors & dupchan_err) { - DPRINTK("comedi%d: " DRIVER_NAME - ": ao_cmdtest: " - "entries in chanlist must contain no " - "duplicate channels\n", dev->minor); + dev_dbg(dev->class_dev, + "%s: entries in chanlist must contain no duplicate channels\n", + __func__); } if (errors & range_err) { - DPRINTK("comedi%d: " DRIVER_NAME - ": ao_cmdtest: " - "entries in chanlist must all have " - "the same range index\n", dev->minor); + dev_dbg(dev->class_dev, + "%s: entries in chanlist must all have the same range index\n", + __func__); } err++; } @@ -1142,7 +1136,7 @@ static irqreturn_t pci224_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct pci224_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->write_subdev; struct comedi_cmd *cmd; unsigned char intstat, valid_intstat; unsigned char curenab; @@ -1498,7 +1492,7 @@ static int amplc_pci224_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(amplc_pci224_pci_table) = { +static const struct pci_device_id amplc_pci224_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) }, { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) }, { 0 } diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c index a97bbd6ca3db5a..c08dfbbe406227 100644 --- a/drivers/staging/comedi/drivers/amplc_pci230.c +++ b/drivers/staging/comedi/drivers/amplc_pci230.c @@ -546,15 +546,16 @@ static const unsigned int pci230_timebase[8] = { }; /* PCI230 analogue input range table */ -static const struct comedi_lrange pci230_ai_range = { 7, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5) - } +static const struct comedi_lrange pci230_ai_range = { + 7, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + UNI_RANGE(10), + 
UNI_RANGE(5), + UNI_RANGE(2.5) + } }; /* PCI230 analogue gain bits for each input range. */ @@ -564,10 +565,11 @@ static const unsigned char pci230_ai_gain[7] = { 0, 1, 2, 3, 1, 2, 3 }; static const unsigned char pci230_ai_bipolar[7] = { 1, 1, 1, 1, 0, 0, 0 }; /* PCI230 analogue output range table */ -static const struct comedi_lrange pci230_ao_range = { 2, { - UNI_RANGE(10), - BIP_RANGE(10) - } +static const struct comedi_lrange pci230_ao_range = { + 2, { + UNI_RANGE(10), + BIP_RANGE(10) + } }; /* PCI230 daccon bipolar flag for each analogue output range. */ @@ -818,9 +820,9 @@ static int pci230_ai_rinsn(struct comedi_device *dev, if (aref == AREF_DIFF) { /* Differential. */ if (chan >= s->n_chan / 2) { - DPRINTK("comedi%d: amplc_pci230: ai_rinsn: " - "differential channel number out of range " - "0 to %u\n", dev->minor, (s->n_chan / 2) - 1); + dev_dbg(dev->class_dev, + "%s: differential channel number out of range 0 to %u\n", + __func__, (s->n_chan / 2) - 1); return -EINVAL; } } @@ -1092,14 +1094,14 @@ static int pci230_ao_cmdtest(struct comedi_device *dev, if (errors != 0) { err++; if ((errors & seq_err) != 0) { - DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: " - "channel numbers must increase\n", - dev->minor); + dev_dbg(dev->class_dev, + "%s: channel numbers must increase\n", + __func__); } if ((errors & range_err) != 0) { - DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: " - "channels must have the same range\n", - dev->minor); + dev_dbg(dev->class_dev, + "%s: channels must have the same range\n", + __func__); } } } @@ -1835,33 +1837,29 @@ static int pci230_ai_cmdtest(struct comedi_device *dev, if (errors != 0) { err++; if ((errors & seq_err) != 0) { - DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: " - "channel numbers must increase or " - "sequence must repeat exactly\n", - dev->minor); + dev_dbg(dev->class_dev, + "%s: channel numbers must increase or sequence must repeat exactly\n", + __func__); } if ((errors & rangepair_err) != 0) { - DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: " - "single-ended channel pairs must " - "have the same range\n", dev->minor); + dev_dbg(dev->class_dev, + "%s: single-ended channel pairs must have the same range\n", + __func__); } if ((errors & polarity_err) != 0) { - DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: " - "channel sequence ranges must be all " - "bipolar or all unipolar\n", - dev->minor); + dev_dbg(dev->class_dev, + "%s: channel sequence ranges must be all bipolar or all unipolar\n", + __func__); } if ((errors & aref_err) != 0) { - DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: " - "channel sequence analogue references " - "must be all the same (single-ended " - "or differential)\n", dev->minor); + dev_dbg(dev->class_dev, + "%s: channel sequence analogue references must be all the same (single-ended or differential)\n", + __func__); } if ((errors & diffchan_err) != 0) { - DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: " - "differential channel number out of " - "range 0 to %u\n", dev->minor, - (s->n_chan / 2) - 1); + dev_dbg(dev->class_dev, + "%s: differential channel number out of range 0 to %u\n", + __func__, (s->n_chan / 2) - 1); } if ((errors & buggy_chan0_err) != 0) { dev_info(dev->class_dev, @@ -2637,7 +2635,7 @@ static int pci230_attach_common(struct comedi_device *dev, struct comedi_subdevice *s; unsigned long iobase1, iobase2; /* PCI230's I/O spaces 1 and 2 respectively. 
*/ - int irq_hdl, rc; + int rc; comedi_set_hw_dev(dev, &pci_dev->dev); @@ -2709,16 +2707,12 @@ static int pci230_attach_common(struct comedi_device *dev, outw(devpriv->adcg, dev->iobase + PCI230_ADCG); outw(devpriv->adccon | PCI230_ADC_FIFO_RESET, dev->iobase + PCI230_ADCCON); - /* Register the interrupt handler. */ - irq_hdl = request_irq(pci_dev->irq, pci230_interrupt, - IRQF_SHARED, "amplc_pci230", dev); - if (irq_hdl < 0) { - dev_warn(dev->class_dev, - "unable to register irq %u, commands will not be available\n", - pci_dev->irq); - } else { - dev->irq = pci_dev->irq; - dev_dbg(dev->class_dev, "registered irq %u\n", pci_dev->irq); + + if (pci_dev->irq) { + rc = request_irq(pci_dev->irq, pci230_interrupt, IRQF_SHARED, + dev->board_name, dev); + if (rc == 0) + dev->irq = pci_dev->irq; } rc = comedi_alloc_subdevices(dev, 3); @@ -2734,14 +2728,14 @@ static int pci230_attach_common(struct comedi_device *dev, s->range_table = &pci230_ai_range; s->insn_read = &pci230_ai_rinsn; s->len_chanlist = 256; /* but there are restrictions. */ - /* Only register commands if the interrupt handler is installed. */ - if (irq_hdl == 0) { + if (dev->irq) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; s->do_cmd = &pci230_ai_cmd; s->do_cmdtest = &pci230_ai_cmdtest; s->cancel = pci230_ai_cancel; } + s = &dev->subdevices[1]; /* analog output subdevice */ if (thisboard->ao_chans > 0) { @@ -2753,9 +2747,7 @@ static int pci230_attach_common(struct comedi_device *dev, s->insn_write = &pci230_ao_winsn; s->insn_read = &pci230_ao_rinsn; s->len_chanlist = thisboard->ao_chans; - /* Only register commands if the interrupt handler is - * installed. */ - if (irq_hdl == 0) { + if (dev->irq) { dev->write_subdev = s; s->subdev_flags |= SDF_CMD_WRITE; s->do_cmd = &pci230_ao_cmd; @@ -2765,6 +2757,7 @@ static int pci230_attach_common(struct comedi_device *dev, } else { s->type = COMEDI_SUBD_UNUSED; } + s = &dev->subdevices[2]; /* digital i/o subdevice */ if (thisboard->have_dio) { @@ -2856,7 +2849,7 @@ static int amplc_pci230_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(amplc_pci230_pci_table) = { +static const struct pci_device_id amplc_pci230_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) }, { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) }, { 0 } diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c index 4bd4ef8e88cd05..be28e6cbea2065 100644 --- a/drivers/staging/comedi/drivers/amplc_pci263.c +++ b/drivers/staging/comedi/drivers/amplc_pci263.c @@ -96,7 +96,7 @@ static struct comedi_driver amplc_pci263_driver = { .detach = comedi_pci_disable, }; -static DEFINE_PCI_DEVICE_TABLE(pci263_pci_table) = { +static const struct pci_device_id pci263_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263) }, {0} }; diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c index 217aa19cdc3211..5034f663eec9e1 100644 --- a/drivers/staging/comedi/drivers/c6xdigio.c +++ b/drivers/staging/comedi/drivers/c6xdigio.c @@ -94,8 +94,6 @@ static void C6X_pwmInit(unsigned long baseAddr) { int timeout = 0; -/* printk("Inside C6X_pwmInit\n"); */ - WriteByteToHwPort(baseAddr, 0x70); while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) { @@ -132,8 +130,6 @@ static void C6X_pwmOutput(unsigned long baseAddr, unsigned channel, int value) int timeout = 0; unsigned tmp; - /* printk("Inside C6X_pwmOutput\n"); */ - 
pwm.cmd = value; if (pwm.cmd > 498) pwm.cmd = 498; @@ -200,8 +196,6 @@ static int C6X_encInput(unsigned long baseAddr, unsigned channel) int timeout = 0; int tmp; - /* printk("Inside C6X_encInput\n"); */ - enc.value = 0; if (channel == 0) ppcmd = 0x48; @@ -295,8 +289,6 @@ static void C6X_encResetAll(unsigned long baseAddr) { unsigned timeout = 0; -/* printk("Inside C6X_encResetAll\n"); */ - WriteByteToHwPort(baseAddr, 0x68); while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) { @@ -322,14 +314,6 @@ static void C6X_encResetAll(unsigned long baseAddr) } } -static int c6xdigio_pwmo_insn_read(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - printk(KERN_DEBUG "c6xdigio_pwmo_insn_read %x\n", insn->n); - return insn->n; -} - static int c6xdigio_pwmo_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, @@ -338,7 +322,6 @@ static int c6xdigio_pwmo_insn_write(struct comedi_device *dev, int i; int chan = CR_CHAN(insn->chanspec); - /* printk("c6xdigio_pwmo_insn_write %x\n", insn->n); */ for (i = 0; i < insn->n; i++) { C6X_pwmOutput(dev->iobase, chan, data[i]); /* devpriv->ao_readback[chan] = data[i]; */ @@ -346,31 +329,10 @@ static int c6xdigio_pwmo_insn_write(struct comedi_device *dev, return i; } -/* static int c6xdigio_ei_init_insn_read(struct comedi_device *dev, */ -/* struct comedi_subdevice *s, */ -/* struct comedi_insn *insn, */ -/* unsigned int *data) */ -/* { */ -/* printk("c6xdigio_ei_init_insn_read %x\n", insn->n); */ -/* return insn->n; */ -/* } */ - -/* static int c6xdigio_ei_init_insn_write(struct comedi_device *dev, */ -/* struct comedi_subdevice *s, */ -/* struct comedi_insn *insn, */ -/* unsigned int *data) */ -/* { */ -/* int i; */ -/* int chan = CR_CHAN(insn->chanspec); */ - /* *//* C6X_encResetAll( dev->iobase ); */ - /* *//* return insn->n; */ -/* } */ - static int c6xdigio_ei_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { - /* printk("c6xdigio_ei__insn_read %x\n", insn->n); */ int n; int chan = CR_CHAN(insn->chanspec); @@ -382,12 +344,8 @@ static int c6xdigio_ei_insn_read(struct comedi_device *dev, static void board_init(struct comedi_device *dev) { - - /* printk("Inside board_init\n"); */ - C6X_pwmInit(dev->iobase); C6X_encResetAll(dev->iobase); - } static const struct pnp_device_id c6xdigio_pnp_tbl[] = { @@ -426,7 +384,6 @@ static int c6xdigio_attach(struct comedi_device *dev, s->subdev_flags = SDF_WRITEABLE; s->n_chan = 2; /* s->trig[0] = c6xdigio_pwmo; */ - s->insn_read = c6xdigio_pwmo_insn_read; s->insn_write = c6xdigio_pwmo_insn_write; s->maxdata = 500; s->range_table = &range_bipolar10; /* A suitable lie */ @@ -441,17 +398,6 @@ static int c6xdigio_attach(struct comedi_device *dev, s->maxdata = 0xffffff; s->range_table = &range_unknown; - /* s = &dev->subdevices[2]; */ - /* pwm output subdevice */ - /* s->type = COMEDI_SUBD_COUNTER; // Not sure what to put here */ - /* s->subdev_flags = SDF_WRITEABLE; */ - /* s->n_chan = 1; */ - /* s->trig[0] = c6xdigio_ei_init; */ - /* s->insn_read = c6xdigio_ei_init_insn_read; */ - /* s->insn_write = c6xdigio_ei_init_insn_write; */ - /* s->maxdata = 0xFFFF; // Really just a don't care */ - /* s->range_table = &range_unknown; // Not sure what to put here */ - /* I will call this init anyway but more than likely the DSP board */ /* will not be connected when device driver is loaded. 
*/ board_init(dev); diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c index e72a403db17ca8..9819be092f8da8 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas.c +++ b/drivers/staging/comedi/drivers/cb_pcidas.c @@ -181,43 +181,40 @@ static inline unsigned int DAC_DATA_REG(unsigned int channel) /* analog input ranges for most boards */ static const struct comedi_lrange cb_pcidas_ranges = { - 8, - { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25) - } + 8, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; /* pci-das1001 input ranges */ static const struct comedi_lrange cb_pcidas_alt_ranges = { - 8, - { - BIP_RANGE(10), - BIP_RANGE(1), - BIP_RANGE(0.1), - BIP_RANGE(0.01), - UNI_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(0.1), - UNI_RANGE(0.01) - } + 8, { + BIP_RANGE(10), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.01), + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.01) + } }; /* analog output ranges */ static const struct comedi_lrange cb_pcidas_ao_ranges = { - 4, - { - BIP_RANGE(5), - BIP_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(10), - } + 4, { + BIP_RANGE(5), + BIP_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(10) + } }; enum trimpot_model { @@ -1614,7 +1611,7 @@ static int cb_pcidas_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(cb_pcidas_pci_table) = { +static const struct pci_device_id cb_pcidas_pci_table[] = { { PCI_VDEVICE(CB, 0x0001), BOARD_PCIDAS1602_16 }, { PCI_VDEVICE(CB, 0x000f), BOARD_PCIDAS1200 }, { PCI_VDEVICE(CB, 0x0010), BOARD_PCIDAS1602_12 }, diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c index ff5206536be36c..4fff1738e3f8c2 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas64.c +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c @@ -94,15 +94,6 @@ #include "plx9080.h" #include "comedi_fc.h" -#undef PCIDAS64_DEBUG /* disable debugging code */ -/* #define PCIDAS64_DEBUG enable debugging code */ - -#ifdef PCIDAS64_DEBUG -#define DEBUG_PRINT(format, args...) pr_debug(format, ## args) -#else -#define DEBUG_PRINT(format, args...) 
no_printk(format, ## args) -#endif - #define TIMER_BASE 25 /* 40MHz master clock */ /* 100kHz 'prescaled' clock for slow acquisition, * maybe I'll support this someday */ @@ -438,91 +429,85 @@ static inline uint8_t attenuate_bit(unsigned int channel) /* analog input ranges for 64xx boards */ static const struct comedi_lrange ai_ranges_64xx = { - 8, - { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25) - } + 8, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; /* analog input ranges for 60xx boards */ static const struct comedi_lrange ai_ranges_60xx = { - 4, - { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(0.5), - BIP_RANGE(0.05), - } + 4, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05) + } }; /* analog input ranges for 6030, etc boards */ static const struct comedi_lrange ai_ranges_6030 = { - 14, - { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2), - BIP_RANGE(1), - BIP_RANGE(0.5), - BIP_RANGE(0.2), - BIP_RANGE(0.1), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2), - UNI_RANGE(1), - UNI_RANGE(0.5), - UNI_RANGE(0.2), - UNI_RANGE(0.1), - } + 14, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2), + BIP_RANGE(1), + BIP_RANGE(0.5), + BIP_RANGE(0.2), + BIP_RANGE(0.1), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2), + UNI_RANGE(1), + UNI_RANGE(0.5), + UNI_RANGE(0.2), + UNI_RANGE(0.1) + } }; /* analog input ranges for 6052, etc boards */ static const struct comedi_lrange ai_ranges_6052 = { - 15, - { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1), - BIP_RANGE(0.5), - BIP_RANGE(0.25), - BIP_RANGE(0.1), - BIP_RANGE(0.05), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2), - UNI_RANGE(1), - UNI_RANGE(0.5), - UNI_RANGE(0.2), - UNI_RANGE(0.1), - } + 15, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1), + BIP_RANGE(0.5), + BIP_RANGE(0.25), + BIP_RANGE(0.1), + BIP_RANGE(0.05), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2), + UNI_RANGE(1), + UNI_RANGE(0.5), + UNI_RANGE(0.2), + UNI_RANGE(0.1) + } }; /* analog input ranges for 4020 board */ static const struct comedi_lrange ai_ranges_4020 = { - 2, - { - BIP_RANGE(5), - BIP_RANGE(1), - } + 2, { + BIP_RANGE(5), + BIP_RANGE(1) + } }; /* analog output ranges */ static const struct comedi_lrange ao_ranges_64xx = { - 4, - { - BIP_RANGE(5), - BIP_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(10), - } + 4, { + BIP_RANGE(5), + BIP_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(10) + } }; static const int ao_range_code_64xx[] = { @@ -537,11 +522,10 @@ static const int ao_range_code_60xx[] = { }; static const struct comedi_lrange ao_ranges_6030 = { - 2, - { - BIP_RANGE(10), - UNI_RANGE(10), - } + 2, { + BIP_RANGE(10), + UNI_RANGE(10) + } }; static const int ao_range_code_6030[] = { @@ -550,11 +534,10 @@ static const int ao_range_code_6030[] = { }; static const struct comedi_lrange ao_ranges_4020 = { - 2, - { - BIP_RANGE(5), - BIP_RANGE(10), - } + 2, { + BIP_RANGE(5), + BIP_RANGE(10) + } }; static const int ao_range_code_4020[] = { @@ -1252,8 +1235,6 @@ static void disable_ai_interrupts(struct comedi_device *dev) writew(devpriv->intr_enable_bits, devpriv->main_iobase + INTR_ENABLE_REG); spin_unlock_irqrestore(&dev->spinlock, flags); - - DEBUG_PRINT("intr enable bits 0x%x\n", devpriv->intr_enable_bits); } static void enable_ai_interrupts(struct comedi_device *dev, @@ -1277,7 +1258,6 @@ static void enable_ai_interrupts(struct comedi_device 
*dev, devpriv->intr_enable_bits |= bits; writew(devpriv->intr_enable_bits, devpriv->main_iobase + INTR_ENABLE_REG); - DEBUG_PRINT("intr enable bits 0x%x\n", devpriv->intr_enable_bits); spin_unlock_irqrestore(&dev->spinlock, flags); } @@ -1292,38 +1272,6 @@ static void init_plx9080(struct comedi_device *dev) devpriv->plx_control_bits = readl(devpriv->plx9080_iobase + PLX_CONTROL_REG); - /* plx9080 dump */ - DEBUG_PRINT(" plx interrupt status 0x%x\n", - readl(plx_iobase + PLX_INTRCS_REG)); - DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG)); - DEBUG_PRINT(" plx control reg 0x%x\n", devpriv->plx_control_bits); - DEBUG_PRINT(" plx mode/arbitration reg 0x%x\n", - readl(plx_iobase + PLX_MARB_REG)); - DEBUG_PRINT(" plx region0 reg 0x%x\n", - readl(plx_iobase + PLX_REGION0_REG)); - DEBUG_PRINT(" plx region1 reg 0x%x\n", - readl(plx_iobase + PLX_REGION1_REG)); - - DEBUG_PRINT(" plx revision 0x%x\n", - readl(plx_iobase + PLX_REVISION_REG)); - DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n", - readl(plx_iobase + PLX_DMA0_MODE_REG)); - DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n", - readl(plx_iobase + PLX_DMA1_MODE_REG)); - DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n", - readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG)); - DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n", - readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG)); - DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n", - readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG)); - DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n", - readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG)); - DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n", - readb(plx_iobase + PLX_DMA0_CS_REG)); - DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n", - readl(plx_iobase + PLX_DMA0_THRESHOLD_REG)); - DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG)); - #ifdef __BIG_ENDIAN bits = BIGEND_DMA0 | BIGEND_DMA1; #else @@ -1417,9 +1365,6 @@ static int set_ai_fifo_segment_length(struct comedi_device *dev, devpriv->ai_fifo_segment_length = num_increments * increment_size; - DEBUG_PRINT("set hardware fifo segment length to %i\n", - devpriv->ai_fifo_segment_length); - return devpriv->ai_fifo_segment_length; } @@ -1441,8 +1386,6 @@ static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples) num_samples = retval * fifo->num_segments * fifo->sample_packing_ratio; - DEBUG_PRINT("set hardware fifo size to %i\n", num_samples); - return num_samples; } @@ -1538,8 +1481,6 @@ static int alloc_and_init_dma_members(struct comedi_device *dev) if (devpriv->ai_dma_desc == NULL) return -ENOMEM; - DEBUG_PRINT("ai dma descriptors start at bus addr 0x%llx\n", - (unsigned long long)devpriv->ai_dma_desc_bus_addr); if (ao_cmd_is_supported(thisboard)) { devpriv->ao_dma_desc = pci_alloc_consistent(pcidev, @@ -1548,9 +1489,6 @@ static int alloc_and_init_dma_members(struct comedi_device *dev) &devpriv->ao_dma_desc_bus_addr); if (devpriv->ao_dma_desc == NULL) return -ENOMEM; - - DEBUG_PRINT("ao dma descriptors start at bus addr 0x%llx\n", - (unsigned long long)devpriv->ao_dma_desc_bus_addr); } /* initialize dma descriptors */ for (i = 0; i < ai_dma_ring_count(thisboard); i++) { @@ -1650,8 +1588,6 @@ static void i2c_write_byte(struct comedi_device *dev, uint8_t byte) uint8_t bit; unsigned int num_bits = 8; - DEBUG_PRINT("writing to i2c byte 0x%x\n", byte); - for (bit = 1 << (num_bits - 1); bit; bit >>= 1) { i2c_set_scl(dev, 0); if ((byte & bit)) @@ -1738,7 +1674,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long 
flags; static const int timeout = 100; - DEBUG_PRINT("chanspec 0x%x\n", insn->chanspec); channel = CR_CHAN(insn->chanspec); range = CR_RANGE(insn->chanspec); aref = CR_AREF(insn->chanspec); @@ -1766,7 +1701,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, if (insn->chanspec & CR_ALT_SOURCE) { unsigned int cal_en_bit; - DEBUG_PRINT("reading calibration source\n"); if (thisboard->layout == LAYOUT_60XX) cal_en_bit = CAL_EN_60XX_BIT; else @@ -1800,7 +1734,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, devpriv->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK; if (insn->chanspec & CR_ALT_SOURCE) { - DEBUG_PRINT("reading calibration source\n"); devpriv->i2c_cal_range_bits |= adc_src_4020_bits(devpriv->calibration_source); } else { /* select BNC inputs */ @@ -1839,7 +1772,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, /* wait for data */ for (i = 0; i < timeout; i++) { bits = readw(devpriv->main_iobase + HW_STATUS_REG); - DEBUG_PRINT(" pipe bits 0x%x\n", pipe_full_bits(bits)); if (thisboard->layout == LAYOUT_4020) { if (readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG)) @@ -1850,7 +1782,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, } udelay(1); } - DEBUG_PRINT(" looped %i times waiting for data\n", i); if (i == timeout) { comedi_error(dev, " analog input read insn timed out"); dev_info(dev->class_dev, "status 0x%x\n", bits); @@ -1884,7 +1815,6 @@ static int ai_config_calibration_source(struct comedi_device *dev, return -EINVAL; } - DEBUG_PRINT("setting calibration source to %i\n", source); devpriv->calibration_source = source; return 2; @@ -2368,7 +2298,6 @@ static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd) /* load lower 16 bits of convert interval */ writew(convert_counter & 0xffff, devpriv->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG); - DEBUG_PRINT("convert counter 0x%x\n", convert_counter); /* load upper 8 bits of convert interval */ writew((convert_counter >> 16) & 0xff, devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG); @@ -2378,7 +2307,6 @@ static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd) /* load upper 8 bits of scan delay */ writew((scan_counter >> 16) & 0xff, devpriv->main_iobase + ADC_DELAY_INTERVAL_UPPER_REG); - DEBUG_PRINT("scan counter 0x%x\n", scan_counter); } static int use_internal_queue_6xxx(const struct comedi_cmd *cmd) @@ -2469,9 +2397,6 @@ static int setup_channel_queue(struct comedi_device *dev, writew(bits, devpriv->main_iobase + ADC_QUEUE_FIFO_REG); - DEBUG_PRINT( - "wrote 0x%x to external channel queue\n", - bits); } /* doing a queue clear is not specified in board docs, * but required for reliable operation */ @@ -2593,7 +2518,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) } writew(devpriv->adc_control1_bits, devpriv->main_iobase + ADC_CONTROL1_REG); - DEBUG_PRINT("control1 bits 0x%x\n", devpriv->adc_control1_bits); spin_unlock_irqrestore(&dev->spinlock, flags); /* clear adc buffer */ @@ -2645,17 +2569,14 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) if (use_hw_sample_counter(cmd)) bits |= ADC_SAMPLE_COUNTER_EN_BIT; writew(bits, devpriv->main_iobase + ADC_CONTROL0_REG); - DEBUG_PRINT("control0 bits 0x%x\n", bits); devpriv->ai_cmd_running = 1; spin_unlock_irqrestore(&dev->spinlock, flags); /* start acquisition */ - if (cmd->start_src == TRIG_NOW) { + if (cmd->start_src == TRIG_NOW) writew(0, devpriv->main_iobase + ADC_START_REG); - 
DEBUG_PRINT("soft trig\n"); - } return 0; } @@ -2690,10 +2611,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev) read_segment = adc_upper_read_ptr_code(prepost_bits); write_segment = adc_upper_write_ptr_code(prepost_bits); - DEBUG_PRINT(" rd seg %i, wrt seg %i, rd idx %i, wrt idx %i\n", - read_segment, write_segment, read_index, - write_index); - if (read_segment != write_segment) num_samples = devpriv->ai_fifo_segment_length - read_index; @@ -2715,8 +2632,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev) break; } - DEBUG_PRINT(" read %i samples from fifo\n", num_samples); - for (i = 0; i < num_samples; i++) { cfc_write_to_buffer(s, readw(devpriv->main_iobase + @@ -2812,11 +2727,6 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel) num_samples * sizeof(uint16_t)); devpriv->ai_dma_index = (devpriv->ai_dma_index + 1) % ai_dma_ring_count(thisboard); - - DEBUG_PRINT("next buffer addr 0x%lx\n", - (unsigned long)devpriv-> - ai_buffer_bus_addr[devpriv->ai_dma_index]); - DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr); } /* XXX check for dma ring buffer overrun * (use end-of-chain bit to mark last unused buffer) */ @@ -2845,24 +2755,17 @@ static void handle_ai_interrupt(struct comedi_device *dev, if (plx_status & ICS_DMA1_A) { /* dma chan 1 interrupt */ writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT, devpriv->plx9080_iobase + PLX_DMA1_CS_REG); - DEBUG_PRINT("dma1 status 0x%x\n", dma1_status); if (dma1_status & PLX_DMA_EN_BIT) drain_dma_buffers(dev, 1); - - DEBUG_PRINT(" cleared dma ch1 interrupt\n"); } spin_unlock_irqrestore(&dev->spinlock, flags); - if (status & ADC_DONE_BIT) - DEBUG_PRINT("adc done interrupt\n"); - /* drain fifo with pio */ if ((status & ADC_DONE_BIT) || ((cmd->flags & TRIG_WAKE_EOS) && (status & ADC_INTR_PENDING_BIT) && (thisboard->layout != LAYOUT_4020))) { - DEBUG_PRINT("pio fifo drain\n"); spin_lock_irqsave(&dev->spinlock, flags); if (devpriv->ai_cmd_running) { spin_unlock_irqrestore(&dev->spinlock, flags); @@ -2947,7 +2850,6 @@ static void restart_ao_dma(struct comedi_device *dev) dma_desc_bits = readl(devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG); dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT; - DEBUG_PRINT("restarting ao dma, descriptor reg 0x%x\n", dma_desc_bits); load_first_dma_descriptor(dev, 0, dma_desc_bits); dma_start_sync(dev, 0); @@ -2963,10 +2865,6 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev, buffer_index = devpriv->ao_dma_index; prev_buffer_index = prev_ao_dma_index(dev); - DEBUG_PRINT("attempting to load ao buffer %i (0x%llx)\n", buffer_index, - (unsigned long long)devpriv->ao_buffer_bus_addr[ - buffer_index]); - num_bytes = comedi_buf_read_n_available(dev->write_subdev->async); if (num_bytes > DMA_BUFFER_SIZE) num_bytes = DMA_BUFFER_SIZE; @@ -2977,8 +2875,6 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev, if (num_bytes == 0) return 0; - DEBUG_PRINT("loading %i bytes\n", num_bytes); - num_bytes = cfc_read_array_from_buffer(dev->write_subdev, devpriv-> ao_buffer[buffer_index], @@ -3052,14 +2948,12 @@ static void handle_ao_interrupt(struct comedi_device *dev, writeb(PLX_CLEAR_DMA_INTR_BIT, devpriv->plx9080_iobase + PLX_DMA0_CS_REG); spin_unlock_irqrestore(&dev->spinlock, flags); - DEBUG_PRINT("dma0 status 0x%x\n", dma0_status); if (dma0_status & PLX_DMA_EN_BIT) { load_ao_dma(dev, cmd); /* try to recover from dma end-of-chain event */ if (ao_dma_needs_restart(dev, dma0_status)) restart_ao_dma(dev); } - DEBUG_PRINT(" cleared dma ch0 
interrupt\n"); } else { spin_unlock_irqrestore(&dev->spinlock, flags); } @@ -3068,12 +2962,6 @@ static void handle_ao_interrupt(struct comedi_device *dev, async->events |= COMEDI_CB_EOA; if (ao_stopped_by_error(dev, cmd)) async->events |= COMEDI_CB_ERROR; - DEBUG_PRINT("plx dma0 desc reg 0x%x\n", - readl(devpriv->plx9080_iobase + - PLX_DMA0_DESCRIPTOR_REG)); - DEBUG_PRINT("plx dma0 address reg 0x%x\n", - readl(devpriv->plx9080_iobase + - PLX_DMA0_PCI_ADDRESS_REG)); } cfc_handle_events(dev, s); } @@ -3089,15 +2977,12 @@ static irqreturn_t handle_interrupt(int irq, void *d) plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG); status = readw(devpriv->main_iobase + HW_STATUS_REG); - DEBUG_PRINT("hw status 0x%x, plx status 0x%x\n", status, plx_status); - /* an interrupt before all the postconfig stuff gets done could * cause a NULL dereference if we continue through the * interrupt handler */ - if (!dev->attached) { - DEBUG_PRINT("premature interrupt, ignoring\n"); + if (!dev->attached) return IRQ_HANDLED; - } + handle_ai_interrupt(dev, status, plx_status); handle_ao_interrupt(dev, status, plx_status); @@ -3105,11 +2990,8 @@ static irqreturn_t handle_interrupt(int irq, void *d) if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */ plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG); writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG); - DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits); } - DEBUG_PRINT("exiting handler\n"); - return IRQ_HANDLED; } @@ -3130,7 +3012,6 @@ static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) abort_dma(dev, 1); - DEBUG_PRINT("ai canceled\n"); return 0; } @@ -3458,7 +3339,6 @@ static int dio_callback(int dir, int port, int data, unsigned long arg) void __iomem *iobase = (void __iomem *)arg; if (dir) { writeb(data, iobase + port); - DEBUG_PRINT("wrote 0x%x to port %i\n", data, port); return 0; } else { return readb(iobase + port); @@ -4046,11 +3926,6 @@ static int auto_attach(struct comedi_device *dev, return -ENOMEM; } - DEBUG_PRINT(" plx9080 remapped to 0x%p\n", devpriv->plx9080_iobase); - DEBUG_PRINT(" main remapped to 0x%p\n", devpriv->main_iobase); - DEBUG_PRINT(" diocounter remapped to 0x%p\n", - devpriv->dio_counter_iobase); - /* figure out what local addresses are */ local_range = readl(devpriv->plx9080_iobase + PLX_LAS0RNG_REG) & LRNG_MEM_MASK; @@ -4065,9 +3940,6 @@ static int auto_attach(struct comedi_device *dev, devpriv->local1_iobase = ((uint32_t)devpriv->dio_counter_phys_iobase & ~local_range) | local_decode; - DEBUG_PRINT(" local 0 io addr 0x%x\n", devpriv->local0_iobase); - DEBUG_PRINT(" local 1 io addr 0x%x\n", devpriv->local1_iobase); - retval = alloc_and_init_dma_members(dev); if (retval < 0) return retval; @@ -4161,7 +4033,7 @@ static int cb_pcidas64_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(cb_pcidas64_pci_table) = { +static const struct pci_device_id cb_pcidas64_pci_table[] = { { PCI_VDEVICE(CB, 0x001d), BOARD_PCIDAS6402_16 }, { PCI_VDEVICE(CB, 0x001e), BOARD_PCIDAS6402_12 }, { PCI_VDEVICE(CB, 0x0035), BOARD_PCIDAS64_M1_16 }, diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c index 94f11582027901..8cca0518cfda78 100644 --- a/drivers/staging/comedi/drivers/cb_pcidda.c +++ b/drivers/staging/comedi/drivers/cb_pcidda.c @@ -407,7 +407,7 @@ static int cb_pcidda_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(cb_pcidda_pci_table) = { +static const struct 
pci_device_id cb_pcidda_pci_table[] = { { PCI_VDEVICE(CB, 0x0020), BOARD_DDA02_12 }, { PCI_VDEVICE(CB, 0x0021), BOARD_DDA04_12 }, { PCI_VDEVICE(CB, 0x0022), BOARD_DDA08_12 }, diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c index 30520d4c16a61f..57295d189ff6a2 100644 --- a/drivers/staging/comedi/drivers/cb_pcimdas.c +++ b/drivers/staging/comedi/drivers/cb_pcimdas.c @@ -44,9 +44,6 @@ See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details. #include "plx9052.h" #include "8255.h" -/* #define CBPCIMDAS_DEBUG */ -#undef CBPCIMDAS_DEBUG - /* Registers for the PCIM-DAS1602/16 */ /* sizes of io regions (bytes) */ @@ -145,10 +142,9 @@ static int cb_pcimdas_ai_rinsn(struct comedi_device *dev, if (!busy) break; } - if (i == TIMEOUT) { - printk("timeout\n"); + if (i == TIMEOUT) return -ETIMEDOUT; - } + /* read data */ data[n] = inw(dev->iobase + 0); } @@ -222,15 +218,6 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev, devpriv->BADR3 = pci_resource_start(pcidev, 3); iobase_8255 = pci_resource_start(pcidev, 4); -/* Dont support IRQ yet */ -/* get irq */ -/* if(request_irq(pcidev->irq, cb_pcimdas_interrupt, IRQF_SHARED, "cb_pcimdas", dev )) */ -/* { */ -/* printk(" unable to allocate irq %u\n", pcidev->irq); */ -/* return -EINVAL; */ -/* } */ -/* dev->irq = pcidev->irq; */ - ret = comedi_alloc_subdevices(dev, 3); if (ret) return ret; @@ -288,7 +275,7 @@ static int cb_pcimdas_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(cb_pcimdas_pci_table) = { +static const struct pci_device_id cb_pcimdas_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c index edf17b63096fa6..43a86630a66e0c 100644 --- a/drivers/staging/comedi/drivers/cb_pcimdda.c +++ b/drivers/staging/comedi/drivers/cb_pcimdda.c @@ -206,7 +206,7 @@ static int cb_pcimdda_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(cb_pcimdda_pci_table) = { +static const struct pci_device_id cb_pcimdda_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_ID_PCIM_DDA06_16) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c index 16c07802107fde..d539eaf53b63fa 100644 --- a/drivers/staging/comedi/drivers/comedi_test.c +++ b/drivers/staging/comedi/drivers/comedi_test.c @@ -74,11 +74,10 @@ static const int nano_per_micro = 1000; /* fake analog input ranges */ static const struct comedi_lrange waveform_ai_ranges = { - 2, - { - BIP_RANGE(10), - BIP_RANGE(5), - } + 2, { + BIP_RANGE(10), + BIP_RANGE(5) + } }; static unsigned short fake_sawtooth(struct comedi_device *dev, diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c index 89836c0828d9cb..323a7f39cd97ae 100644 --- a/drivers/staging/comedi/drivers/contec_pci_dio.c +++ b/drivers/staging/comedi/drivers/contec_pci_dio.c @@ -111,7 +111,7 @@ static int contec_pci_dio_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(contec_pci_dio_pci_table) = { +static const struct pci_device_id contec_pci_dio_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c index de920ccff400fb..ce153fcb8b2af8 100644 --- a/drivers/staging/comedi/drivers/daqboard2000.c +++ 
b/drivers/staging/comedi/drivers/daqboard2000.c @@ -772,7 +772,7 @@ static int daqboard2000_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = { +static const struct pci_device_id daqboard2000_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_IOTECH, 0x0409) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c index 15dd33e3e1c796..e5c0ee9a09c214 100644 --- a/drivers/staging/comedi/drivers/das08.c +++ b/drivers/staging/comedi/drivers/das08.c @@ -120,46 +120,49 @@ /* gainlist same as _pgx_ below */ -static const struct comedi_lrange range_das08_pgl = { 9, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25) - } +static const struct comedi_lrange range_das08_pgl = { + 9, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; -static const struct comedi_lrange range_das08_pgh = { 12, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(1), - BIP_RANGE(0.5), - BIP_RANGE(0.1), - BIP_RANGE(0.05), - BIP_RANGE(0.01), - BIP_RANGE(0.005), - UNI_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(0.1), - UNI_RANGE(0.01), - } +static const struct comedi_lrange range_das08_pgh = { + 12, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(1), + BIP_RANGE(0.5), + BIP_RANGE(0.1), + BIP_RANGE(0.05), + BIP_RANGE(0.01), + BIP_RANGE(0.005), + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.01) + } }; -static const struct comedi_lrange range_das08_pgm = { 9, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(0.5), - BIP_RANGE(0.05), - BIP_RANGE(0.01), - UNI_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(0.1), - UNI_RANGE(0.01) - } +static const struct comedi_lrange range_das08_pgm = { + 9, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.01), + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.01) + } }; /* cio-das08jr.pdf diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c index 3a6d3725b25f90..d94af09151b05d 100644 --- a/drivers/staging/comedi/drivers/das08_pci.c +++ b/drivers/staging/comedi/drivers/das08_pci.c @@ -89,7 +89,7 @@ static int das08_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = { +static const struct pci_device_id das08_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_DEVICE_ID_PCIDAS08) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c index fce9acfe808459..fee5facff8ddfb 100644 --- a/drivers/staging/comedi/drivers/das16m1.c +++ b/drivers/staging/comedi/drivers/das16m1.c @@ -110,18 +110,18 @@ irq can be omitted, although the cmd interface will not work without it. 
#define DAS16M1_82C55 0x400 #define DAS16M1_8254_THIRD 0x404 -static const struct comedi_lrange range_das16m1 = { 9, - { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - BIP_RANGE(10), - } +static const struct comedi_lrange range_das16m1 = { + 9, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25), + BIP_RANGE(10) + } }; struct das16m1_private_struct { @@ -269,11 +269,6 @@ static int das16m1_cmd_exec(struct comedi_device *dev, struct comedi_cmd *cmd = &async->cmd; unsigned int byte, i; - if (dev->irq == 0) { - comedi_error(dev, "irq required to execute comedi_cmd"); - return -1; - } - /* disable interrupts and internal pacer */ devpriv->control_state &= ~INTE & ~PACER_MASK; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); @@ -508,38 +503,26 @@ static irqreturn_t das16m1_interrupt(int irq, void *d) static int das16m1_irq_bits(unsigned int irq) { - int ret; - switch (irq) { case 10: - ret = 0x0; - break; + return 0x0; case 11: - ret = 0x1; - break; + return 0x1; case 12: - ret = 0x2; - break; + return 0x2; case 15: - ret = 0x3; - break; + return 0x3; case 2: - ret = 0x4; - break; + return 0x4; case 3: - ret = 0x5; - break; + return 0x5; case 5: - ret = 0x6; - break; + return 0x6; case 7: - ret = 0x7; - break; + return 0x7; default: - return -1; - break; + return 0x0; } - return ret << 4; } /* @@ -553,7 +536,6 @@ static int das16m1_attach(struct comedi_device *dev, struct das16m1_private_struct *devpriv; struct comedi_subdevice *s; int ret; - unsigned int irq; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) @@ -569,24 +551,12 @@ static int das16m1_attach(struct comedi_device *dev, return ret; devpriv->extra_iobase = dev->iobase + DAS16M1_82C55; - /* now for the irq */ - irq = it->options[1]; - /* make sure it is valid */ - if (das16m1_irq_bits(irq) >= 0) { - ret = request_irq(irq, das16m1_interrupt, 0, - dev->driver->driver_name, dev); - if (ret < 0) - return ret; - dev->irq = irq; - printk - ("irq %u\n", irq); - } else if (irq == 0) { - printk - (", no irq\n"); - } else { - comedi_error(dev, "invalid irq\n" - " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n"); - return -EINVAL; + /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */ + if ((1 << it->options[1]) & 0xdcfc) { + ret = request_irq(it->options[1], das16m1_interrupt, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; } ret = comedi_alloc_subdevices(dev, 4); @@ -594,20 +564,22 @@ static int das16m1_attach(struct comedi_device *dev, return ret; s = &dev->subdevices[0]; - dev->read_subdev = s; /* ai */ s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_DIFF; s->n_chan = 8; - s->subdev_flags = SDF_DIFF; - s->len_chanlist = 256; s->maxdata = (1 << 12) - 1; s->range_table = &range_das16m1; s->insn_read = das16m1_ai_rinsn; - s->do_cmdtest = das16m1_cmd_test; - s->do_cmd = das16m1_cmd_exec; - s->cancel = das16m1_cancel; - s->poll = das16m1_poll; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = 256; + s->do_cmdtest = das16m1_cmd_test; + s->do_cmd = das16m1_cmd_exec; + s->cancel = das16m1_cancel; + s->poll = das16m1_poll; + } s = &dev->subdevices[1]; /* di */ @@ -640,10 +612,7 @@ static int das16m1_attach(struct comedi_device *dev, outb(0, dev->iobase + DAS16M1_DIO); /* set the 
interrupt level */ - if (dev->irq) - devpriv->control_state = das16m1_irq_bits(dev->irq); - else - devpriv->control_state = 0; + devpriv->control_state = das16m1_irq_bits(dev->irq) << 4; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c index 1880038956d05d..320d95a5f47b6e 100644 --- a/drivers/staging/comedi/drivers/das1800.c +++ b/drivers/staging/comedi/drivers/das1800.c @@ -178,31 +178,29 @@ enum { /* analog input ranges */ static const struct comedi_lrange range_ai_das1801 = { - 8, - { - RANGE(-5, 5), - RANGE(-1, 1), - RANGE(-0.1, 0.1), - RANGE(-0.02, 0.02), - RANGE(0, 5), - RANGE(0, 1), - RANGE(0, 0.1), - RANGE(0, 0.02), - } + 8, { + BIP_RANGE(5), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.02), + UNI_RANGE(5), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.02) + } }; static const struct comedi_lrange range_ai_das1802 = { - 8, - { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-2.5, 2.5), - RANGE(-1.25, 1.25), - RANGE(0, 10), - RANGE(0, 5), - RANGE(0, 2.5), - RANGE(0, 1.25), - } + 8, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; struct das1800_board { @@ -445,10 +443,9 @@ struct das1800_private { /* analog out range for 'ao' boards */ /* static const struct comedi_lrange range_ao_2 = { - 2, - { - RANGE(-10, 10), - RANGE(-5, 5), + 2, { + BIP_RANGE(10), + BIP_RANGE(5) } }; */ @@ -462,7 +459,7 @@ static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev, return sample; } -static void munge_data(struct comedi_device *dev, uint16_t * array, +static void munge_data(struct comedi_device *dev, uint16_t *array, unsigned int num_elements) { unsigned int i; @@ -644,7 +641,7 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s) static void das1800_ai_handler(struct comedi_device *dev) { struct das1800_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int status = inb(dev->iobase + DAS1800_STATUS); @@ -1150,12 +1147,6 @@ static int das1800_ai_do_cmd(struct comedi_device *dev, struct comedi_async *async = s->async; const struct comedi_cmd *cmd = &async->cmd; - if (!dev->irq) { - comedi_error(dev, - "no irq assigned for das-1800, cannot do hardware conversions"); - return -1; - } - /* disable dma on TRIG_WAKE_EOS, or TRIG_RT * (because dma in handler is unsafe at hard real-time priority) */ if (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT)) @@ -1522,43 +1513,34 @@ static int das1800_attach(struct comedi_device *dev, devpriv->iobase2 = iobase2; } - /* grab our IRQ */ - if (irq) { - if (request_irq(irq, das1800_interrupt, 0, - dev->driver->driver_name, dev)) { - dev_dbg(dev->class_dev, "unable to allocate irq %u\n", - irq); - return -EINVAL; - } - } - dev->irq = irq; + if (irq == 3 || irq == 5 || irq == 7 || irq == 10 || irq == 11 || + irq == 15) { + ret = request_irq(irq, das1800_interrupt, 0, + dev->board_name, dev); + if (ret == 0) { + dev->irq = irq; - /* set bits that tell card which irq to use */ - switch (irq) { - case 0: - break; - case 3: - devpriv->irq_dma_bits |= 0x8; - break; - case 5: - devpriv->irq_dma_bits |= 0x10; - break; - case 7: - devpriv->irq_dma_bits |= 0x18; - break; - case 10: - devpriv->irq_dma_bits |= 0x28; - break; - case 11: - 
devpriv->irq_dma_bits |= 0x30; - break; - case 15: - devpriv->irq_dma_bits |= 0x38; - break; - default: - dev_err(dev->class_dev, "irq out of range\n"); - return -EINVAL; - break; + switch (irq) { + case 3: + devpriv->irq_dma_bits |= 0x8; + break; + case 5: + devpriv->irq_dma_bits |= 0x10; + break; + case 7: + devpriv->irq_dma_bits |= 0x18; + break; + case 10: + devpriv->irq_dma_bits |= 0x28; + break; + case 11: + devpriv->irq_dma_bits |= 0x30; + break; + case 15: + devpriv->irq_dma_bits |= 0x38; + break; + } + } } ret = das1800_init_dma(dev, dma0, dma1); @@ -1578,20 +1560,23 @@ static int das1800_attach(struct comedi_device *dev, /* analog input subdevice */ s = &dev->subdevices[0]; - dev->read_subdev = s; s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND; if (thisboard->common) s->subdev_flags |= SDF_COMMON; s->n_chan = thisboard->qram_len; - s->len_chanlist = thisboard->qram_len; s->maxdata = (1 << thisboard->resolution) - 1; s->range_table = thisboard->range_ai; - s->do_cmd = das1800_ai_do_cmd; - s->do_cmdtest = das1800_ai_do_cmdtest; s->insn_read = das1800_ai_rinsn; - s->poll = das1800_ai_poll; - s->cancel = das1800_cancel; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = s->n_chan; + s->do_cmd = das1800_ai_do_cmd; + s->do_cmdtest = das1800_ai_do_cmdtest; + s->poll = das1800_ai_poll; + s->cancel = das1800_cancel; + } /* analog out */ s = &dev->subdevices[1]; diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c index fb25cb847032ee..43027ee500e3ab 100644 --- a/drivers/staging/comedi/drivers/das6402.c +++ b/drivers/staging/comedi/drivers/das6402.c @@ -152,21 +152,12 @@ static irqreturn_t intr_handler(int irq, void *d) dev_warn(dev->class_dev, "BUG: spurious interrupt\n"); return IRQ_HANDLED; } -#ifdef DEBUG - printk("das6402: interrupt! das6402_irqcount=%i\n", - devpriv->das6402_irqcount); - printk("das6402: iobase+2=%i\n", inw_p(dev->iobase + 2)); -#endif das6402_ai_fifo_dregs(dev, s); if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) { outw_p(SCANL, dev->iobase + 2); /* clears the fifo */ outb(0x07, dev->iobase + 8); /* clears all flip-flops */ -#ifdef DEBUG - printk("das6402: Got %i samples\n\n", - devpriv->das6402_wordsread - diff); -#endif s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); } @@ -211,7 +202,7 @@ static int das6402_ai_cancel(struct comedi_device *dev, #ifdef unused static int das6402_ai_mode2(struct comedi_device *dev, - struct comedi_subdevice *s, comedi_trig * it) + struct comedi_subdevice *s, comedi_trig *it) { struct das6402_private *devpriv = dev->private; diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c index b04a5633f754ef..78a19629ff56cb 100644 --- a/drivers/staging/comedi/drivers/dmm32at.c +++ b/drivers/staging/comedi/drivers/dmm32at.c @@ -124,13 +124,12 @@ Configuration Options: /* board AI ranges in comedi structure */ static const struct comedi_lrange dmm32at_airanges = { - 4, - { - UNI_RANGE(10), - UNI_RANGE(5), - BIP_RANGE(10), - BIP_RANGE(5), - } + 4, { + UNI_RANGE(10), + UNI_RANGE(5), + BIP_RANGE(10), + BIP_RANGE(5) + } }; /* register values for above ranges */ @@ -145,13 +144,12 @@ static const unsigned char dmm32at_rangebits[] = { * board. 
The application should only use the range set by the jumper */ static const struct comedi_lrange dmm32at_aoranges = { - 4, - { - UNI_RANGE(10), - UNI_RANGE(5), - BIP_RANGE(10), - BIP_RANGE(5), - } + 4, { + UNI_RANGE(10), + UNI_RANGE(5), + BIP_RANGE(10), + BIP_RANGE(5) + } }; struct dmm32at_private { @@ -182,8 +180,6 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev, chan = CR_CHAN(insn->chanspec) & (s->n_chan - 1); range = CR_RANGE(insn->chanspec); - /* printk("channel=0x%02x, range=%d\n",chan,range); */ - /* zero scan and fifo control and reset fifo */ outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL); @@ -199,10 +195,8 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev, if ((status & DMM32AT_STATUS) == 0) break; } - if (i == 40000) { - printk(KERN_WARNING "dmm32at: timeout\n"); + if (i == 40000) return -ETIMEDOUT; - } /* convert n samples */ for (n = 0; n < insn->n; n++) { @@ -214,10 +208,8 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev, if ((status & DMM32AT_STATUS) == 0) break; } - if (i == 40000) { - printk(KERN_WARNING "dmm32at: timeout\n"); + if (i == 40000) return -ETIMEDOUT; - } /* read data */ lsb = inb(dev->iobase + DMM32AT_AILSB); @@ -453,10 +445,8 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) if ((status & DMM32AT_STATUS) == 0) break; } - if (i == 40000) { - printk(KERN_WARNING "dmm32at: timeout\n"); + if (i == 40000) return -ETIMEDOUT; - } if (devpriv->ai_scans_left > 1) { /* start the clock and enable the interrupts */ @@ -467,8 +457,6 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) outb(0xff, dev->iobase + DMM32AT_CONV); } -/* printk("dmmat32 in command\n"); */ - /* for(i=0;ichanlist_len;i++) */ /* comedi_buf_put(s->async,i*100); */ @@ -556,7 +544,6 @@ static int dmm32at_ao_winsn(struct comedi_device *dev, lo = data[i] & 0x00ff; /* high byte also contains channel number */ hi = (data[i] >> 8) + chan * (1 << 6); - /* printk("writing 0x%02x 0x%02x\n",hi,lo); */ /* write the low and high values to the board */ outb(lo, dev->iobase + DMM32AT_DACLSB); outb(hi, dev->iobase + DMM32AT_DACMSB); @@ -567,10 +554,9 @@ static int dmm32at_ao_winsn(struct comedi_device *dev, if ((status & DMM32AT_DACBUSY) == 0) break; } - if (i == 40000) { - printk(KERN_WARNING "dmm32at: timeout\n"); + if (i == 40000) return -ETIMEDOUT; - } + /* dummy read to update trigger the output */ status = inb(dev->iobase + DMM32AT_DACMSB); @@ -682,9 +668,6 @@ static int dmm32at_attach(struct comedi_device *dev, int ret; struct comedi_subdevice *s; unsigned char aihi, ailo, fifostat, aistat, intstat, airback; - unsigned int irq; - - irq = it->options[1]; ret = comedi_request_region(dev, it->options[0], DMM32AT_MEMSIZE); if (ret) @@ -723,26 +706,17 @@ static int dmm32at_attach(struct comedi_device *dev, intstat = inb(dev->iobase + DMM32AT_INTCLOCK); airback = inb(dev->iobase + DMM32AT_AIRBACK); - printk(KERN_DEBUG "dmm32at: lo=0x%02x hi=0x%02x fifostat=0x%02x\n", - ailo, aihi, fifostat); - printk(KERN_DEBUG - "dmm32at: aistat=0x%02x intstat=0x%02x airback=0x%02x\n", - aistat, intstat, airback); - if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) || (aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) { - printk(KERN_ERR "dmmat32: board detection failed\n"); + dev_err(dev->class_dev, "board detection failed\n"); return -EIO; } - /* board is there, register interrupt */ - if (irq) { - ret = request_irq(irq, dmm32at_isr, 0, dev->board_name, dev); - if (ret < 0) { - printk(KERN_ERR "dmm32at: irq 
conflict\n"); - return ret; - } - dev->irq = irq; + if (it->options[1]) { + ret = request_irq(it->options[1], dmm32at_isr, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; } devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); @@ -754,20 +728,22 @@ static int dmm32at_attach(struct comedi_device *dev, return ret; s = &dev->subdevices[0]; - dev->read_subdev = s; /* analog input subdevice */ s->type = COMEDI_SUBD_AI; /* we support single-ended (ground) and differential */ - s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF; s->n_chan = 32; s->maxdata = 0xffff; s->range_table = &dmm32at_airanges; - s->len_chanlist = 32; /* This is the maximum chanlist length that - the board can handle */ s->insn_read = dmm32at_ai_rinsn; - s->do_cmd = dmm32at_ai_cmd; - s->do_cmdtest = dmm32at_ai_cmdtest; - s->cancel = dmm32at_ai_cancel; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = 32; + s->do_cmd = dmm32at_ai_cmd; + s->do_cmdtest = dmm32at_ai_cmdtest; + s->cancel = dmm32at_ai_cancel; + } s = &dev->subdevices[1]; /* analog output subdevice */ @@ -799,11 +775,7 @@ static int dmm32at_attach(struct comedi_device *dev, s->insn_bits = dmm32at_dio_insn_bits; s->insn_config = dmm32at_dio_insn_config; - /* success */ - printk(KERN_INFO "comedi%d: dmm32at: attached\n", dev->minor); - - return 1; - + return 0; } static struct comedi_driver dmm32at_driver = { diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c index 811c8c59c01761..d4d4e4b497dcdb 100644 --- a/drivers/staging/comedi/drivers/dt2801.c +++ b/drivers/staging/comedi/drivers/dt2801.c @@ -90,58 +90,42 @@ Configuration options: #if 0 /* ignore 'defined but not used' warning */ -static const struct comedi_lrange range_dt2801_ai_pgh_bipolar = { 4, { - RANGE(-10, - 10), - RANGE(-5, - 5), - RANGE - (-2.5, - 2.5), - RANGE - (-1.25, - 1.25), - } +static const struct comedi_lrange range_dt2801_ai_pgh_bipolar = { + 4, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25) + } }; #endif -static const struct comedi_lrange range_dt2801_ai_pgl_bipolar = { 4, { - RANGE(-10, - 10), - RANGE(-1, - 1), - RANGE - (-0.1, - 0.1), - RANGE - (-0.02, - 0.02), - } +static const struct comedi_lrange range_dt2801_ai_pgl_bipolar = { + 4, { + BIP_RANGE(10), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.02) + } }; #if 0 /* ignore 'defined but not used' warning */ -static const struct comedi_lrange range_dt2801_ai_pgh_unipolar = { 4, { - RANGE(0, - 10), - RANGE(0, - 5), - RANGE(0, - 2.5), - RANGE(0, - 1.25), - } +static const struct comedi_lrange range_dt2801_ai_pgh_unipolar = { + 4, { + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; #endif -static const struct comedi_lrange range_dt2801_ai_pgl_unipolar = { 4, { - RANGE(0, - 10), - RANGE(0, - 1), - RANGE(0, - 0.1), - RANGE(0, - 0.02), - } +static const struct comedi_lrange range_dt2801_ai_pgl_unipolar = { + 4, { + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.02) + } }; struct dt2801_board { @@ -289,13 +273,6 @@ static int dt2801_writedata(struct comedi_device *dev, unsigned int data) outb_p(data & 0xff, dev->iobase + DT2801_DATA); return 0; } -#if 0 - if (stat & DT_S_READY) { - printk - ("dt2801: ready flag set (bad!) 
in dt2801_writedata()\n"); - return -EIO; - } -#endif } while (--timeout > 0); return -ETIME; @@ -343,11 +320,11 @@ static int dt2801_writecmd(struct comedi_device *dev, int command) stat = inb_p(dev->iobase + DT2801_STATUS); if (stat & DT_S_COMPOSITE_ERROR) { - printk - ("dt2801: composite-error in dt2801_writecmd(), ignoring\n"); + dev_dbg(dev->class_dev, + "composite-error in %s, ignoring\n", __func__); } if (!(stat & DT_S_READY)) - printk("dt2801: !ready in dt2801_writecmd(), ignoring\n"); + dev_dbg(dev->class_dev, "!ready in %s, ignoring\n", __func__); outb_p(command, dev->iobase + DT2801_CMD); return 0; @@ -359,17 +336,12 @@ static int dt2801_reset(struct comedi_device *dev) unsigned int stat; int timeout; - DPRINTK("dt2801: resetting board...\n"); - DPRINTK("fingerprint: 0x%02x 0x%02x\n", inb_p(dev->iobase), - inb_p(dev->iobase + 1)); - /* pull random data from data port */ inb_p(dev->iobase + DT2801_DATA); inb_p(dev->iobase + DT2801_DATA); inb_p(dev->iobase + DT2801_DATA); inb_p(dev->iobase + DT2801_DATA); - DPRINTK("dt2801: stop\n"); /* dt2801_writecmd(dev,DT_C_STOP); */ outb_p(DT_C_STOP, dev->iobase + DT2801_CMD); @@ -382,12 +354,10 @@ static int dt2801_reset(struct comedi_device *dev) break; } while (timeout--); if (!timeout) - printk("dt2801: timeout 1 status=0x%02x\n", stat); + dev_dbg(dev->class_dev, "timeout 1 status=0x%02x\n", stat); - /* printk("dt2801: reading dummy\n"); */ /* dt2801_readdata(dev,&board_code); */ - DPRINTK("dt2801: reset\n"); outb_p(DT_C_RESET, dev->iobase + DT2801_CMD); /* dt2801_writecmd(dev,DT_C_RESET); */ @@ -399,13 +369,10 @@ static int dt2801_reset(struct comedi_device *dev) break; } while (timeout--); if (!timeout) - printk("dt2801: timeout 2 status=0x%02x\n", stat); + dev_dbg(dev->class_dev, "timeout 2 status=0x%02x\n", stat); - DPRINTK("dt2801: reading code\n"); dt2801_readdata(dev, &board_code); - DPRINTK("dt2801: ok. 
code=0x%02x\n", board_code); - return board_code; } @@ -465,12 +432,12 @@ static int dt2801_error(struct comedi_device *dev, int stat) { if (stat < 0) { if (stat == -ETIME) - printk("dt2801: timeout\n"); + dev_dbg(dev->class_dev, "timeout\n"); else - printk("dt2801: error %d\n", stat); + dev_dbg(dev->class_dev, "error %d\n", stat); return stat; } - printk("dt2801: error status 0x%02x, resetting...\n", stat); + dev_dbg(dev->class_dev, "error status 0x%02x, resetting...\n", stat); dt2801_reset(dev); dt2801_reset(dev); @@ -601,8 +568,8 @@ static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (boardtypes[type].boardcode == board_code) goto havetype; } - printk("dt2801: unrecognized board code=0x%02x, contact author\n", - board_code); + dev_dbg(dev->class_dev, + "unrecognized board code=0x%02x, contact author\n", board_code); type = 0; havetype: diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c index 0ca02fa7ba1b96..4271903facd7b9 100644 --- a/drivers/staging/comedi/drivers/dt2811.c +++ b/drivers/staging/comedi/drivers/dt2811.c @@ -42,60 +42,59 @@ Configuration options: */ #include -#include #include "../comedidev.h" static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = { 4, { - RANGE(0, 5), - RANGE(0, 2.5), - RANGE(0, 1.25), - RANGE(0, 0.625) + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25), + UNI_RANGE(0.625) } }; static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = { 4, { - RANGE(-2.5, 2.5), - RANGE(-1.25, 1.25), - RANGE(-0.625, 0.625), - RANGE(-0.3125, 0.3125) + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + BIP_RANGE(0.3125) } }; static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = { 4, { - RANGE(-5, 5), - RANGE(-2.5, 2.5), - RANGE(-1.25, 1.25), - RANGE(-0.625, 0.625) + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) } }; static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = { 4, { - RANGE(0, 5), - RANGE(0, 0.5), - RANGE(0, 0.05), - RANGE(0, 0.01) + UNI_RANGE(5), + UNI_RANGE(0.5), + UNI_RANGE(0.05), + UNI_RANGE(0.01) } }; static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = { 4, { - RANGE(-2.5, 2.5), - RANGE(-0.25, 0.25), - RANGE(-0.025, 0.025), - RANGE(-0.005, 0.005) + BIP_RANGE(2.5), + BIP_RANGE(0.25), + BIP_RANGE(0.025), + BIP_RANGE(0.005) } }; static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = { 4, { - RANGE(-5, 5), - RANGE(-0.5, 0.5), - RANGE(-0.05, 0.05), - RANGE(-0.01, 0.01) + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.01) } }; @@ -227,33 +226,6 @@ static const struct comedi_lrange *dac_range_types[] = { #define DT2811_TIMEOUT 5 -#if 0 -static irqreturn_t dt2811_interrupt(int irq, void *d) -{ - int lo, hi; - int data; - struct comedi_device *dev = d; - struct dt2811_private *devpriv = dev->private; - - if (!dev->attached) { - comedi_error(dev, "spurious interrupt"); - return IRQ_HANDLED; - } - - lo = inb(dev->iobase + DT2811_ADDATLO); - hi = inb(dev->iobase + DT2811_ADDATHI); - - data = lo + (hi << 8); - - if (!(--devpriv->ntrig)) { - /* how to turn off acquisition */ - s->async->events |= COMEDI_SB_EOA; - } - comedi_event(dev, s); - return IRQ_HANDLED; -} -#endif - static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { @@ -278,35 +250,6 @@ static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s, return i; } -#if 0 -/* Wow. This is code from the Comedi stone age. 
But it hasn't been - * replaced, so I'll let it stay. */ -int dt2811_adtrig(kdev_t minor, comedi_adtrig *adtrig) -{ - struct comedi_device *dev = comedi_devices + minor; - - if (adtrig->n < 1) - return 0; - dev->curadchan = adtrig->chan; - switch (dev->i_admode) { - case COMEDI_MDEMAND: - dev->ntrig = adtrig->n - 1; - /* not necessary */ - /*printk("dt2811: AD soft trigger\n"); */ - /*outb(DT2811_CLRERROR|DT2811_INTENB, - dev->iobase+DT2811_ADCSR); */ - outb(dev->curadchan, dev->iobase + DT2811_ADGCR); - do_gettimeofday(&trigtime); - break; - case COMEDI_MCONTS: - dev->ntrig = adtrig->n; - break; - } - - return 0; -} -#endif - static int dt2811_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { @@ -386,10 +329,7 @@ static int dt2811_do_insn_bits(struct comedi_device *dev, */ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it) { - /* int i, irq; */ - /* unsigned long irqs; */ - /* long flags; */ - + /* int i; */ const struct dt2811_board *board = comedi_board(dev); struct dt2811_private *devpriv; int ret; @@ -406,45 +346,6 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it) i = inb(dev->iobase + DT2811_ADDATHI); #endif -#if 0 - irq = it->options[1]; - if (irq < 0) { - save_flags(flags); - sti(); - irqs = probe_irq_on(); - - outb(DT2811_CLRERROR | DT2811_INTENB, - dev->iobase + DT2811_ADCSR); - outb(0, dev->iobase + DT2811_ADGCR); - - udelay(100); - - irq = probe_irq_off(irqs); - restore_flags(flags); - - /*outb(DT2811_CLRERROR|DT2811_INTENB, - dev->iobase+DT2811_ADCSR);*/ - - if (inb(dev->iobase + DT2811_ADCSR) & DT2811_ADERROR) - printk(KERN_ERR "error probing irq (bad)\n"); - dev->irq = 0; - if (irq > 0) { - i = inb(dev->iobase + DT2811_ADDATLO); - i = inb(dev->iobase + DT2811_ADDATHI); - printk(KERN_INFO "(irq = %d)\n", irq); - ret = request_irq(irq, dt2811_interrupt, 0, - dev->board_name, dev); - if (ret < 0) - return -EIO; - dev->irq = irq; - } else if (irq == 0) { - printk(KERN_INFO "(no irq)\n"); - } else { - printk(KERN_ERR "( multiple irq's -- this is bad! 
)\n"); - } - } -#endif - ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c index 6514b9e0055283..abad6e49c1c167 100644 --- a/drivers/staging/comedi/drivers/dt2814.c +++ b/drivers/staging/comedi/drivers/dt2814.c @@ -80,15 +80,12 @@ static int dt2814_ai_insn_read(struct comedi_device *dev, outb(chan, dev->iobase + DT2814_CSR); for (i = 0; i < DT2814_TIMEOUT; i++) { status = inb(dev->iobase + DT2814_CSR); - printk(KERN_INFO "dt2814: status: %02x\n", status); udelay(10); if (status & DT2814_FINISH) break; } - if (i >= DT2814_TIMEOUT) { - printk(KERN_INFO "dt2814: status: %02x\n", status); + if (i >= DT2814_TIMEOUT) return -ETIMEDOUT; - } hi = inb(dev->iobase + DT2814_DATA); lo = inb(dev->iobase + DT2814_DATA); @@ -200,7 +197,7 @@ static irqreturn_t dt2814_interrupt(int irq, void *d) int lo, hi; struct comedi_device *dev = d; struct dt2814_private *devpriv = dev->private; - struct comedi_subdevice *s; + struct comedi_subdevice *s = dev->read_subdev; int data; if (!dev->attached) { @@ -208,8 +205,6 @@ static irqreturn_t dt2814_interrupt(int irq, void *d) return IRQ_HANDLED; } - s = &dev->subdevices[0]; - hi = inb(dev->iobase + DT2814_DATA); lo = inb(dev->iobase + DT2814_DATA); @@ -238,9 +233,9 @@ static irqreturn_t dt2814_interrupt(int irq, void *d) static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct dt2814_private *devpriv; - int i, irq; - int ret; struct comedi_subdevice *s; + int ret; + int i; ret = comedi_request_region(dev, it->options[0], DT2814_SIZE); if (ret) @@ -249,49 +244,17 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it) outb(0, dev->iobase + DT2814_CSR); udelay(100); if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) { - printk(KERN_ERR "reset error (fatal)\n"); + dev_err(dev->class_dev, "reset error (fatal)\n"); return -EIO; } i = inb(dev->iobase + DT2814_DATA); i = inb(dev->iobase + DT2814_DATA); - irq = it->options[1]; -#if 0 - if (irq < 0) { - save_flags(flags); - sti(); - irqs = probe_irq_on(); - - outb(0, dev->iobase + DT2814_CSR); - - udelay(100); - - irq = probe_irq_off(irqs); - restore_flags(flags); - if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) - printk(KERN_DEBUG "error probing irq (bad)\n"); - - - i = inb(dev->iobase + DT2814_DATA); - i = inb(dev->iobase + DT2814_DATA); - } -#endif - dev->irq = 0; - if (irq > 0) { - if (request_irq(irq, dt2814_interrupt, 0, "dt2814", dev)) { - printk(KERN_WARNING "(irq %d unavailable)\n", irq); - } else { - printk(KERN_INFO "( irq = %d )\n", irq); - dev->irq = irq; - } - } else if (irq == 0) { - printk(KERN_WARNING "(no irq)\n"); - } else { -#if 0 - printk(KERN_DEBUG "(probe returned multiple irqs--bad)\n"); -#else - printk(KERN_WARNING "(irq probe not implemented)\n"); -#endif + if (it->options[1]) { + ret = request_irq(it->options[1], dt2814_interrupt, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; } ret = comedi_alloc_subdevices(dev, 1); @@ -303,16 +266,19 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it) return -ENOMEM; s = &dev->subdevices[0]; - dev->read_subdev = s; s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = 16; /* XXX */ - s->len_chanlist = 1; s->insn_read = dt2814_ai_insn_read; - s->do_cmd = dt2814_ai_cmd; - s->do_cmdtest = dt2814_ai_cmdtest; s->maxdata = 0xfff; s->range_table = 
&range_unknown; /* XXX */ + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = 1; + s->do_cmd = dt2814_ai_cmd; + s->do_cmdtest = dt2814_ai_cmdtest; + } return 0; } diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c index 34040f0175e88d..ee24717821e187 100644 --- a/drivers/staging/comedi/drivers/dt2815.c +++ b/drivers/staging/comedi/drivers/dt2815.c @@ -107,8 +107,9 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, status = dt2815_wait_for_status(dev, 0x00); if (status != 0) { - printk(KERN_WARNING "dt2815: failed to write low byte " - "on %d reason %x\n", chan, status); + dev_dbg(dev->class_dev, + "failed to write low byte on %d reason %x\n", + chan, status); return -EBUSY; } @@ -116,8 +117,9 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, status = dt2815_wait_for_status(dev, 0x10); if (status != 0x10) { - printk(KERN_WARNING "dt2815: failed to write high byte " - "on %d reason %x\n", chan, status); + dev_dbg(dev->class_dev, + "failed to write high byte on %d reason %x\n", + chan, status); return -EBUSY; } devpriv->ao_readback[chan] = data[i]; @@ -200,12 +202,13 @@ static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it) unsigned int program; program = (it->options[4] & 0x3) << 3 | 0x7; outb(program, dev->iobase + DT2815_DATA); - printk(KERN_INFO ", program: 0x%x (@t=%d)\n", - program, i); + dev_dbg(dev->class_dev, "program: 0x%x (@t=%d)\n", + program, i); break; } else if (status != 0x00) { - printk(KERN_WARNING "dt2815: unexpected status 0x%x " - "(@t=%d)\n", status, i); + dev_dbg(dev->class_dev, + "unexpected status 0x%x (@t=%d)\n", + status, i); if (status & 0x60) outb(0x00, dev->iobase + DT2815_STATUS); } diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c index a01e6b553887ab..895f73a190233a 100644 --- a/drivers/staging/comedi/drivers/dt282x.c +++ b/drivers/staging/comedi/drivers/dt282x.c @@ -63,8 +63,6 @@ Configuration options: #include "comedi_fc.h" -#define DEBUG - #define DT2821_TIMEOUT 100 /* 500 us */ #define DT2821_SIZE 0x10 @@ -156,55 +154,55 @@ Configuration options: static const struct comedi_lrange range_dt282x_ai_lo_bipolar = { 4, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-2.5, 2.5), - RANGE(-1.25, 1.25) + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25) } }; static const struct comedi_lrange range_dt282x_ai_lo_unipolar = { 4, { - RANGE(0, 10), - RANGE(0, 5), - RANGE(0, 2.5), - RANGE(0, 1.25) + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) } }; static const struct comedi_lrange range_dt282x_ai_5_bipolar = { 4, { - RANGE(-5, 5), - RANGE(-2.5, 2.5), - RANGE(-1.25, 1.25), - RANGE(-0.625, 0.625) + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) } }; static const struct comedi_lrange range_dt282x_ai_5_unipolar = { 4, { - RANGE(0, 5), - RANGE(0, 2.5), - RANGE(0, 1.25), - RANGE(0, 0.625), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25), + UNI_RANGE(0.625) } }; static const struct comedi_lrange range_dt282x_ai_hi_bipolar = { 4, { - RANGE(-10, 10), - RANGE(-1, 1), - RANGE(-0.1, 0.1), - RANGE(-0.02, 0.02) + BIP_RANGE(10), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.02) } }; static const struct comedi_lrange range_dt282x_ai_hi_unipolar = { 4, { - RANGE(0, 10), - RANGE(0, 1), - RANGE(0, 0.1), - RANGE(0, 0.02) + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.02) } }; @@ -308,15 
+306,15 @@ static void dt282x_munge(struct comedi_device *dev, unsigned short *buf, static void dt282x_ao_dma_interrupt(struct comedi_device *dev) { struct dt282x_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->write_subdev; void *ptr; int size; int i; - struct comedi_subdevice *s = &dev->subdevices[1]; outw(devpriv->supcsr | DT2821_CLRDMADNE, dev->iobase + DT2821_SUPCSR); if (!s->async->prealloc_buf) { - printk(KERN_ERR "async->data disappeared. dang!\n"); + dev_err(dev->class_dev, "no buffer in %s\n", __func__); return; } @@ -329,7 +327,7 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev) size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize); if (size == 0) { - printk(KERN_ERR "dt282x: AO underrun\n"); + dev_err(dev->class_dev, "AO underrun\n"); dt282x_ao_cancel(dev, s); s->async->events |= COMEDI_CB_OVERFLOW; return; @@ -341,16 +339,16 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev) static void dt282x_ai_dma_interrupt(struct comedi_device *dev) { struct dt282x_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; void *ptr; int size; int i; int ret; - struct comedi_subdevice *s = &dev->subdevices[0]; outw(devpriv->supcsr | DT2821_CLRDMADNE, dev->iobase + DT2821_SUPCSR); if (!s->async->prealloc_buf) { - printk(KERN_ERR "async->data disappeared. dang!\n"); + dev_err(dev->class_dev, "no buffer in %s\n", __func__); return; } @@ -371,7 +369,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev) devpriv->nread -= size / 2; if (devpriv->nread < 0) { - printk(KERN_INFO "dt282x: off by one\n"); + dev_info(dev->class_dev, "nread off by one\n"); devpriv->nread = 0; } if (!devpriv->nread) { @@ -450,8 +448,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct dt282x_private *devpriv = dev->private; - struct comedi_subdevice *s; - struct comedi_subdevice *s_ao; + struct comedi_subdevice *s = dev->read_subdev; + struct comedi_subdevice *s_ao = dev->write_subdev; unsigned int supcsr, adcsr, dacsr; int handled = 0; @@ -460,8 +458,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d) return IRQ_HANDLED; } - s = &dev->subdevices[0]; - s_ao = &dev->subdevices[1]; adcsr = inw(dev->iobase + DT2821_ADCSR); dacsr = inw(dev->iobase + DT2821_DACSR); supcsr = inw(dev->iobase + DT2821_SUPCSR); @@ -481,13 +477,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d) handled = 1; } if (dacsr & DT2821_DAERR) { -#if 0 - static int warn = 5; - if (--warn <= 0) { - disable_irq(dev->irq); - printk(KERN_INFO "disabling irq\n"); - } -#endif comedi_error(dev, "D/A error"); dt282x_ao_cancel(dev, s_ao); s->async->events |= COMEDI_CB_ERROR; @@ -520,8 +509,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d) } #endif comedi_event(dev, s); - /* printk("adcsr=0x%02x dacsr-0x%02x supcsr=0x%02x\n", - adcsr, dacsr, supcsr); */ + return IRQ_RETVAL(handled); } @@ -894,7 +882,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev, size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf, devpriv->dma_maxsize); if (size == 0) { - printk(KERN_ERR "dt282x: AO underrun\n"); + dev_err(dev->class_dev, "AO underrun\n"); return -EPIPE; } prep_ao_dma(dev, 0, size); @@ -902,7 +890,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev, size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf, devpriv->dma_maxsize); if (size == 0) { - printk(KERN_ERR "dt282x: AO underrun\n"); + dev_err(dev->class_dev, "AO underrun\n"); return -EPIPE; } prep_ao_dma(dev, 1, size); @@ -1062,10 
+1050,8 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2) devpriv->usedma = 0; - if (!dma1 && !dma2) { - printk(KERN_ERR " (no dma)"); + if (!dma1 && !dma2) return 0; - } if (dma1 == dma2 || dma1 < 5 || dma2 < 5 || dma1 > 7 || dma2 > 7) return -EINVAL; @@ -1090,12 +1076,8 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2) devpriv->dma_maxsize = PAGE_SIZE; devpriv->dma[0].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); devpriv->dma[1].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); - if (!devpriv->dma[0].buf || !devpriv->dma[1].buf) { - printk(KERN_ERR " can't get DMA memory"); + if (!devpriv->dma[0].buf || !devpriv->dma[1].buf) return -ENOMEM; - } - - printk(KERN_INFO " (dma=%d,%d)", dma1, dma2); devpriv->usedma = 1; @@ -1120,9 +1102,9 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) { const struct dt282x_board *board = comedi_board(dev); struct dt282x_private *devpriv; - int i, irq; - int ret; struct comedi_subdevice *s; + int ret; + int i; ret = comedi_request_region(dev, it->options[0], DT2821_SIZE); if (ret) @@ -1130,14 +1112,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) outw(DT2821_BDINIT, dev->iobase + DT2821_SUPCSR); i = inw(dev->iobase + DT2821_ADCSR); -#ifdef DEBUG - printk(KERN_DEBUG " fingerprint=%x,%x,%x,%x,%x", - inw(dev->iobase + DT2821_ADCSR), - inw(dev->iobase + DT2821_CHANCSR), - inw(dev->iobase + DT2821_DACSR), - inw(dev->iobase + DT2821_SUPCSR), - inw(dev->iobase + DT2821_TMRCTR)); -#endif if (((inw(dev->iobase + DT2821_ADCSR) & DT2821_ADCSR_MASK) != DT2821_ADCSR_VAL) || @@ -1149,58 +1123,28 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) != DT2821_SUPCSR_VAL) || ((inw(dev->iobase + DT2821_TMRCTR) & DT2821_TMRCTR_MASK) != DT2821_TMRCTR_VAL)) { - printk(KERN_ERR " board not found"); + dev_err(dev->class_dev, "board not found\n"); return -EIO; } /* should do board test */ - irq = it->options[opt_irq]; -#if 0 - if (irq < 0) { - unsigned long flags; - int irqs; - - save_flags(flags); - sti(); - irqs = probe_irq_on(); - - /* trigger interrupt */ - - udelay(100); - - irq = probe_irq_off(irqs); - restore_flags(flags); - if (0 /* error */) - printk(KERN_ERR " error probing irq (bad)"); - } -#endif - if (irq > 0) { - printk(KERN_INFO " ( irq = %d )", irq); - ret = request_irq(irq, dt282x_interrupt, 0, + if (it->options[opt_irq] > 0) { + ret = request_irq(it->options[opt_irq], dt282x_interrupt, 0, dev->board_name, dev); - if (ret < 0) { - printk(KERN_ERR " failed to get irq\n"); - return -EIO; - } - dev->irq = irq; - } else if (irq == 0) { - printk(KERN_INFO " (no irq)"); - } else { -#if 0 - printk(KERN_INFO " (probe returned multiple irqs--bad)"); -#else - printk(KERN_INFO " (irq probe not implemented)"); -#endif + if (ret == 0) + dev->irq = it->options[opt_irq]; } devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; - ret = dt282x_grab_dma(dev, it->options[opt_dma1], - it->options[opt_dma2]); - if (ret < 0) - return ret; + if (dev->irq) { + ret = dt282x_grab_dma(dev, it->options[opt_dma1], + it->options[opt_dma2]); + if (ret < 0) + return ret; + } ret = comedi_alloc_subdevices(dev, 3); if (ret) @@ -1208,22 +1152,25 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) s = &dev->subdevices[0]; - dev->read_subdev = s; /* ai subdevice */ s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_CMD_READ | + s->subdev_flags = SDF_READABLE | 
((it->options[opt_diff]) ? SDF_DIFF : SDF_COMMON); s->n_chan = (it->options[opt_diff]) ? board->adchan_di : board->adchan_se; s->insn_read = dt282x_ai_insn_read; - s->do_cmdtest = dt282x_ai_cmdtest; - s->do_cmd = dt282x_ai_cmd; - s->cancel = dt282x_ai_cancel; s->maxdata = (1 << board->adbits) - 1; - s->len_chanlist = 16; s->range_table = opt_ai_range_lkup(board->ispgl, it->options[opt_ai_range]); devpriv->ad_2scomp = it->options[opt_ai_twos]; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = 16; + s->do_cmdtest = dt282x_ai_cmdtest; + s->do_cmd = dt282x_ai_cmd; + s->cancel = dt282x_ai_cancel; + } s = &dev->subdevices[1]; @@ -1231,15 +1178,10 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (s->n_chan) { /* ao subsystem */ s->type = COMEDI_SUBD_AO; - dev->write_subdev = s; - s->subdev_flags = SDF_WRITABLE | SDF_CMD_WRITE; + s->subdev_flags = SDF_WRITABLE; s->insn_read = dt282x_ao_insn_read; s->insn_write = dt282x_ao_insn_write; - s->do_cmdtest = dt282x_ao_cmdtest; - s->do_cmd = dt282x_ao_cmd; - s->cancel = dt282x_ao_cancel; s->maxdata = (1 << board->dabits) - 1; - s->len_chanlist = 2; s->range_table_list = devpriv->darangelist; devpriv->darangelist[0] = opt_ao_range_lkup(it->options[opt_ao0_range]); @@ -1247,6 +1189,14 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) opt_ao_range_lkup(it->options[opt_ao1_range]); devpriv->da0_2scomp = it->options[opt_ao0_twos]; devpriv->da1_2scomp = it->options[opt_ao1_twos]; + if (dev->irq) { + dev->write_subdev = s; + s->subdev_flags |= SDF_CMD_WRITE; + s->len_chanlist = 2; + s->do_cmdtest = dt282x_ao_cmdtest; + s->do_cmd = dt282x_ao_cmd; + s->cancel = dt282x_ao_cancel; + } } else { s->type = COMEDI_SUBD_UNUSED; } @@ -1261,8 +1211,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->maxdata = 1; s->range_table = &range_digital; - printk(KERN_INFO "\n"); - return 0; } diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index 292226eeff924d..f52a4476cb73b7 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -48,8 +48,6 @@ AO commands are not supported. you the docs without one, also. 
*/ -#define DEBUG 1 - #include #include #include @@ -253,24 +251,6 @@ struct dt3k_private { unsigned int ai_rear; }; -#ifdef DEBUG -static char *intr_flags[] = { - "AdFull", "AdSwError", "AdHwError", "DaEmpty", - "DaSwError", "DaHwError", "CtDone", "CmDone", -}; - -static void debug_intr_flags(unsigned int flags) -{ - int i; - printk(KERN_DEBUG "dt3k: intr_flags:"); - for (i = 0; i < 8; i++) { - if (flags & (1 << i)) - printk(KERN_CONT " %s", intr_flags[i]); - } - printk(KERN_CONT "\n"); -} -#endif - #define TIMEOUT 100 static void dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd) @@ -372,17 +352,13 @@ static irqreturn_t dt3k_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct dt3k_private *devpriv = dev->private; - struct comedi_subdevice *s; + struct comedi_subdevice *s = dev->read_subdev; unsigned int status; if (!dev->attached) return IRQ_NONE; - s = &dev->subdevices[0]; status = readw(devpriv->io_addr + DPR_Intr_Flag); -#ifdef DEBUG - debug_intr_flags(status); -#endif if (status & DT3000_ADFULL) { dt3k_ai_empty_fifo(dev, s); @@ -725,29 +701,33 @@ static int dt3000_auto_attach(struct comedi_device *dev, if (!devpriv->io_addr) return -ENOMEM; - ret = request_irq(pcidev->irq, dt3k_interrupt, IRQF_SHARED, - dev->board_name, dev); - if (ret) - return ret; - dev->irq = pcidev->irq; + if (pcidev->irq) { + ret = request_irq(pcidev->irq, dt3k_interrupt, IRQF_SHARED, + dev->board_name, dev); + if (ret == 0) + dev->irq = pcidev->irq; + } ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; s = &dev->subdevices[0]; - dev->read_subdev = s; /* ai subdevice */ s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF; s->n_chan = this_board->adchan; s->insn_read = dt3k_ai_insn; s->maxdata = (1 << this_board->adbits) - 1; - s->len_chanlist = 512; s->range_table = &range_dt3000_ai; /* XXX */ - s->do_cmd = dt3k_ai_cmd; - s->do_cmdtest = dt3k_ai_cmdtest; - s->cancel = dt3k_ai_cancel; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = 512; + s->do_cmd = dt3k_ai_cmd; + s->do_cmdtest = dt3k_ai_cmdtest; + s->cancel = dt3k_ai_cancel; + } s = &dev->subdevices[1]; /* ao subsystem */ @@ -818,7 +798,7 @@ static int dt3000_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &dt3000_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(dt3000_pci_table) = { +static const struct pci_device_id dt3000_pci_table[] = { { PCI_VDEVICE(DT, 0x0022), BOARD_DT3001 }, { PCI_VDEVICE(DT, 0x0023), BOARD_DT3002 }, { PCI_VDEVICE(DT, 0x0024), BOARD_DT3003 }, diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c index 73af600c172532..b3aeb6fb2ad0d9 100644 --- a/drivers/staging/comedi/drivers/dt9812.c +++ b/drivers/staging/comedi/drivers/dt9812.c @@ -41,7 +41,6 @@ for my needs. 
#include #include #include -#include #include #include diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c index f2a9f1c2f3b6df..f224825830ba9f 100644 --- a/drivers/staging/comedi/drivers/dyna_pci10xx.c +++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c @@ -42,11 +42,12 @@ #define READ_TIMEOUT 50 -static const struct comedi_lrange range_pci1050_ai = { 3, { - BIP_RANGE(10), - BIP_RANGE(5), - UNI_RANGE(10) - } +static const struct comedi_lrange range_pci1050_ai = { + 3, { + BIP_RANGE(10), + BIP_RANGE(5), + UNI_RANGE(10) + } }; static const char range_codes_pci1050_ai[] = { 0x00, 0x10, 0x30 }; @@ -90,8 +91,7 @@ static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev, goto conv_finish; } data[n] = 0; - printk(KERN_DEBUG "comedi: dyna_pci10xx: " - "timeout reading analog input\n"); + dev_dbg(dev->class_dev, "timeout reading analog input\n"); continue; conv_finish: /* mask the first 4 bits - EOC bits */ @@ -260,7 +260,7 @@ static int dyna_pci10xx_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(dyna_pci10xx_pci_table) = { +static const struct pci_device_id dyna_pci10xx_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_PLX, 0x1050) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c index e3ff4c43897978..a99ddf00ddc44a 100644 --- a/drivers/staging/comedi/drivers/fl512.c +++ b/drivers/staging/comedi/drivers/fl512.c @@ -16,8 +16,6 @@ Configuration options: [0] - I/O port base address */ -#define DEBUG 0 - #include #include "../comedidev.h" @@ -28,15 +26,16 @@ struct fl512_private { unsigned short ao_readback[2]; }; -static const struct comedi_lrange range_fl512 = { 4, { - BIP_RANGE(0.5), - BIP_RANGE(1), - BIP_RANGE(5), - BIP_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(5), - UNI_RANGE(10), - } +static const struct comedi_lrange range_fl512 = { + 4, { + BIP_RANGE(0.5), + BIP_RANGE(1), + BIP_RANGE(5), + BIP_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(5), + UNI_RANGE(10) + } }; /* diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c index 559bf5583530c2..de60a2871d70ef 100644 --- a/drivers/staging/comedi/drivers/gsc_hpdi.c +++ b/drivers/staging/comedi/drivers/gsc_hpdi.c @@ -60,15 +60,6 @@ static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t handle_interrupt(int irq, void *d); static int dio_config_block_size(struct comedi_device *dev, unsigned int *data); -#undef HPDI_DEBUG /* disable debugging messages */ -/* #define HPDI_DEBUG enable debugging code */ - -#ifdef HPDI_DEBUG -#define DEBUG_PRINT(format, args...) pr_debug(format , ## args) -#else -#define DEBUG_PRINT(format, args...) 
no_printk(pr_fmt(format), ## args) -#endif - #define TIMER_BASE 50 /* 20MHz master clock */ #define DMA_BUFFER_SIZE 0x10000 #define NUM_DMA_BUFFERS 4 @@ -260,32 +251,6 @@ static void init_plx9080(struct comedi_device *dev) uint32_t bits; void __iomem *plx_iobase = devpriv->plx9080_iobase; - /* plx9080 dump */ - DEBUG_PRINT(" plx interrupt status 0x%x\n", - readl(plx_iobase + PLX_INTRCS_REG)); - DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG)); - DEBUG_PRINT(" plx control reg 0x%x\n", - readl(devpriv->plx9080_iobase + PLX_CONTROL_REG)); - - DEBUG_PRINT(" plx revision 0x%x\n", - readl(plx_iobase + PLX_REVISION_REG)); - DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n", - readl(plx_iobase + PLX_DMA0_MODE_REG)); - DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n", - readl(plx_iobase + PLX_DMA1_MODE_REG)); - DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n", - readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG)); - DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n", - readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG)); - DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n", - readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG)); - DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n", - readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG)); - DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n", - readb(plx_iobase + PLX_DMA0_CS_REG)); - DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n", - readl(plx_iobase + PLX_DMA0_THRESHOLD_REG)); - DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG)); #ifdef __BIG_ENDIAN bits = BIGEND_DMA0 | BIGEND_DMA1; #else @@ -395,10 +360,6 @@ static int setup_dma_descriptors(struct comedi_device *dev, if (transfer_size == 0) return -1; - DEBUG_PRINT(" transfer_size %i\n", transfer_size); - DEBUG_PRINT(" descriptors at 0x%lx\n", - (unsigned long)devpriv->dma_desc_phys_addr); - buffer_offset = 0; buffer_index = 0; for (i = 0; i < NUM_DMA_DESCRIPTORS && @@ -423,21 +384,11 @@ static int setup_dma_descriptors(struct comedi_device *dev, buffer_offset = 0; buffer_index++; } - - DEBUG_PRINT(" desc %i\n", i); - DEBUG_PRINT(" start addr virt 0x%p, phys 0x%lx\n", - devpriv->desc_dio_buffer[i], - (unsigned long)devpriv->dma_desc[i]. 
- pci_start_addr); - DEBUG_PRINT(" next 0x%lx\n", - (unsigned long)devpriv->dma_desc[i].next); } devpriv->num_dma_descriptors = i; /* fix last descriptor to point back to first */ devpriv->dma_desc[i - 1].next = cpu_to_le32(devpriv->dma_desc_phys_addr | next_bits); - DEBUG_PRINT(" desc %i next fixup 0x%lx\n", i - 1, - (unsigned long)devpriv->dma_desc[i - 1].next); devpriv->block_size = transfer_size; @@ -489,9 +440,6 @@ static int hpdi_auto_attach(struct comedi_device *dev, return -ENOMEM; } - DEBUG_PRINT(" plx9080 remapped to 0x%p\n", devpriv->plx9080_iobase); - DEBUG_PRINT(" hpdi remapped to 0x%p\n", devpriv->hpdi_iobase); - init_plx9080(dev); /* get irq */ @@ -510,9 +458,6 @@ static int hpdi_auto_attach(struct comedi_device *dev, devpriv->dio_buffer[i] = pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE, &devpriv->dio_buffer_phys_addr[i]); - DEBUG_PRINT("dio_buffer at virt 0x%p, phys 0x%lx\n", - devpriv->dio_buffer[i], - (unsigned long)devpriv->dio_buffer_phys_addr[i]); } /* allocate dma descriptors */ devpriv->dma_desc = pci_alloc_consistent(pcidev, @@ -687,8 +632,6 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s) hpdi_writel(dev, RX_FIFO_RESET_BIT, BOARD_CONTROL_REG); - DEBUG_PRINT("hpdi: in di_cmd\n"); - abort_dma(dev, 0); devpriv->dma_desc_index = 0; @@ -725,7 +668,6 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s) writel(intr_bit(RX_FULL_INTR), devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG); - DEBUG_PRINT("hpdi: starting rx\n"); hpdi_writel(dev, RX_ENABLE_BIT, BOARD_CONTROL_REG); return 0; @@ -778,11 +720,6 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel) num_samples * sizeof(uint32_t)); devpriv->dma_desc_index++; devpriv->dma_desc_index %= devpriv->num_dma_descriptors; - - DEBUG_PRINT("next desc addr 0x%lx\n", (unsigned long) - devpriv->dma_desc[devpriv->dma_desc_index]. 
- next); - DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr); } /* XXX check for buffer overrun somehow */ } @@ -812,7 +749,6 @@ static irqreturn_t handle_interrupt(int irq, void *d) async->events = 0; if (hpdi_intr_status) { - DEBUG_PRINT("hpdi: intr status 0x%x, ", hpdi_intr_status); writel(hpdi_intr_status, devpriv->hpdi_iobase + INTERRUPT_STATUS_REG); } @@ -823,10 +759,8 @@ static irqreturn_t handle_interrupt(int irq, void *d) writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT, devpriv->plx9080_iobase + PLX_DMA0_CS_REG); - DEBUG_PRINT("dma0 status 0x%x\n", dma0_status); if (dma0_status & PLX_DMA_EN_BIT) drain_dma_buffers(dev, 0); - DEBUG_PRINT(" cleared dma ch0 interrupt\n"); } spin_unlock_irqrestore(&dev->spinlock, flags); @@ -836,9 +770,6 @@ static irqreturn_t handle_interrupt(int irq, void *d) if (plx_status & ICS_DMA1_A) { /* XXX *//* dma chan 1 interrupt */ writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT, devpriv->plx9080_iobase + PLX_DMA1_CS_REG); - DEBUG_PRINT("dma1 status 0x%x\n", dma1_status); - - DEBUG_PRINT(" cleared dma ch1 interrupt\n"); } spin_unlock_irqrestore(&dev->spinlock, flags); @@ -846,15 +777,11 @@ static irqreturn_t handle_interrupt(int irq, void *d) if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */ plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG); writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG); - DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits); } if (hpdi_board_status & RX_OVERRUN_BIT) { comedi_error(dev, "rx fifo overrun"); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; - DEBUG_PRINT("dma0_status 0x%x\n", - (int)readb(devpriv->plx9080_iobase + - PLX_DMA0_CS_REG)); } if (hpdi_board_status & RX_UNDERRUN_BIT) { @@ -865,11 +792,6 @@ static irqreturn_t handle_interrupt(int irq, void *d) if (devpriv->dio_count == 0) async->events |= COMEDI_CB_EOA; - DEBUG_PRINT("board status 0x%x, ", hpdi_board_status); - DEBUG_PRINT("plx status 0x%x\n", plx_status); - if (async->events) - DEBUG_PRINT(" events 0x%x\n", async->events); - cfc_handle_events(dev, s); return IRQ_HANDLED; @@ -914,7 +836,7 @@ static int gsc_hpdi_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &gsc_hpdi_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(gsc_hpdi_pci_table) = { +static const struct pci_device_id gsc_hpdi_pci_table[] = { { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9080, PCI_VENDOR_ID_PLX, 0x2400, 0, 0, 0}, { 0 } diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c index 1e16641ec52d69..80539b2bea1a00 100644 --- a/drivers/staging/comedi/drivers/icp_multi.c +++ b/drivers/staging/comedi/drivers/icp_multi.c @@ -91,12 +91,13 @@ Configuration options: not applicable, uses PCI auto config #define Status_IRQ 0x00ff /* All interrupts */ /* Define analogue range */ -static const struct comedi_lrange range_analog = { 4, { - UNI_RANGE(5), - UNI_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(10) - } +static const struct comedi_lrange range_analog = { + 4, { + UNI_RANGE(5), + UNI_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(10) + } }; static const char range_codes_analog[] = { 0x00, 0x20, 0x10, 0x30 }; @@ -597,7 +598,7 @@ static int icp_multi_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &icp_multi_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(icp_multi_pci_table) = { +static const struct pci_device_id icp_multi_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ICP, PCI_DEVICE_ID_ICP_MULTI) }, { 0 } }; diff --git 
a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c index b52d58e5de276b..6100c12c164f4c 100644 --- a/drivers/staging/comedi/drivers/jr3_pci.c +++ b/drivers/staging/comedi/drivers/jr3_pci.c @@ -807,7 +807,7 @@ static int jr3_pci_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &jr3_pci_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = { +static const struct pci_device_id jr3_pci_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) }, { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) }, { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) }, diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c index 15589f62a61992..6b9846fd8c485b 100644 --- a/drivers/staging/comedi/drivers/ke_counter.c +++ b/drivers/staging/comedi/drivers/ke_counter.c @@ -139,7 +139,7 @@ static int ke_counter_pci_probe(struct pci_dev *dev, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(ke_counter_pci_table) = { +static const struct pci_device_id ke_counter_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c index 3d12e913592649..e739bcd66a04e2 100644 --- a/drivers/staging/comedi/drivers/me4000.c +++ b/drivers/staging/comedi/drivers/me4000.c @@ -328,13 +328,12 @@ static const struct me4000_board me4000_boards[] = { }; static const struct comedi_lrange me4000_ai_range = { - 4, - { - UNI_RANGE(2.5), - UNI_RANGE(10), - BIP_RANGE(2.5), - BIP_RANGE(10), - } + 4, { + UNI_RANGE(2.5), + UNI_RANGE(10), + BIP_RANGE(2.5), + BIP_RANGE(10) + } }; #define FIRMWARE_NOT_AVAILABLE 1 @@ -1105,7 +1104,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) { unsigned int tmp; struct comedi_device *dev = dev_id; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; int i; int c = 0; unsigned int lval; @@ -1116,12 +1115,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) /* Reset all events */ s->async->events = 0; - /* Check if irq number is right */ - if (irq != dev->irq) { - dev_err(dev->class_dev, "Incorrect interrupt num: %d\n", irq); - return IRQ_HANDLED; - } - if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) & ME4000_IRQ_STATUS_BIT_AI_HF) { /* Read status register to find out what happened */ @@ -1505,6 +1498,13 @@ static int me4000_auto_attach(struct comedi_device *dev, me4000_reset(dev); + if (pcidev->irq > 0) { + result = request_irq(pcidev->irq, me4000_ai_isr, IRQF_SHARED, + dev->board_name, dev); + if (result == 0) + dev->irq = pcidev->irq; + } + result = comedi_alloc_subdevices(dev, 4); if (result) return result; @@ -1525,22 +1525,12 @@ static int me4000_auto_attach(struct comedi_device *dev, s->range_table = &me4000_ai_range; s->insn_read = me4000_ai_insn_read; - if (pcidev->irq > 0) { - if (request_irq(pcidev->irq, me4000_ai_isr, - IRQF_SHARED, dev->board_name, dev)) { - dev_warn(dev->class_dev, - "request_irq failed\n"); - } else { - dev->read_subdev = s; - s->subdev_flags |= SDF_CMD_READ; - s->cancel = me4000_ai_cancel; - s->do_cmdtest = me4000_ai_do_cmd_test; - s->do_cmd = me4000_ai_do_cmd; - - dev->irq = pcidev->irq; - } - } else { - dev_warn(dev->class_dev, "No interrupt available\n"); + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->cancel = me4000_ai_cancel; + s->do_cmdtest = me4000_ai_do_cmd_test; + s->do_cmd = me4000_ai_do_cmd; } 
} else { s->type = COMEDI_SUBD_UNUSED; @@ -1635,7 +1625,7 @@ static int me4000_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &me4000_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(me4000_pci_table) = { +static const struct pci_device_id me4000_pci_table[] = { { PCI_VDEVICE(MEILHAUS, 0x4650), BOARD_ME4650 }, { PCI_VDEVICE(MEILHAUS, 0x4660), BOARD_ME4660 }, { PCI_VDEVICE(MEILHAUS, 0x4661), BOARD_ME4660I }, diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c index 24ec9ef9b1a0fc..7f668789640112 100644 --- a/drivers/staging/comedi/drivers/me_daq.c +++ b/drivers/staging/comedi/drivers/me_daq.c @@ -576,7 +576,7 @@ static int me_daq_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &me_daq_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(me_daq_pci_table) = { +static const struct pci_device_id me_daq_pci_table[] = { { PCI_VDEVICE(MEILHAUS, 0x2600), BOARD_ME2600 }, { PCI_VDEVICE(MEILHAUS, 0x2000), BOARD_ME2000 }, { 0 } diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c new file mode 100644 index 00000000000000..81b78e053f4eb6 --- /dev/null +++ b/drivers/staging/comedi/drivers/mf6x4.c @@ -0,0 +1,354 @@ +/* + * comedi/drivers/mf6x4.c + * Driver for Humusoft MF634 and MF624 Data acquisition cards + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2000 David A. Schleef + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +/* + * Driver: mf6x4 + * Description: Humusoft MF634 and MF624 Data acquisition card driver + * Devices: Humusoft MF634, Humusoft MF624 + * Author: Rostislav Lisovy + * Status: works + * Updated: + * Configuration Options: none + */ + +#include +#include +#include +#include "../comedidev.h" + +/* Registers present in BAR0 memory region */ +#define MF624_GPIOC_R 0x54 + +#define MF6X4_GPIOC_EOLC /* End Of Last Conversion */ (1 << 17) +#define MF6X4_GPIOC_LDAC /* Load DACs */ (1 << 23) +#define MF6X4_GPIOC_DACEN (1 << 26) + +/* BAR1 registers */ +#define MF6X4_DIN_R 0x10 +#define MF6X4_DIN_M 0xff +#define MF6X4_DOUT_R 0x10 +#define MF6X4_DOUT_M 0xff + +#define MF6X4_ADSTART_R 0x20 +#define MF6X4_ADDATA_R 0x00 +#define MF6X4_ADCTRL_R 0x00 +#define MF6X4_ADCTRL_M 0xff + +#define MF6X4_DA0_R 0x20 +#define MF6X4_DA1_R 0x22 +#define MF6X4_DA2_R 0x24 +#define MF6X4_DA3_R 0x26 +#define MF6X4_DA4_R 0x28 +#define MF6X4_DA5_R 0x2a +#define MF6X4_DA6_R 0x2c +#define MF6X4_DA7_R 0x2e +/* Map DAC cahnnel id to real HW-dependent offset value */ +#define MF6X4_DAC_R(x) (0x20 + ((x) * 2)) +#define MF6X4_DA_M 0x3fff + +/* BAR2 registers */ +#define MF634_GPIOC_R 0x68 + +enum mf6x4_boardid { + BOARD_MF634, + BOARD_MF624, +}; + +struct mf6x4_board { + const char *name; + unsigned int bar_nums[3]; /* We need to keep track of the + order of BARs used by the cards */ +}; + +static const struct mf6x4_board mf6x4_boards[] = { + [BOARD_MF634] = { + .name = "mf634", + .bar_nums = {0, 2, 3}, + }, + [BOARD_MF624] = { + .name = "mf624", + .bar_nums = {0, 2, 4}, + }, +}; + +struct mf6x4_private { + /* + * Documentation for both MF634 and MF624 describes registers + * present in BAR0, 1 and 2 regions. + * The real (i.e. in HW) BAR numbers are different for MF624 + * and MF634 yet we will call them 0, 1, 2 + */ + void __iomem *bar0_mem; + void __iomem *bar1_mem; + void __iomem *bar2_mem; + + /* + * This configuration register has the same function and fields + * for both cards however it lies in different BARs on different + * offsets -- this variable makes the access easier + */ + void __iomem *gpioc_R; + + /* DAC value cache -- used for insn_read function */ + int ao_readback[8]; +}; + +static int mf6x4_di_insn_bits(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, unsigned int *data) +{ + struct mf6x4_private *devpriv = dev->private; + + data[1] = ioread16(devpriv->bar1_mem + MF6X4_DIN_R) & MF6X4_DIN_M; + + return insn->n; +} + +static int mf6x4_do_insn_bits(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, unsigned int *data) +{ + struct mf6x4_private *devpriv = dev->private; + + if (comedi_dio_update_state(s, data)) + iowrite16(s->state & MF6X4_DOUT_M, + devpriv->bar1_mem + MF6X4_DOUT_R); + + data[1] = s->state; + + return insn->n; +} + +static int mf6x4_ai_wait_for_eoc(struct comedi_device *dev, + unsigned int timeout) +{ + struct mf6x4_private *devpriv = dev->private; + unsigned int eolc; + + while (timeout--) { + eolc = ioread32(devpriv->gpioc_R) & MF6X4_GPIOC_EOLC; + if (eolc) + return 0; + + udelay(1); + } + + return -ETIME; +} + +static int mf6x4_ai_insn_read(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, unsigned int *data) +{ + struct mf6x4_private *devpriv = dev->private; + int chan = CR_CHAN(insn->chanspec); + int ret; + int i; + int d; + + /* Set the ADC channel number in the scan list */ + iowrite16((1 << chan) & MF6X4_ADCTRL_M, + devpriv->bar1_mem + MF6X4_ADCTRL_R); + + for (i = 0; i < insn->n; 
i++) { + /* Trigger ADC conversion by reading ADSTART */ + ioread16(devpriv->bar1_mem + MF6X4_ADSTART_R); + + ret = mf6x4_ai_wait_for_eoc(dev, 100); + if (ret) + return ret; + + /* Read the actual value */ + d = ioread16(devpriv->bar1_mem + MF6X4_ADDATA_R); + d &= s->maxdata; + data[i] = d; + } + + iowrite16(0x0, devpriv->bar1_mem + MF6X4_ADCTRL_R); + + return insn->n; +} + +static int mf6x4_ao_insn_write(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, unsigned int *data) +{ + struct mf6x4_private *devpriv = dev->private; + unsigned int chan = CR_CHAN(insn->chanspec); + uint32_t gpioc; + int i; + + /* Enable instantaneous update of converters outputs + Enable DACs */ + gpioc = ioread32(devpriv->gpioc_R); + iowrite32((gpioc & ~MF6X4_GPIOC_LDAC) | MF6X4_GPIOC_DACEN, + devpriv->gpioc_R); + + for (i = 0; i < insn->n; i++) { + iowrite16(data[i] & MF6X4_DA_M, + devpriv->bar1_mem + MF6X4_DAC_R(chan)); + + devpriv->ao_readback[chan] = data[i]; + } + + return insn->n; +} + +static int mf6x4_ao_insn_read(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, unsigned int *data) +{ + struct mf6x4_private *devpriv = dev->private; + unsigned int chan = CR_CHAN(insn->chanspec); + int i; + + for (i = 0; i < insn->n; i++) + data[i] = devpriv->ao_readback[chan]; + + return insn->n; +} + +static int mf6x4_auto_attach(struct comedi_device *dev, unsigned long context) +{ + struct pci_dev *pcidev = comedi_to_pci_dev(dev); + const struct mf6x4_board *board = NULL; + struct mf6x4_private *devpriv; + struct comedi_subdevice *s; + int ret; + + if (context < ARRAY_SIZE(mf6x4_boards)) + board = &mf6x4_boards[context]; + else + return -ENODEV; + + dev->board_ptr = board; + dev->board_name = board->name; + + ret = comedi_pci_enable(dev); + if (ret) + return ret; + + devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); + if (!devpriv) + return -ENOMEM; + + devpriv->bar0_mem = pci_ioremap_bar(pcidev, board->bar_nums[0]); + if (!devpriv->bar0_mem) + return -ENODEV; + + devpriv->bar1_mem = pci_ioremap_bar(pcidev, board->bar_nums[1]); + if (!devpriv->bar1_mem) + return -ENODEV; + + devpriv->bar2_mem = pci_ioremap_bar(pcidev, board->bar_nums[2]); + if (!devpriv->bar2_mem) + return -ENODEV; + + if (board == &mf6x4_boards[BOARD_MF634]) + devpriv->gpioc_R = devpriv->bar2_mem + MF634_GPIOC_R; + else + devpriv->gpioc_R = devpriv->bar0_mem + MF624_GPIOC_R; + + + ret = comedi_alloc_subdevices(dev, 4); + if (ret) + return ret; + + /* ADC */ + s = &dev->subdevices[0]; + s->type = COMEDI_SUBD_AI; + s->subdev_flags = SDF_READABLE | SDF_GROUND; + s->n_chan = 8; + s->maxdata = 0x3fff; /* 14 bits ADC */ + s->range_table = &range_bipolar10; + s->insn_read = mf6x4_ai_insn_read; + + /* DAC */ + s = &dev->subdevices[1]; + s->type = COMEDI_SUBD_AO; + s->subdev_flags = SDF_WRITABLE; + s->n_chan = 8; + s->maxdata = 0x3fff; /* 14 bits DAC */ + s->range_table = &range_bipolar10; + s->insn_write = mf6x4_ao_insn_write; + s->insn_read = mf6x4_ao_insn_read; + + /* DIN */ + s = &dev->subdevices[2]; + s->type = COMEDI_SUBD_DI; + s->subdev_flags = SDF_READABLE; + s->n_chan = 8; + s->maxdata = 1; + s->range_table = &range_digital; + s->insn_bits = mf6x4_di_insn_bits; + + /* DOUT */ + s = &dev->subdevices[3]; + s->type = COMEDI_SUBD_DO; + s->subdev_flags = SDF_WRITABLE; + s->n_chan = 8; + s->maxdata = 1; + s->range_table = &range_digital; + s->insn_bits = mf6x4_do_insn_bits; + + return 0; +} + +static void mf6x4_detach(struct comedi_device *dev) +{ + struct mf6x4_private *devpriv = 
dev->private; + + if (devpriv->bar0_mem) + iounmap(devpriv->bar0_mem); + if (devpriv->bar1_mem) + iounmap(devpriv->bar1_mem); + if (devpriv->bar2_mem) + iounmap(devpriv->bar2_mem); + + comedi_pci_disable(dev); +} + +static struct comedi_driver mf6x4_driver = { + .driver_name = "mf6x4", + .module = THIS_MODULE, + .auto_attach = mf6x4_auto_attach, + .detach = mf6x4_detach, +}; + +static int mf6x4_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + return comedi_pci_auto_config(dev, &mf6x4_driver, id->driver_data); +} + +static const struct pci_device_id mf6x4_pci_table[] = { + { PCI_VDEVICE(HUMUSOFT, 0x0634), BOARD_MF634 }, + { PCI_VDEVICE(HUMUSOFT, 0x0624), BOARD_MF624 }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, mf6x4_pci_table); + +static struct pci_driver mf6x4_pci_driver = { + .name = "mf6x4", + .id_table = mf6x4_pci_table, + .probe = mf6x4_pci_probe, + .remove = comedi_pci_auto_unconfig, +}; + +module_comedi_pci_driver(mf6x4_driver, mf6x4_pci_driver); + +MODULE_AUTHOR("Rostislav Lisovy "); +MODULE_DESCRIPTION("Comedi MF634 and MF624 DAQ cards driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c index 35cb4ace79701d..9c9a0ee432cf45 100644 --- a/drivers/staging/comedi/drivers/mite.c +++ b/drivers/staging/comedi/drivers/mite.c @@ -288,7 +288,6 @@ void mite_dma_arm(struct mite_channel *mite_chan) int chor; unsigned long flags; - MDPRINTK("mite_dma_arm ch%i\n", mite_chan->channel); /* * memory barrier is intended to insure any twiddling with the buffer * is done before writing to the mite to arm dma transfer @@ -329,8 +328,6 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring, n_links = async->prealloc_bufsz >> PAGE_SHIFT; - MDPRINTK("ring->hw_dev=%p, n_links=0x%04x\n", ring->hw_dev, n_links); - ring->descriptors = dma_alloc_coherent(ring->hw_dev, n_links * sizeof(struct mite_dma_descriptor), @@ -345,7 +342,7 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring, for (i = 0; i < n_links; i++) { ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE); ring->descriptors[i].addr = - cpu_to_le32(async->buf_page_list[i].dma_addr); + cpu_to_le32(async->buf_map->page_list[i].dma_addr); ring->descriptors[i].next = cpu_to_le32(ring->descriptors_dma_addr + (i + 1) * @@ -368,8 +365,6 @@ void mite_prep_dma(struct mite_channel *mite_chan, unsigned int chor, chcr, mcr, dcr, lkcr; struct mite_struct *mite = mite_chan->mite; - MDPRINTK("mite_prep_dma ch%i\n", mite_chan->channel); - /* reset DMA and FIFO */ chor = CHOR_DMARESET | CHOR_FRESET; writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); @@ -448,8 +443,6 @@ void mite_prep_dma(struct mite_channel *mite_chan, /* starting address for link chaining */ writel(mite_chan->ring->descriptors_dma_addr, mite->mite_io_addr + MITE_LKAR(mite_chan->channel)); - - MDPRINTK("exit mite_prep_dma\n"); } EXPORT_SYMBOL_GPL(mite_prep_dma); @@ -515,8 +508,6 @@ unsigned mite_dma_tcr(struct mite_channel *mite_chan) lkar = readl(mite->mite_io_addr + MITE_LKAR(mite_chan->channel)); tcr = readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel)); - MDPRINTK("mite_dma_tcr ch%i, lkar=0x%08x tcr=%d\n", mite_chan->channel, - lkar, tcr); return tcr; } @@ -642,140 +633,6 @@ int mite_done(struct mite_channel *mite_chan) } EXPORT_SYMBOL_GPL(mite_done); -#ifdef DEBUG_MITE - -/* names of bits in mite registers */ - -static const char *const mite_CHOR_strings[] = { - "start", "cont", "stop", "abort", - "freset", "clrlc", "clrrb", "clrdone", - "clr_lpause", "set_lpause", 
"clr_send_tc", - "set_send_tc", "12", "13", "14", - "15", "16", "17", "18", - "19", "20", "21", "22", - "23", "24", "25", "26", - "27", "28", "29", "30", - "dmareset", -}; - -static const char *const mite_CHCR_strings[] = { - "continue", "ringbuff", "2", "3", - "4", "5", "6", "7", - "8", "9", "10", "11", - "12", "13", "bursten", "fifodis", - "clr_cont_rb_ie", "set_cont_rb_ie", "clr_lc_ie", "set_lc_ie", - "clr_drdy_ie", "set_drdy_ie", "clr_mrdy_ie", "set_mrdy_ie", - "clr_done_ie", "set_done_ie", "clr_sar_ie", "set_sar_ie", - "clr_linkp_ie", "set_linkp_ie", "clr_dma_ie", "set_dma_ie", -}; - -static const char *const mite_MCR_strings[] = { - "amdevice", "1", "2", "3", - "4", "5", "portio", "portvxi", - "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "11", - "12", "13", "blocken", "berhand", - "reqsintlim/reqs0", "reqs1", "reqs2", "rd32", - "rd512", "rl1", "rl2", "rl8", - "24", "25", "26", "27", - "28", "29", "30", "stopen", -}; - -static const char *const mite_DCR_strings[] = { - "amdevice", "1", "2", "3", - "4", "5", "portio", "portvxi", - "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "aseqxp2", - "aseqxp8", "13", "blocken", "berhand", - "reqsintlim", "reqs1", "reqs2", "rd32", - "rd512", "rl1", "rl2", "rl8", - "23", "24", "25", "27", - "28", "wsdevc", "wsdevs", "rwdevpack", -}; - -static const char *const mite_LKCR_strings[] = { - "amdevice", "1", "2", "3", - "4", "5", "portio", "portvxi", - "psizebyte", "psizehalf (byte & half = word)", "asequp", "aseqdown", - "12", "13", "14", "berhand", - "16", "17", "18", "rd32", - "rd512", "rl1", "rl2", "rl8", - "24", "25", "26", "27", - "28", "29", "30", "chngend", -}; - -static const char *const mite_CHSR_strings[] = { - "d.err0", "d.err1", "m.err0", "m.err1", - "l.err0", "l.err1", "drq0", "drq1", - "end", "xferr", "operr0", "operr1", - "stops", "habort", "sabort", "error", - "16", "conts_rb", "18", "linkc", - "20", "drdy", "22", "mrdy", - "24", "done", "26", "sars", - "28", "lpauses", "30", "int", -}; - -static void mite_decode(const char *const *bit_str, unsigned int bits) -{ - int i; - - for (i = 31; i >= 0; i--) { - if (bits & (1 << i)) - pr_debug(" %s\n", bit_str[i]); - } -} - -void mite_dump_regs(struct mite_channel *mite_chan) -{ - void __iomem *mite_io_addr = mite_chan->mite->mite_io_addr; - unsigned int offset; - unsigned int value; - int channel = mite_chan->channel; - - pr_debug("mite_dump_regs ch%i\n", channel); - pr_debug("mite address is =%p\n", mite_io_addr); - - offset = MITE_CHOR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[CHOR] at 0x%08x =0x%08x\n", offset, value); - mite_decode(mite_CHOR_strings, value); - offset = MITE_CHCR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[CHCR] at 0x%08x =0x%08x\n", offset, value); - mite_decode(mite_CHCR_strings, value); - offset = MITE_TCR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[TCR] at 0x%08x =0x%08x\n", offset, value); - offset = MITE_MCR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[MCR] at 0x%08x =0x%08x\n", offset, value); - mite_decode(mite_MCR_strings, value); - offset = MITE_MAR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[MAR] at 0x%08x =0x%08x\n", offset, value); - offset = MITE_DCR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[DCR] at 0x%08x =0x%08x\n", offset, value); - mite_decode(mite_DCR_strings, value); - offset = MITE_DAR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite 
status[DAR] at 0x%08x =0x%08x\n", offset, value); - offset = MITE_LKCR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[LKCR] at 0x%08x =0x%08x\n", offset, value); - mite_decode(mite_LKCR_strings, value); - offset = MITE_LKAR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[LKAR] at 0x%08x =0x%08x\n", offset, value); - offset = MITE_CHSR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[CHSR] at 0x%08x =0x%08x\n", offset, value); - mite_decode(mite_CHSR_strings, value); - offset = MITE_FCR(channel); - value = readl(mite_io_addr + offset); - pr_debug("mite status[FCR] at 0x%08x =0x%08x\n", offset, value); -} -EXPORT_SYMBOL_GPL(mite_dump_regs); -#endif - static int __init mite_module_init(void) { return 0; diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h index 8423b8bf33845f..bcf2f972376e78 100644 --- a/drivers/staging/comedi/drivers/mite.h +++ b/drivers/staging/comedi/drivers/mite.h @@ -24,15 +24,8 @@ #include #include "../comedidev.h" -/* #define DEBUG_MITE */ #define PCIMIO_COMPAT -#ifdef DEBUG_MITE -#define MDPRINTK(format, args...) pr_debug(format , ## args) -#else -#define MDPRINTK(format, args...) do { } while (0) -#endif - #define MAX_MITE_DMA_CHANNELS 8 struct mite_dma_descriptor { @@ -129,11 +122,6 @@ void mite_prep_dma(struct mite_channel *mite_chan, int mite_buf_change(struct mite_dma_descriptor_ring *ring, struct comedi_async *async); -#ifdef DEBUG_MITE -void mite_print_chsr(unsigned int chsr); -void mite_dump_regs(struct mite_channel *mite_chan); -#endif - static inline int CHAN_OFFSET(int channel) { return 0x500 + 0x100 * channel; diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c index acbaeee6250c8c..fe4621ea65c356 100644 --- a/drivers/staging/comedi/drivers/mpc624.c +++ b/drivers/staging/comedi/drivers/mpc624.c @@ -159,11 +159,6 @@ static int mpc624_ai_rinsn(struct comedi_device *dev, * We always write 0 to GNSWA bit, so the channel range is +-/10.1Vdc */ outb(insn->chanspec, dev->iobase + MPC624_GNMUXCH); -/* printk("Channel %d:\n", insn->chanspec); */ - if (!insn->n) { - printk(KERN_INFO "MPC624: Warning, no data to acquire\n"); - return 0; - } for (n = 0; n < insn->n; n++) { /* Trigger the conversion */ @@ -182,11 +177,9 @@ static int mpc624_ai_rinsn(struct comedi_device *dev, else break; } - if (i == TIMEOUT) { - printk(KERN_ERR "MPC624: timeout (%dms)\n", TIMEOUT); - data[n] = 0; + if (i == TIMEOUT) return -ETIMEDOUT; - } + /* Start reading data */ data_in = 0; data_out = devpriv->ulConvertionRate; @@ -245,11 +238,11 @@ static int mpc624_ai_rinsn(struct comedi_device *dev, */ if (data_in & MPC624_EOC_BIT) - printk(KERN_INFO "MPC624:EOC bit is set (data_in=%lu)!", - data_in); + dev_dbg(dev->class_dev, + "EOC bit is set (data_in=%lu)!", data_in); if (data_in & MPC624_DMY_BIT) - printk(KERN_INFO "MPC624:DMY bit is set (data_in=%lu)!", - data_in); + dev_dbg(dev->class_dev, + "DMY bit is set (data_in=%lu)!", data_in); if (data_in & MPC624_SGN_BIT) { /* Volatge is positive */ /* * comedi operates on unsigned numbers, so mask off EOC diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 85aa9609d6a280..860fc81fb11c62 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -455,7 +455,7 @@ static int ni6527_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &ni6527_driver, id->driver_data); } -static 
DEFINE_PCI_DEVICE_TABLE(ni6527_pci_table) = { +static const struct pci_device_id ni6527_pci_table[] = { { PCI_VDEVICE(NI, 0x2b10), BOARD_PXI6527 }, { PCI_VDEVICE(NI, 0x2b20), BOARD_PCI6527 }, { 0 } diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c index 853f62b2b1a982..6e42001f686e39 100644 --- a/drivers/staging/comedi/drivers/ni_65xx.c +++ b/drivers/staging/comedi/drivers/ni_65xx.c @@ -43,9 +43,6 @@ except maybe the 6514. */ -#define DEBUG 1 -#define DEBUG_FLAGS - #include #include #include @@ -430,7 +427,7 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct ni_65xx_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[2]; + struct comedi_subdevice *s = dev->read_subdev; unsigned int status; status = readb(devpriv->mite->daq_io_addr + Change_Status); @@ -741,7 +738,7 @@ static int ni_65xx_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &ni_65xx_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(ni_65xx_pci_table) = { +static const struct pci_device_id ni_65xx_pci_table[] = { { PCI_VDEVICE(NI, 0x1710), BOARD_PXI6509 }, { PCI_VDEVICE(NI, 0x7085), BOARD_PCI6509 }, { PCI_VDEVICE(NI, 0x7086), BOARD_PXI6528 }, diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c index 8a991dcab24a9b..df42e390617186 100644 --- a/drivers/staging/comedi/drivers/ni_660x.c +++ b/drivers/staging/comedi/drivers/ni_660x.c @@ -55,112 +55,112 @@ for 4 */ #define MAX_DMA_CHANNEL 4 /* See Register-Level Programmer Manual page 3.1 */ -enum NI_660x_Register { - G0InterruptAcknowledge, - G0StatusRegister, - G1InterruptAcknowledge, - G1StatusRegister, - G01StatusRegister, - G0CommandRegister, - STCDIOParallelInput, - G1CommandRegister, - G0HWSaveRegister, - G1HWSaveRegister, - STCDIOOutput, - STCDIOControl, - G0SWSaveRegister, - G1SWSaveRegister, - G0ModeRegister, - G01JointStatus1Register, - G1ModeRegister, - STCDIOSerialInput, - G0LoadARegister, - G01JointStatus2Register, - G0LoadBRegister, - G1LoadARegister, - G1LoadBRegister, - G0InputSelectRegister, - G1InputSelectRegister, - G0AutoincrementRegister, - G1AutoincrementRegister, - G01JointResetRegister, - G0InterruptEnable, - G1InterruptEnable, - G0CountingModeRegister, - G1CountingModeRegister, - G0SecondGateRegister, - G1SecondGateRegister, - G0DMAConfigRegister, - G0DMAStatusRegister, - G1DMAConfigRegister, - G1DMAStatusRegister, - G2InterruptAcknowledge, - G2StatusRegister, - G3InterruptAcknowledge, - G3StatusRegister, - G23StatusRegister, - G2CommandRegister, - G3CommandRegister, - G2HWSaveRegister, - G3HWSaveRegister, - G2SWSaveRegister, - G3SWSaveRegister, - G2ModeRegister, - G23JointStatus1Register, - G3ModeRegister, - G2LoadARegister, - G23JointStatus2Register, - G2LoadBRegister, - G3LoadARegister, - G3LoadBRegister, - G2InputSelectRegister, - G3InputSelectRegister, - G2AutoincrementRegister, - G3AutoincrementRegister, - G23JointResetRegister, - G2InterruptEnable, - G3InterruptEnable, - G2CountingModeRegister, - G3CountingModeRegister, - G3SecondGateRegister, - G2SecondGateRegister, - G2DMAConfigRegister, - G2DMAStatusRegister, - G3DMAConfigRegister, - G3DMAStatusRegister, - DIO32Input, - DIO32Output, - ClockConfigRegister, - GlobalInterruptStatusRegister, - DMAConfigRegister, - GlobalInterruptConfigRegister, - IOConfigReg0_1, - IOConfigReg2_3, - IOConfigReg4_5, - IOConfigReg6_7, - IOConfigReg8_9, - IOConfigReg10_11, - IOConfigReg12_13, - IOConfigReg14_15, - IOConfigReg16_17, - 
IOConfigReg18_19, - IOConfigReg20_21, - IOConfigReg22_23, - IOConfigReg24_25, - IOConfigReg26_27, - IOConfigReg28_29, - IOConfigReg30_31, - IOConfigReg32_33, - IOConfigReg34_35, - IOConfigReg36_37, - IOConfigReg38_39, - NumRegisters, +enum ni_660x_register { + NI660X_G0_INT_ACK, + NI660X_G0_STATUS, + NI660X_G1_INT_ACK, + NI660X_G1_STATUS, + NI660X_G01_STATUS, + NI660X_G0_CMD, + NI660X_STC_DIO_PARALLEL_INPUT, + NI660X_G1_CMD, + NI660X_G0_HW_SAVE, + NI660X_G1_HW_SAVE, + NI660X_STC_DIO_OUTPUT, + NI660X_STC_DIO_CONTROL, + NI660X_G0_SW_SAVE, + NI660X_G1_SW_SAVE, + NI660X_G0_MODE, + NI660X_G01_STATUS1, + NI660X_G1_MODE, + NI660X_STC_DIO_SERIAL_INPUT, + NI660X_G0_LOADA, + NI660X_G01_STATUS2, + NI660X_G0_LOADB, + NI660X_G1_LOADA, + NI660X_G1_LOADB, + NI660X_G0_INPUT_SEL, + NI660X_G1_INPUT_SEL, + NI660X_G0_AUTO_INC, + NI660X_G1_AUTO_INC, + NI660X_G01_RESET, + NI660X_G0_INT_ENA, + NI660X_G1_INT_ENA, + NI660X_G0_CNT_MODE, + NI660X_G1_CNT_MODE, + NI660X_G0_GATE2, + NI660X_G1_GATE2, + NI660X_G0_DMA_CFG, + NI660X_G0_DMA_STATUS, + NI660X_G1_DMA_CFG, + NI660X_G1_DMA_STATUS, + NI660X_G2_INT_ACK, + NI660X_G2_STATUS, + NI660X_G3_INT_ACK, + NI660X_G3_STATUS, + NI660X_G23_STATUS, + NI660X_G2_CMD, + NI660X_G3_CMD, + NI660X_G2_HW_SAVE, + NI660X_G3_HW_SAVE, + NI660X_G2_SW_SAVE, + NI660X_G3_SW_SAVE, + NI660X_G2_MODE, + NI660X_G23_STATUS1, + NI660X_G3_MODE, + NI660X_G2_LOADA, + NI660X_G23_STATUS2, + NI660X_G2_LOADB, + NI660X_G3_LOADA, + NI660X_G3_LOADB, + NI660X_G2_INPUT_SEL, + NI660X_G3_INPUT_SEL, + NI660X_G2_AUTO_INC, + NI660X_G3_AUTO_INC, + NI660X_G23_RESET, + NI660X_G2_INT_ENA, + NI660X_G3_INT_ENA, + NI660X_G2_CNT_MODE, + NI660X_G3_CNT_MODE, + NI660X_G3_GATE2, + NI660X_G2_GATE2, + NI660X_G2_DMA_CFG, + NI660X_G2_DMA_STATUS, + NI660X_G3_DMA_CFG, + NI660X_G3_DMA_STATUS, + NI660X_DIO32_INPUT, + NI660X_DIO32_OUTPUT, + NI660X_CLK_CFG, + NI660X_GLOBAL_INT_STATUS, + NI660X_DMA_CFG, + NI660X_GLOBAL_INT_CFG, + NI660X_IO_CFG_0_1, + NI660X_IO_CFG_2_3, + NI660X_IO_CFG_4_5, + NI660X_IO_CFG_6_7, + NI660X_IO_CFG_8_9, + NI660X_IO_CFG_10_11, + NI660X_IO_CFG_12_13, + NI660X_IO_CFG_14_15, + NI660X_IO_CFG_16_17, + NI660X_IO_CFG_18_19, + NI660X_IO_CFG_20_21, + NI660X_IO_CFG_22_23, + NI660X_IO_CFG_24_25, + NI660X_IO_CFG_26_27, + NI660X_IO_CFG_28_29, + NI660X_IO_CFG_30_31, + NI660X_IO_CFG_32_33, + NI660X_IO_CFG_34_35, + NI660X_IO_CFG_36_37, + NI660X_IO_CFG_38_39, + NI660X_NUM_REGS, }; static inline unsigned IOConfigReg(unsigned pfi_channel) { - unsigned reg = IOConfigReg0_1 + pfi_channel / 2; - BUG_ON(reg > IOConfigReg38_39); + unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2; + BUG_ON(reg > NI660X_IO_CFG_38_39); return reg; } @@ -200,7 +200,7 @@ struct NI_660xRegisterData { enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */ }; -static const struct NI_660xRegisterData registerData[NumRegisters] = { +static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = { {"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B}, {"G0 Status Register", 0x004, NI_660x_READ, DATA_2B}, {"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B}, @@ -347,11 +347,6 @@ static inline unsigned dma_select_mask(unsigned dma_channel) enum dma_selection { dma_selection_none = 0x1f, }; -static inline unsigned dma_selection_counter(unsigned counter_index) -{ - BUG_ON(counter_index >= counters_per_chip); - return counter_index; -} static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection) { @@ -444,229 +439,158 @@ static inline unsigned ni_660x_num_counters(struct comedi_device *dev) return board->n_chips * 
counters_per_chip; } -static enum NI_660x_Register ni_gpct_to_660x_register(enum ni_gpct_register reg) +static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg) { - enum NI_660x_Register ni_660x_register; switch (reg) { - case NITIO_G0_Autoincrement_Reg: - ni_660x_register = G0AutoincrementRegister; - break; - case NITIO_G1_Autoincrement_Reg: - ni_660x_register = G1AutoincrementRegister; - break; - case NITIO_G2_Autoincrement_Reg: - ni_660x_register = G2AutoincrementRegister; - break; - case NITIO_G3_Autoincrement_Reg: - ni_660x_register = G3AutoincrementRegister; - break; - case NITIO_G0_Command_Reg: - ni_660x_register = G0CommandRegister; - break; - case NITIO_G1_Command_Reg: - ni_660x_register = G1CommandRegister; - break; - case NITIO_G2_Command_Reg: - ni_660x_register = G2CommandRegister; - break; - case NITIO_G3_Command_Reg: - ni_660x_register = G3CommandRegister; - break; - case NITIO_G0_HW_Save_Reg: - ni_660x_register = G0HWSaveRegister; - break; - case NITIO_G1_HW_Save_Reg: - ni_660x_register = G1HWSaveRegister; - break; - case NITIO_G2_HW_Save_Reg: - ni_660x_register = G2HWSaveRegister; - break; - case NITIO_G3_HW_Save_Reg: - ni_660x_register = G3HWSaveRegister; - break; - case NITIO_G0_SW_Save_Reg: - ni_660x_register = G0SWSaveRegister; - break; - case NITIO_G1_SW_Save_Reg: - ni_660x_register = G1SWSaveRegister; - break; - case NITIO_G2_SW_Save_Reg: - ni_660x_register = G2SWSaveRegister; - break; - case NITIO_G3_SW_Save_Reg: - ni_660x_register = G3SWSaveRegister; - break; - case NITIO_G0_Mode_Reg: - ni_660x_register = G0ModeRegister; - break; - case NITIO_G1_Mode_Reg: - ni_660x_register = G1ModeRegister; - break; - case NITIO_G2_Mode_Reg: - ni_660x_register = G2ModeRegister; - break; - case NITIO_G3_Mode_Reg: - ni_660x_register = G3ModeRegister; - break; - case NITIO_G0_LoadA_Reg: - ni_660x_register = G0LoadARegister; - break; - case NITIO_G1_LoadA_Reg: - ni_660x_register = G1LoadARegister; - break; - case NITIO_G2_LoadA_Reg: - ni_660x_register = G2LoadARegister; - break; - case NITIO_G3_LoadA_Reg: - ni_660x_register = G3LoadARegister; - break; - case NITIO_G0_LoadB_Reg: - ni_660x_register = G0LoadBRegister; - break; - case NITIO_G1_LoadB_Reg: - ni_660x_register = G1LoadBRegister; - break; - case NITIO_G2_LoadB_Reg: - ni_660x_register = G2LoadBRegister; - break; - case NITIO_G3_LoadB_Reg: - ni_660x_register = G3LoadBRegister; - break; - case NITIO_G0_Input_Select_Reg: - ni_660x_register = G0InputSelectRegister; - break; - case NITIO_G1_Input_Select_Reg: - ni_660x_register = G1InputSelectRegister; - break; - case NITIO_G2_Input_Select_Reg: - ni_660x_register = G2InputSelectRegister; - break; - case NITIO_G3_Input_Select_Reg: - ni_660x_register = G3InputSelectRegister; - break; - case NITIO_G01_Status_Reg: - ni_660x_register = G01StatusRegister; - break; - case NITIO_G23_Status_Reg: - ni_660x_register = G23StatusRegister; - break; - case NITIO_G01_Joint_Reset_Reg: - ni_660x_register = G01JointResetRegister; - break; - case NITIO_G23_Joint_Reset_Reg: - ni_660x_register = G23JointResetRegister; - break; - case NITIO_G01_Joint_Status1_Reg: - ni_660x_register = G01JointStatus1Register; - break; - case NITIO_G23_Joint_Status1_Reg: - ni_660x_register = G23JointStatus1Register; - break; - case NITIO_G01_Joint_Status2_Reg: - ni_660x_register = G01JointStatus2Register; - break; - case NITIO_G23_Joint_Status2_Reg: - ni_660x_register = G23JointStatus2Register; - break; - case NITIO_G0_Counting_Mode_Reg: - ni_660x_register = G0CountingModeRegister; - break; - case 
NITIO_G1_Counting_Mode_Reg: - ni_660x_register = G1CountingModeRegister; - break; - case NITIO_G2_Counting_Mode_Reg: - ni_660x_register = G2CountingModeRegister; - break; - case NITIO_G3_Counting_Mode_Reg: - ni_660x_register = G3CountingModeRegister; - break; - case NITIO_G0_Second_Gate_Reg: - ni_660x_register = G0SecondGateRegister; - break; - case NITIO_G1_Second_Gate_Reg: - ni_660x_register = G1SecondGateRegister; - break; - case NITIO_G2_Second_Gate_Reg: - ni_660x_register = G2SecondGateRegister; - break; - case NITIO_G3_Second_Gate_Reg: - ni_660x_register = G3SecondGateRegister; - break; - case NITIO_G0_DMA_Config_Reg: - ni_660x_register = G0DMAConfigRegister; - break; - case NITIO_G0_DMA_Status_Reg: - ni_660x_register = G0DMAStatusRegister; - break; - case NITIO_G1_DMA_Config_Reg: - ni_660x_register = G1DMAConfigRegister; - break; - case NITIO_G1_DMA_Status_Reg: - ni_660x_register = G1DMAStatusRegister; - break; - case NITIO_G2_DMA_Config_Reg: - ni_660x_register = G2DMAConfigRegister; - break; - case NITIO_G2_DMA_Status_Reg: - ni_660x_register = G2DMAStatusRegister; - break; - case NITIO_G3_DMA_Config_Reg: - ni_660x_register = G3DMAConfigRegister; - break; - case NITIO_G3_DMA_Status_Reg: - ni_660x_register = G3DMAStatusRegister; - break; - case NITIO_G0_Interrupt_Acknowledge_Reg: - ni_660x_register = G0InterruptAcknowledge; - break; - case NITIO_G1_Interrupt_Acknowledge_Reg: - ni_660x_register = G1InterruptAcknowledge; - break; - case NITIO_G2_Interrupt_Acknowledge_Reg: - ni_660x_register = G2InterruptAcknowledge; - break; - case NITIO_G3_Interrupt_Acknowledge_Reg: - ni_660x_register = G3InterruptAcknowledge; - break; - case NITIO_G0_Status_Reg: - ni_660x_register = G0StatusRegister; - break; - case NITIO_G1_Status_Reg: - ni_660x_register = G1StatusRegister; - break; - case NITIO_G2_Status_Reg: - ni_660x_register = G2StatusRegister; - break; - case NITIO_G3_Status_Reg: - ni_660x_register = G3StatusRegister; - break; - case NITIO_G0_Interrupt_Enable_Reg: - ni_660x_register = G0InterruptEnable; - break; - case NITIO_G1_Interrupt_Enable_Reg: - ni_660x_register = G1InterruptEnable; - break; - case NITIO_G2_Interrupt_Enable_Reg: - ni_660x_register = G2InterruptEnable; - break; - case NITIO_G3_Interrupt_Enable_Reg: - ni_660x_register = G3InterruptEnable; - break; + case NITIO_G0_AUTO_INC: + return NI660X_G0_AUTO_INC; + case NITIO_G1_AUTO_INC: + return NI660X_G1_AUTO_INC; + case NITIO_G2_AUTO_INC: + return NI660X_G2_AUTO_INC; + case NITIO_G3_AUTO_INC: + return NI660X_G3_AUTO_INC; + case NITIO_G0_CMD: + return NI660X_G0_CMD; + case NITIO_G1_CMD: + return NI660X_G1_CMD; + case NITIO_G2_CMD: + return NI660X_G2_CMD; + case NITIO_G3_CMD: + return NI660X_G3_CMD; + case NITIO_G0_HW_SAVE: + return NI660X_G0_HW_SAVE; + case NITIO_G1_HW_SAVE: + return NI660X_G1_HW_SAVE; + case NITIO_G2_HW_SAVE: + return NI660X_G2_HW_SAVE; + case NITIO_G3_HW_SAVE: + return NI660X_G3_HW_SAVE; + case NITIO_G0_SW_SAVE: + return NI660X_G0_SW_SAVE; + case NITIO_G1_SW_SAVE: + return NI660X_G1_SW_SAVE; + case NITIO_G2_SW_SAVE: + return NI660X_G2_SW_SAVE; + case NITIO_G3_SW_SAVE: + return NI660X_G3_SW_SAVE; + case NITIO_G0_MODE: + return NI660X_G0_MODE; + case NITIO_G1_MODE: + return NI660X_G1_MODE; + case NITIO_G2_MODE: + return NI660X_G2_MODE; + case NITIO_G3_MODE: + return NI660X_G3_MODE; + case NITIO_G0_LOADA: + return NI660X_G0_LOADA; + case NITIO_G1_LOADA: + return NI660X_G1_LOADA; + case NITIO_G2_LOADA: + return NI660X_G2_LOADA; + case NITIO_G3_LOADA: + return NI660X_G3_LOADA; + case NITIO_G0_LOADB: + return 
NI660X_G0_LOADB; + case NITIO_G1_LOADB: + return NI660X_G1_LOADB; + case NITIO_G2_LOADB: + return NI660X_G2_LOADB; + case NITIO_G3_LOADB: + return NI660X_G3_LOADB; + case NITIO_G0_INPUT_SEL: + return NI660X_G0_INPUT_SEL; + case NITIO_G1_INPUT_SEL: + return NI660X_G1_INPUT_SEL; + case NITIO_G2_INPUT_SEL: + return NI660X_G2_INPUT_SEL; + case NITIO_G3_INPUT_SEL: + return NI660X_G3_INPUT_SEL; + case NITIO_G01_STATUS: + return NI660X_G01_STATUS; + case NITIO_G23_STATUS: + return NI660X_G23_STATUS; + case NITIO_G01_RESET: + return NI660X_G01_RESET; + case NITIO_G23_RESET: + return NI660X_G23_RESET; + case NITIO_G01_STATUS1: + return NI660X_G01_STATUS1; + case NITIO_G23_STATUS1: + return NI660X_G23_STATUS1; + case NITIO_G01_STATUS2: + return NI660X_G01_STATUS2; + case NITIO_G23_STATUS2: + return NI660X_G23_STATUS2; + case NITIO_G0_CNT_MODE: + return NI660X_G0_CNT_MODE; + case NITIO_G1_CNT_MODE: + return NI660X_G1_CNT_MODE; + case NITIO_G2_CNT_MODE: + return NI660X_G2_CNT_MODE; + case NITIO_G3_CNT_MODE: + return NI660X_G3_CNT_MODE; + case NITIO_G0_GATE2: + return NI660X_G0_GATE2; + case NITIO_G1_GATE2: + return NI660X_G1_GATE2; + case NITIO_G2_GATE2: + return NI660X_G2_GATE2; + case NITIO_G3_GATE2: + return NI660X_G3_GATE2; + case NITIO_G0_DMA_CFG: + return NI660X_G0_DMA_CFG; + case NITIO_G0_DMA_STATUS: + return NI660X_G0_DMA_STATUS; + case NITIO_G1_DMA_CFG: + return NI660X_G1_DMA_CFG; + case NITIO_G1_DMA_STATUS: + return NI660X_G1_DMA_STATUS; + case NITIO_G2_DMA_CFG: + return NI660X_G2_DMA_CFG; + case NITIO_G2_DMA_STATUS: + return NI660X_G2_DMA_STATUS; + case NITIO_G3_DMA_CFG: + return NI660X_G3_DMA_CFG; + case NITIO_G3_DMA_STATUS: + return NI660X_G3_DMA_STATUS; + case NITIO_G0_INT_ACK: + return NI660X_G0_INT_ACK; + case NITIO_G1_INT_ACK: + return NI660X_G1_INT_ACK; + case NITIO_G2_INT_ACK: + return NI660X_G2_INT_ACK; + case NITIO_G3_INT_ACK: + return NI660X_G3_INT_ACK; + case NITIO_G0_STATUS: + return NI660X_G0_STATUS; + case NITIO_G1_STATUS: + return NI660X_G1_STATUS; + case NITIO_G2_STATUS: + return NI660X_G2_STATUS; + case NITIO_G3_STATUS: + return NI660X_G3_STATUS; + case NITIO_G0_INT_ENA: + return NI660X_G0_INT_ENA; + case NITIO_G1_INT_ENA: + return NI660X_G1_INT_ENA; + case NITIO_G2_INT_ENA: + return NI660X_G2_INT_ENA; + case NITIO_G3_INT_ENA: + return NI660X_G3_INT_ENA; default: BUG(); return 0; - break; } - return ni_660x_register; } static inline void ni_660x_write_register(struct comedi_device *dev, - unsigned chip_index, unsigned bits, - enum NI_660x_Register reg) + unsigned chip, unsigned bits, + enum ni_660x_register reg) { struct ni_660x_private *devpriv = dev->private; void __iomem *write_address = - devpriv->mite->daq_io_addr + GPCT_OFFSET[chip_index] + + devpriv->mite->daq_io_addr + GPCT_OFFSET[chip] + registerData[reg].offset; switch (registerData[reg].size) { @@ -683,12 +607,12 @@ static inline void ni_660x_write_register(struct comedi_device *dev, } static inline unsigned ni_660x_read_register(struct comedi_device *dev, - unsigned chip_index, - enum NI_660x_Register reg) + unsigned chip, + enum ni_660x_register reg) { struct ni_660x_private *devpriv = dev->private; void __iomem *read_address = - devpriv->mite->daq_io_addr + GPCT_OFFSET[chip_index] + + devpriv->mite->daq_io_addr + GPCT_OFFSET[chip] + registerData[reg].offset; switch (registerData[reg].size) { @@ -709,18 +633,20 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits, enum ni_gpct_register reg) { struct comedi_device *dev = counter->counter_dev->dev; - enum NI_660x_Register ni_660x_register 
= ni_gpct_to_660x_register(reg); - ni_660x_write_register(dev, counter->chip_index, bits, - ni_660x_register); + enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg); + unsigned chip = counter->chip_index; + + ni_660x_write_register(dev, chip, bits, ni_660x_register); } static unsigned ni_gpct_read_register(struct ni_gpct *counter, enum ni_gpct_register reg) { struct comedi_device *dev = counter->counter_dev->dev; - enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg); - return ni_660x_read_register(dev, counter->chip_index, - ni_660x_register); + enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg); + unsigned chip = counter->chip_index; + + return ni_660x_read_register(dev, chip, ni_660x_register); } static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private @@ -728,7 +654,9 @@ static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private struct ni_gpct *counter) { - return priv->mite_rings[counter->chip_index][counter->counter_index]; + unsigned chip = counter->chip_index; + + return priv->mite_rings[chip][counter->counter_index]; } static inline void ni_660x_set_dma_channel(struct comedi_device *dev, @@ -736,18 +664,17 @@ static inline void ni_660x_set_dma_channel(struct comedi_device *dev, struct ni_gpct *counter) { struct ni_660x_private *devpriv = dev->private; + unsigned chip = counter->chip_index; unsigned long flags; spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags); - devpriv->dma_configuration_soft_copies[counter->chip_index] &= - ~dma_select_mask(mite_channel); - devpriv->dma_configuration_soft_copies[counter->chip_index] |= - dma_select_bits(mite_channel, - dma_selection_counter(counter->counter_index)); - ni_660x_write_register(dev, counter->chip_index, - devpriv->dma_configuration_soft_copies - [counter->chip_index] | - dma_reset_bit(mite_channel), DMAConfigRegister); + devpriv->dma_configuration_soft_copies[chip] &= + ~dma_select_mask(mite_channel); + devpriv->dma_configuration_soft_copies[chip] |= + dma_select_bits(mite_channel, counter->counter_index); + ni_660x_write_register(dev, chip, + devpriv->dma_configuration_soft_copies[chip] | + dma_reset_bit(mite_channel), NI660X_DMA_CFG); mmiowb(); spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags); } @@ -757,16 +684,17 @@ static inline void ni_660x_unset_dma_channel(struct comedi_device *dev, struct ni_gpct *counter) { struct ni_660x_private *devpriv = dev->private; + unsigned chip = counter->chip_index; unsigned long flags; spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags); - devpriv->dma_configuration_soft_copies[counter->chip_index] &= + devpriv->dma_configuration_soft_copies[chip] &= ~dma_select_mask(mite_channel); - devpriv->dma_configuration_soft_copies[counter->chip_index] |= + devpriv->dma_configuration_soft_copies[chip] |= dma_select_bits(mite_channel, dma_selection_none); - ni_660x_write_register(dev, counter->chip_index, - devpriv->dma_configuration_soft_copies - [counter->chip_index], DMAConfigRegister); + ni_660x_write_register(dev, chip, + devpriv->dma_configuration_soft_copies[chip], + NI660X_DMA_CFG); mmiowb(); spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags); } @@ -815,11 +743,9 @@ static void ni_660x_release_mite_channel(struct comedi_device *dev, static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { + struct ni_gpct *counter = s->private; int retval; - struct ni_gpct *counter = subdev_to_counter(s); -/* const struct comedi_cmd *cmd = &s->async->cmd; */ - retval 
= ni_660x_request_mite_channel(dev, counter, COMEDI_INPUT); if (retval) { comedi_error(dev, @@ -827,22 +753,13 @@ static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s) return retval; } ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); - retval = ni_tio_cmd(counter, s->async); - - return retval; -} - -static int ni_660x_cmdtest(struct comedi_device *dev, - struct comedi_subdevice *s, struct comedi_cmd *cmd) -{ - struct ni_gpct *counter = subdev_to_counter(s); - return ni_tio_cmdtest(counter, cmd); + return ni_tio_cmd(dev, s); } static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { - struct ni_gpct *counter = subdev_to_counter(s); + struct ni_gpct *counter = s->private; int retval; retval = ni_tio_cancel(counter); @@ -850,23 +767,28 @@ static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s) return retval; } -static void set_tio_counterswap(struct comedi_device *dev, int chipset) +static void set_tio_counterswap(struct comedi_device *dev, int chip) { - /* See P. 3.5 of the Register-Level Programming manual. The - CounterSwap bit has to be set on the second chip, otherwise - it will try to use the same pins as the first chip. + unsigned bits = 0; + + /* + * See P. 3.5 of the Register-Level Programming manual. + * The CounterSwap bit has to be set on the second chip, + * otherwise it will try to use the same pins as the + * first chip. */ - if (chipset) - ni_660x_write_register(dev, chipset, CounterSwap, - ClockConfigRegister); - else - ni_660x_write_register(dev, chipset, 0, ClockConfigRegister); + if (chip) + bits = CounterSwap; + + ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG); } static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev, struct comedi_subdevice *s) { - ni_tio_handle_interrupt(subdev_to_counter(s), s); + struct ni_gpct *counter = s->private; + + ni_tio_handle_interrupt(counter, s); if (s->async->events) { if (s->async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)) { @@ -901,11 +823,12 @@ static int ni_660x_input_poll(struct comedi_device *dev, struct comedi_subdevice *s) { struct ni_660x_private *devpriv = dev->private; + struct ni_gpct *counter = s->private; unsigned long flags; /* lock to avoid race with comedi_poll */ spin_lock_irqsave(&devpriv->interrupt_lock, flags); - mite_sync_input_dma(subdev_to_counter(s)->mite_chan, s->async); + mite_sync_input_dma(counter->mite_chan, s->async); spin_unlock_irqrestore(&devpriv->interrupt_lock, flags); return comedi_buf_read_n_available(s->async); } @@ -915,10 +838,10 @@ static int ni_660x_buf_change(struct comedi_device *dev, unsigned long new_size) { struct ni_660x_private *devpriv = dev->private; + struct ni_gpct *counter = s->private; int ret; - ret = mite_buf_change(mite_ring(devpriv, subdev_to_counter(s)), - s->async); + ret = mite_buf_change(mite_ring(devpriv, counter), s->async); if (ret < 0) return ret; @@ -974,13 +897,6 @@ static void ni_660x_free_mite_rings(struct comedi_device *dev) } } -static int -ni_660x_GPCT_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - return ni_tio_rinsn(subdev_to_counter(s), insn, data); -} - static void init_tio_chip(struct comedi_device *dev, int chipset) { struct ni_660x_private *devpriv = dev->private; @@ -994,25 +910,11 @@ static void init_tio_chip(struct comedi_device *dev, int chipset) } ni_660x_write_register(dev, chipset, devpriv->dma_configuration_soft_copies[chipset], - DMAConfigRegister); + 
NI660X_DMA_CFG); for (i = 0; i < NUM_PFI_CHANNELS; ++i) ni_660x_write_register(dev, chipset, 0, IOConfigReg(i)); } -static int -ni_660x_GPCT_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - return ni_tio_insn_config(subdev_to_counter(s), insn, data); -} - -static int ni_660x_GPCT_winsn(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - return ni_tio_winsn(subdev_to_counter(s), insn, data); -} - static int ni_660x_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) @@ -1024,13 +926,13 @@ static int ni_660x_dio_insn_bits(struct comedi_device *dev, s->state &= ~(data[0] << base_bitfield_channel); s->state |= (data[0] & data[1]) << base_bitfield_channel; /* Write out the new digital output lines */ - ni_660x_write_register(dev, 0, s->state, DIO32Output); + ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT); } /* on return, data[1] contains the value of the digital * input and output lines. */ - data[1] = - (ni_660x_read_register(dev, 0, - DIO32Input) >> base_bitfield_channel); + data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >> + base_bitfield_channel); + return insn->n; } @@ -1215,7 +1117,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev, s->insn_config = ni_660x_dio_insn_config; /* we use the ioconfig registers to control dio direction, so zero output enables in stc dio control reg */ - ni_660x_write_register(dev, 0, 0, STCDIOControl); + ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL); devpriv->counter_dev = ni_gpct_device_construct(dev, &ni_gpct_write_register, @@ -1234,12 +1136,12 @@ static int ni_660x_auto_attach(struct comedi_device *dev, SDF_CMD_READ /* | SDF_CMD_WRITE */ ; s->n_chan = 3; s->maxdata = 0xffffffff; - s->insn_read = ni_660x_GPCT_rinsn; - s->insn_write = ni_660x_GPCT_winsn; - s->insn_config = ni_660x_GPCT_insn_config; + s->insn_read = ni_tio_insn_read; + s->insn_write = ni_tio_insn_write; + s->insn_config = ni_tio_insn_config; s->do_cmd = &ni_660x_cmd; s->len_chanlist = 1; - s->do_cmdtest = &ni_660x_cmdtest; + s->do_cmdtest = ni_tio_cmdtest; s->cancel = &ni_660x_cancel; s->poll = &ni_660x_input_poll; s->async_dma_dir = DMA_BIDIRECTIONAL; @@ -1284,7 +1186,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev, if (board->n_chips > 1) global_interrupt_config_bits |= Cascade_Int_Enable_Bit; ni_660x_write_register(dev, 0, global_interrupt_config_bits, - GlobalInterruptConfigRegister); + NI660X_GLOBAL_INT_CFG); dev_info(dev->class_dev, "ni_660x: %s attached\n", dev->board_name); return 0; } @@ -1320,7 +1222,7 @@ static int ni_660x_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &ni_660x_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(ni_660x_pci_table) = { +static const struct pci_device_id ni_660x_pci_table[] = { { PCI_VDEVICE(NI, 0x1310), BOARD_PCI6602 }, { PCI_VDEVICE(NI, 0x1360), BOARD_PXI6602 }, { PCI_VDEVICE(NI, 0x2c60), BOARD_PCI6601 }, diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c index e4414cf110e7e3..8550fdc4ccd38b 100644 --- a/drivers/staging/comedi/drivers/ni_670x.c +++ b/drivers/staging/comedi/drivers/ni_670x.c @@ -282,7 +282,7 @@ static int ni_670x_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &ni_670x_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(ni_670x_pci_table) = { +static const struct pci_device_id 
ni_670x_pci_table[] = { { PCI_VDEVICE(NI, 0x1290), BOARD_PCI6704 }, { PCI_VDEVICE(NI, 0x1920), BOARD_PXI6704 }, { PCI_VDEVICE(NI, 0x2c90), BOARD_PCI6703 }, diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c index 63c847932eb889..f83eb9ebe27850 100644 --- a/drivers/staging/comedi/drivers/ni_at_a2150.c +++ b/drivers/staging/comedi/drivers/ni_at_a2150.c @@ -74,9 +74,6 @@ TRIG_WAKE_EOS #define A2150_SIZE 28 #define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */ -/* #define A2150_DEBUG enable debugging code */ -#undef A2150_DEBUG /* disable debugging code */ - /* Registers and bits */ #define CONFIG_REG 0x0 #define CHANNEL_BITS(x) ((x) & 0x7) @@ -127,10 +124,9 @@ struct a2150_board { /* analog input range */ static const struct comedi_lrange range_a2150 = { - 1, - { - RANGE(-2.828, 2.828), - } + 1, { + BIP_RANGE(2.828) + } }; /* enum must match board indices */ @@ -167,19 +163,6 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period, static int a2150_set_chanlist(struct comedi_device *dev, unsigned int start_channel, unsigned int num_channels); -#ifdef A2150_DEBUG - -static void ni_dump_regs(struct comedi_device *dev) -{ - struct a2150_private *devpriv = dev->private; - - printk("config bits 0x%x\n", devpriv->config_bits); - printk("irq dma bits 0x%x\n", devpriv->irq_dma_bits); - printk("status bits 0x%x\n", inw(dev->iobase + STATUS_REG)); -} - -#endif - /* interrupt service routine */ static irqreturn_t a2150_interrupt(int irq, void *d) { @@ -411,11 +394,6 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) unsigned int old_config_bits = devpriv->config_bits; unsigned int trigger_bits; - if (!dev->irq || !devpriv->dma) { - comedi_error(dev, - " irq and dma required, cannot do hardware conversions"); - return -1; - } if (cmd->flags & TRIG_RT) { comedi_error(dev, " dma incompatible with hard real-time interrupt (TRIG_RT), aborting"); @@ -506,9 +484,6 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) /* start acquisition for soft trigger */ if (cmd->start_src == TRIG_NOW) outw(0, dev->iobase + FIFO_START_REG); -#ifdef A2150_DEBUG - ni_dump_regs(dev); -#endif return 0; } @@ -573,13 +548,7 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, comedi_error(dev, "timeout"); return -ETIME; } -#ifdef A2150_DEBUG - ni_dump_regs(dev); -#endif data[n] = inw(dev->iobase + FIFO_DATA_REG); -#ifdef A2150_DEBUG - printk(" data is %i\n", data[n]); -#endif data[n] ^= 0x8000; } @@ -728,46 +697,35 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (ret) return ret; - /* grab our IRQ */ - if (irq) { - /* check that irq is supported */ - if (irq < 3 || irq == 8 || irq == 13 || irq > 15) { - printk(" invalid irq line %u\n", irq); - return -EINVAL; - } - if (request_irq(irq, a2150_interrupt, 0, - dev->driver->driver_name, dev)) { - printk("unable to allocate irq %u\n", irq); - return -EINVAL; + dev->board_ptr = a2150_boards + a2150_probe(dev); + thisboard = comedi_board(dev); + dev->board_name = thisboard->name; + + if ((irq >= 3 && irq <= 7) || (irq >= 9 && irq <= 12) || + irq == 14 || irq == 15) { + ret = request_irq(irq, a2150_interrupt, 0, + dev->board_name, dev); + if (ret == 0) { + devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq); + dev->irq = irq; } - devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq); - dev->irq = irq; } - /* initialize dma */ - if (dma) { - if (dma == 4 || dma > 7) { - printk(" invalid dma 
channel %u\n", dma); - return -EINVAL; - } - if (request_dma(dma, dev->driver->driver_name)) { - printk(" failed to allocate dma channel %u\n", dma); - return -EINVAL; - } - devpriv->dma = dma; - devpriv->dma_buffer = - kmalloc(A2150_DMA_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); - if (devpriv->dma_buffer == NULL) - return -ENOMEM; - disable_dma(dma); - set_dma_mode(dma, DMA_MODE_READ); + if (dev->irq && dma <= 7 && dma != 4) { + ret = request_dma(dma, dev->board_name); + if (ret == 0) { + devpriv->dma = dma; + devpriv->dma_buffer = kmalloc(A2150_DMA_BUFFER_SIZE, + GFP_KERNEL | GFP_DMA); + if (!devpriv->dma_buffer) + return -ENOMEM; - devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma); - } + disable_dma(dma); + set_dma_mode(dma, DMA_MODE_READ); - dev->board_ptr = a2150_boards + a2150_probe(dev); - thisboard = comedi_board(dev); - dev->board_name = thisboard->name; + devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma); + } + } ret = comedi_alloc_subdevices(dev, 1); if (ret) @@ -775,17 +733,20 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it) /* analog input subdevice */ s = &dev->subdevices[0]; - dev->read_subdev = s; s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER; s->n_chan = 4; - s->len_chanlist = 4; s->maxdata = 0xffff; s->range_table = &range_a2150; - s->do_cmd = a2150_ai_cmd; - s->do_cmdtest = a2150_ai_cmdtest; s->insn_read = a2150_ai_rinsn; - s->cancel = a2150_cancel; + if (dev->irq && devpriv->dma) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = s->n_chan; + s->do_cmd = a2150_ai_cmd; + s->do_cmdtest = a2150_ai_cmdtest; + s->cancel = a2150_cancel; + } /* need to do this for software counting of completed conversions, to * prevent hardware count from stopping acquisition */ diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c index 856c73d8b7cd3a..d03935257b974e 100644 --- a/drivers/staging/comedi/drivers/ni_atmio.c +++ b/drivers/staging/comedi/drivers/ni_atmio.c @@ -98,8 +98,6 @@ are not supported. 
#include "ni_stc.h" #include "8255.h" -#undef DEBUG - #define ATMIO 1 #undef PCIMIO @@ -437,19 +435,6 @@ static int ni_atmio_attach(struct comedi_device *dev, if (ret) return ret; -#ifdef DEBUG - /* board existence sanity check */ - { - int i; - - printk(" board fingerprint:"); - for (i = 0; i < 16; i += 2) { - printk(" %04x %02x", inw(dev->iobase + i), - inb(dev->iobase + i + 1)); - } - } -#endif - /* get board type */ board = ni_getboardtype(dev); diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c index a9f7d40d6db24e..e8cd5ddb85c5ab 100644 --- a/drivers/staging/comedi/drivers/ni_atmio16d.c +++ b/drivers/staging/comedi/drivers/ni_atmio16d.c @@ -105,40 +105,31 @@ struct atmio16_board_t { }; /* range structs */ -static const struct comedi_lrange range_atmio16d_ai_10_bipolar = { 4, { - BIP_RANGE - (10), - BIP_RANGE - (1), - BIP_RANGE - (0.1), - BIP_RANGE - (0.02) - } +static const struct comedi_lrange range_atmio16d_ai_10_bipolar = { + 4, { + BIP_RANGE(10), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.02) + } }; -static const struct comedi_lrange range_atmio16d_ai_5_bipolar = { 4, { - BIP_RANGE - (5), - BIP_RANGE - (0.5), - BIP_RANGE - (0.05), - BIP_RANGE - (0.01) - } +static const struct comedi_lrange range_atmio16d_ai_5_bipolar = { + 4, { + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.01) + } }; -static const struct comedi_lrange range_atmio16d_ai_unipolar = { 4, { - UNI_RANGE - (10), - UNI_RANGE - (1), - UNI_RANGE - (0.1), - UNI_RANGE - (0.02) - } +static const struct comedi_lrange range_atmio16d_ai_unipolar = { + 4, { + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.02) + } }; /* private data struct */ @@ -229,7 +220,7 @@ static void reset_atmio16d(struct comedi_device *dev) static irqreturn_t atmio16d_interrupt(int irq, void *d) { struct comedi_device *dev = d; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; comedi_buf_put(s->async, inw(dev->iobase + AD_FIFO_REG)); @@ -495,18 +486,13 @@ static int atmio16d_ai_insn_read(struct comedi_device *dev, break; } if (status & STAT_AD_OVERFLOW) { - printk(KERN_INFO "atmio16d: a/d FIFO overflow\n"); outw(0, dev->iobase + AD_CLEAR_REG); - return -ETIME; } } /* end waiting, now check if it timed out */ - if (t == ATMIO16D_TIMEOUT) { - printk(KERN_INFO "atmio16d: timeout\n"); - + if (t == ATMIO16D_TIMEOUT) return -ETIME; - } } return i; @@ -636,7 +622,6 @@ static int atmio16d_attach(struct comedi_device *dev, const struct atmio16_board_t *board = comedi_board(dev); struct atmio16d_private *devpriv; struct comedi_subdevice *s; - unsigned int irq; int ret; ret = comedi_request_region(dev, it->options[0], ATMIO16D_SIZE); @@ -654,19 +639,11 @@ static int atmio16d_attach(struct comedi_device *dev, /* reset the atmio16d hardware */ reset_atmio16d(dev); - /* check if our interrupt is available and get it */ - irq = it->options[1]; - if (irq) { - - ret = request_irq(irq, atmio16d_interrupt, 0, "atmio16d", dev); - if (ret < 0) { - printk(KERN_INFO "failed to allocate irq %u\n", irq); - return ret; - } - dev->irq = irq; - printk(KERN_INFO "( irq = %u )\n", irq); - } else { - printk(KERN_INFO "( no irq )"); + if (it->options[1]) { + ret = request_irq(it->options[1], atmio16d_interrupt, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; } /* set device options */ @@ -682,16 +659,11 @@ static int atmio16d_attach(struct comedi_device *dev, /* setup sub-devices */ s = &dev->subdevices[0]; - 
dev->read_subdev = s; /* ai subdevice */ s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = (devpriv->adc_mux ? 16 : 8); - s->len_chanlist = 16; s->insn_read = atmio16d_ai_insn_read; - s->do_cmdtest = atmio16d_ai_cmdtest; - s->do_cmd = atmio16d_ai_cmd; - s->cancel = atmio16d_ai_cancel; s->maxdata = 0xfff; /* 4095 decimal */ switch (devpriv->adc_range) { case adc_bipolar10: @@ -704,6 +676,14 @@ static int atmio16d_attach(struct comedi_device *dev, s->range_table = &range_atmio16d_ai_unipolar; break; } + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = 16; + s->do_cmdtest = atmio16d_ai_cmdtest; + s->do_cmd = atmio16d_ai_cmd; + s->cancel = atmio16d_ai_cancel; + } /* ao subdevice */ s = &dev->subdevices[1]; @@ -756,7 +736,6 @@ static int atmio16d_attach(struct comedi_device *dev, s->n_chan = 0; s->maxdata = 0 #endif - printk("\n"); return 0; } diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c index 8be681fca9078b..739597068297d2 100644 --- a/drivers/staging/comedi/drivers/ni_labpc_pci.c +++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c @@ -107,7 +107,7 @@ static struct comedi_driver labpc_pci_comedi_driver = { .detach = labpc_pci_detach, }; -static DEFINE_PCI_DEVICE_TABLE(labpc_pci_table) = { +static const struct pci_device_id labpc_pci_table[] = { { PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 }, { 0 } }; diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 5113397bfecf4c..457b88481db0a9 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -52,10 +52,6 @@ fully tested as yet. Terry Barnaby, BEAM Ltd. */ -/* #define DEBUG_INTERRUPT */ -/* #define DEBUG_STATUS_A */ -/* #define DEBUG_STATUS_B */ - #include #include #include @@ -63,10 +59,6 @@ #include "mite.h" #include "comedi_fc.h" -#ifndef MDPRINTK -#define MDPRINTK(format, args...) 
-#endif - /* A timeout count */ #define NI_TIMEOUT 1000 static const unsigned old_RTSI_clock_channel = 7; @@ -86,111 +78,109 @@ static const short ni_gainlkup[][16] = { [ai_gain_6143] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, }; -static const struct comedi_lrange range_ni_E_ai = { 16, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-2.5, 2.5), - RANGE(-1, 1), - RANGE(-0.5, 0.5), - RANGE(-0.25, 0.25), - RANGE(-0.1, 0.1), - RANGE(-0.05, 0.05), - RANGE(0, 20), - RANGE(0, 10), - RANGE(0, 5), - RANGE(0, 2), - RANGE(0, 1), - RANGE(0, 0.5), - RANGE(0, 0.2), - RANGE(0, 0.1), - } +static const struct comedi_lrange range_ni_E_ai = { + 16, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1), + BIP_RANGE(0.5), + BIP_RANGE(0.25), + BIP_RANGE(0.1), + BIP_RANGE(0.05), + UNI_RANGE(20), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2), + UNI_RANGE(1), + UNI_RANGE(0.5), + UNI_RANGE(0.2), + UNI_RANGE(0.1) + } }; -static const struct comedi_lrange range_ni_E_ai_limited = { 8, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-1, 1), - RANGE(-0.1, - 0.1), - RANGE(0, 10), - RANGE(0, 5), - RANGE(0, 1), - RANGE(0, 0.1), - } +static const struct comedi_lrange range_ni_E_ai_limited = { + 8, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(1), + BIP_RANGE(0.1), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(1), + UNI_RANGE(0.1) + } }; -static const struct comedi_lrange range_ni_E_ai_limited14 = { 14, { - RANGE(-10, - 10), - RANGE(-5, 5), - RANGE(-2, 2), - RANGE(-1, 1), - RANGE(-0.5, - 0.5), - RANGE(-0.2, - 0.2), - RANGE(-0.1, - 0.1), - RANGE(0, 10), - RANGE(0, 5), - RANGE(0, 2), - RANGE(0, 1), - RANGE(0, - 0.5), - RANGE(0, - 0.2), - RANGE(0, - 0.1), - } +static const struct comedi_lrange range_ni_E_ai_limited14 = { + 14, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2), + BIP_RANGE(1), + BIP_RANGE(0.5), + BIP_RANGE(0.2), + BIP_RANGE(0.1), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2), + UNI_RANGE(1), + UNI_RANGE(0.5), + UNI_RANGE(0.2), + UNI_RANGE(0.1) + } }; -static const struct comedi_lrange range_ni_E_ai_bipolar4 = { 4, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-0.5, - 0.5), - RANGE(-0.05, - 0.05), - } +static const struct comedi_lrange range_ni_E_ai_bipolar4 = { + 4, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05) + } }; -static const struct comedi_lrange range_ni_E_ai_611x = { 8, { - RANGE(-50, 50), - RANGE(-20, 20), - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-2, 2), - RANGE(-1, 1), - RANGE(-0.5, 0.5), - RANGE(-0.2, 0.2), - } +static const struct comedi_lrange range_ni_E_ai_611x = { + 8, { + BIP_RANGE(50), + BIP_RANGE(20), + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2), + BIP_RANGE(1), + BIP_RANGE(0.5), + BIP_RANGE(0.2) + } }; -static const struct comedi_lrange range_ni_M_ai_622x = { 4, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-1, 1), - RANGE(-0.2, 0.2), - } +static const struct comedi_lrange range_ni_M_ai_622x = { + 4, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(1), + BIP_RANGE(0.2) + } }; -static const struct comedi_lrange range_ni_M_ai_628x = { 7, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-2, 2), - RANGE(-1, 1), - RANGE(-0.5, 0.5), - RANGE(-0.2, 0.2), - RANGE(-0.1, 0.1), - } +static const struct comedi_lrange range_ni_M_ai_628x = { + 7, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2), + BIP_RANGE(1), + BIP_RANGE(0.5), + BIP_RANGE(0.2), + BIP_RANGE(0.1) + } }; -static const struct comedi_lrange range_ni_E_ao_ext = { 4, { - RANGE(-10, 10), - RANGE(0, 10), - RANGE_ext(-1, 1), - RANGE_ext(0, 1), - } +static const struct comedi_lrange range_ni_E_ao_ext = { + 4, { + 
BIP_RANGE(10), + UNI_RANGE(10), + RANGE_ext(-1, 1), + RANGE_ext(0, 1) + } }; static const struct comedi_lrange *const ni_range_lkup[] = { @@ -266,17 +256,6 @@ static int ni_rtsi_insn_config(struct comedi_device *dev, static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_read_eeprom(struct comedi_device *dev, int addr); -#ifdef DEBUG_STATUS_A -static void ni_mio_print_status_a(int status); -#else -#define ni_mio_print_status_a(a) -#endif -#ifdef DEBUG_STATUS_B -static void ni_mio_print_status_b(int status); -#else -#define ni_mio_print_status_b(a) -#endif - static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s); #ifndef PCIDMA static void ni_handle_fifo_half_full(struct comedi_device *dev); @@ -297,19 +276,8 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_8255_callback(int dir, int port, int data, unsigned long arg); -static int ni_gpct_insn_write(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); -static int ni_gpct_insn_read(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); -static int ni_gpct_insn_config(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); #ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s); -static int ni_gpct_cmdtest(struct comedi_device *dev, - struct comedi_subdevice *s, struct comedi_cmd *cmd); #endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s); @@ -322,10 +290,6 @@ static int cs5529_do_conversion(struct comedi_device *dev, static int cs5529_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); -#ifdef NI_CS5529_DEBUG -static unsigned int cs5529_config_read(struct comedi_device *dev, - unsigned int reg_select_bits); -#endif static void cs5529_config_write(struct comedi_device *dev, unsigned int value, unsigned int reg_select_bits); @@ -487,11 +451,10 @@ static inline void ni_set_gpct_dma_channel(struct comedi_device *dev, { unsigned bitfield; - if (mite_channel >= 0) { + if (mite_channel >= 0) bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel); - } else { + else bitfield = 0; - } ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index), bitfield); } @@ -907,9 +870,8 @@ static void mite_handle_b_linkc(struct mite_struct *mite, unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); - if (devpriv->ao_mite_chan) { + if (devpriv->ao_mite_chan) mite_sync_output_dma(devpriv->ao_mite_chan, s->async); - } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); } @@ -957,9 +919,8 @@ static void ni_handle_eos(struct comedi_device *dev, struct comedi_subdevice *s) #endif } /* handle special case of single scan using AI_End_On_End_Of_Scan */ - if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) { + if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) shutdown_ai_command(dev); - } } static void shutdown_ai_command(struct comedi_device *dev) @@ -1023,19 +984,15 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status) struct ni_private *devpriv = dev->private; unsigned short ack = 0; - if (a_status & AI_SC_TC_St) { + if (a_status & AI_SC_TC_St) ack |= AI_SC_TC_Interrupt_Ack; - } - if (a_status & AI_START1_St) { + if (a_status & AI_START1_St) ack |= AI_START1_Interrupt_Ack; - } - if (a_status & AI_START_St) { + if 
(a_status & AI_START_St) ack |= AI_START_Interrupt_Ack; - } - if (a_status & AI_STOP_St) { + if (a_status & AI_STOP_St) /* not sure why we used to ack the START here also, instead of doing it independently. Frank Hess 2007-07-06 */ - ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */ ; - } + ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */; if (ack) devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register); } @@ -1050,16 +1007,9 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, if (s->type == COMEDI_SUBD_UNUSED) return; -#ifdef DEBUG_INTERRUPT - printk - ("ni_mio_common: interrupt: a_status=%04x ai_mite_status=%08x\n", - status, ai_mite_status); - ni_mio_print_status_a(status); -#endif #ifdef PCIDMA - if (ai_mite_status & CHSR_LINKC) { + if (ai_mite_status & CHSR_LINKC) ni_sync_ai_dma(dev); - } if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY | CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR | @@ -1067,7 +1017,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, printk ("unknown mite interrupt, ack! (ai_mite_status=%08x)\n", ai_mite_status); - /* mite_print_chsr(ai_mite_status); */ s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; /* disable_irq(dev->irq); */ } @@ -1092,7 +1041,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, AI_SC_TC_Error_St)) { printk("ni_mio_common: ai error a_status=%04x\n", status); - ni_mio_print_status_a(status); shutdown_ai_command(dev); @@ -1105,12 +1053,8 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, return; } if (status & AI_SC_TC_St) { -#ifdef DEBUG_INTERRUPT - printk("ni_mio_common: SC_TC interrupt\n"); -#endif - if (!devpriv->ai_continuous) { + if (!devpriv->ai_continuous) shutdown_ai_command(dev); - } } } #ifndef PCIDMA @@ -1129,20 +1073,10 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, } #endif /* !PCIDMA */ - if ((status & AI_STOP_St)) { + if ((status & AI_STOP_St)) ni_handle_eos(dev, s); - } ni_event(dev, s); - -#ifdef DEBUG_INTERRUPT - status = devpriv->stc_readw(dev, AI_Status_1_Register); - if (status & Interrupt_A_St) { - printk - ("handle_a_interrupt: didn't clear interrupt? 
status=0x%x\n", - status); - } -#endif } static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status) @@ -1150,27 +1084,20 @@ static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status) struct ni_private *devpriv = dev->private; unsigned short ack = 0; - if (b_status & AO_BC_TC_St) { + if (b_status & AO_BC_TC_St) ack |= AO_BC_TC_Interrupt_Ack; - } - if (b_status & AO_Overrun_St) { + if (b_status & AO_Overrun_St) ack |= AO_Error_Interrupt_Ack; - } - if (b_status & AO_START_St) { + if (b_status & AO_START_St) ack |= AO_START_Interrupt_Ack; - } - if (b_status & AO_START1_St) { + if (b_status & AO_START1_St) ack |= AO_START1_Interrupt_Ack; - } - if (b_status & AO_UC_TC_St) { + if (b_status & AO_UC_TC_St) ack |= AO_UC_TC_Interrupt_Ack; - } - if (b_status & AO_UI2_TC_St) { + if (b_status & AO_UI2_TC_St) ack |= AO_UI2_TC_Interrupt_Ack; - } - if (b_status & AO_UPDATE_St) { + if (b_status & AO_UPDATE_St) ack |= AO_UPDATE_Interrupt_Ack; - } if (ack) devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register); } @@ -1182,17 +1109,10 @@ static void handle_b_interrupt(struct comedi_device *dev, struct comedi_subdevice *s = &dev->subdevices[NI_AO_SUBDEV]; /* unsigned short ack=0; */ -#ifdef DEBUG_INTERRUPT - printk("ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n", - b_status, ao_mite_status); - ni_mio_print_status_b(b_status); -#endif - #ifdef PCIDMA /* Currently, mite.c requires us to handle LINKC */ - if (ao_mite_status & CHSR_LINKC) { + if (ao_mite_status & CHSR_LINKC) mite_handle_b_linkc(devpriv->mite, dev); - } if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY | CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR | @@ -1200,7 +1120,6 @@ static void handle_b_interrupt(struct comedi_device *dev, printk ("unknown mite interrupt, ack! 
(ao_mite_status=%08x)\n", ao_mite_status); - /* mite_print_chsr(ao_mite_status); */ s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; } #endif @@ -1214,12 +1133,9 @@ static void handle_b_interrupt(struct comedi_device *dev, s->async->events |= COMEDI_CB_OVERFLOW; } - if (b_status & AO_BC_TC_St) { - MDPRINTK - ("ni_mio_common: AO BC_TC status=0x%04x status2=0x%04x\n", - b_status, devpriv->stc_readw(dev, AO_Status_2_Register)); + if (b_status & AO_BC_TC_St) s->async->events |= COMEDI_CB_EOA; - } + #ifndef PCIDMA if (b_status & AO_FIFO_Request_St) { int ret; @@ -1238,50 +1154,6 @@ static void handle_b_interrupt(struct comedi_device *dev, ni_event(dev, s); } -#ifdef DEBUG_STATUS_A -static const char *const status_a_strings[] = { - "passthru0", "fifo", "G0_gate", "G0_TC", - "stop", "start", "sc_tc", "start1", - "start2", "sc_tc_error", "overflow", "overrun", - "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a" -}; - -static void ni_mio_print_status_a(int status) -{ - int i; - - printk("A status:"); - for (i = 15; i >= 0; i--) { - if (status & (1 << i)) { - printk(" %s", status_a_strings[i]); - } - } - printk("\n"); -} -#endif - -#ifdef DEBUG_STATUS_B -static const char *const status_b_strings[] = { - "passthru1", "fifo", "G1_gate", "G1_TC", - "UI2_TC", "UPDATE", "UC_TC", "BC_TC", - "start1", "overrun", "start", "bc_tc_error", - "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b" -}; - -static void ni_mio_print_status_b(int status) -{ - int i; - - printk("B status:"); - for (i = 15; i >= 0; i--) { - if (status & (1 << i)) { - printk(" %s", status_b_strings[i]); - } - } - printk("\n"); -} -#endif - #ifndef PCIDMA static void ni_ao_fifo_load(struct comedi_device *dev, @@ -1324,9 +1196,8 @@ static void ni_ao_fifo_load(struct comedi_device *dev, chan %= cmd->chanlist_len; } async->cur_chan = chan; - if (err == 0) { + if (err == 0) async->events |= COMEDI_CB_OVERFLOW; - } } /* @@ -2392,7 +2263,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) unsigned int stop_count; int interrupt_a_enable = 0; - MDPRINTK("ni_ai_cmd\n"); if (dev->irq == 0) { comedi_error(dev, "cannot run command without an irq"); return -EIO; @@ -2630,15 +2500,11 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) ni_set_bits(dev, Interrupt_A_Enable_Register, interrupt_a_enable, 1); - - MDPRINTK("Interrupt_A_Enable_Register = 0x%04x\n", - devpriv->int_a_enable_reg); } else { /* interrupt on nothing */ ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0); /* XXX start polling if necessary */ - MDPRINTK("interrupting on nothing\n"); } /* end configuration */ @@ -2664,7 +2530,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) if (retval) return retval; } - /* mite_dump_regs(devpriv->mite); */ #endif switch (cmd->start_src) { @@ -2682,8 +2547,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) break; } - MDPRINTK("exit ni_ai_cmd\n"); - return 0; } @@ -3248,11 +3111,10 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source; devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); - if (cmd->stop_src == TRIG_NONE) { + if (cmd->stop_src == TRIG_NONE) devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register); - } else { + else devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register); - } devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register); devpriv->ao_mode2 &= 
~AO_UC_Initial_Load_Source; devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); @@ -3513,9 +3375,8 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s) if (board->reg_type & ni_reg_6xxx_mask) { unsigned immediate_bits = 0; unsigned i; - for (i = 0; i < s->n_chan; ++i) { + for (i = 0; i < s->n_chan; ++i) immediate_bits |= 1 << i; - } ao_win_out(immediate_bits, AO_Immediate_671x); ao_win_out(CLEAR_WG, AO_Misc_611x); } @@ -3689,9 +3550,8 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) return -EIO; } retval = ni_request_cdo_mite_channel(dev); - if (retval < 0) { + if (retval < 0) return retval; - } s->async->inttrig = &ni_cdo_inttrig; return 0; } @@ -3773,9 +3633,8 @@ static void handle_cdio_interrupt(struct comedi_device *dev) unsigned long flags; #endif - if ((board->reg_type & ni_reg_m_series_mask) == 0) { + if ((board->reg_type & ni_reg_m_series_mask) == 0) return; - } #ifdef PCIDMA spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->cdo_mite_chan) { @@ -3793,15 +3652,15 @@ static void handle_cdio_interrupt(struct comedi_device *dev) cdio_status = ni_readl(M_Offset_CDIO_Status); if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) { -/* printk("cdio error: statux=0x%x\n", cdio_status); */ + /* printk("cdio error: statux=0x%x\n", cdio_status); */ ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command); /* XXX just guessing this is needed and does something useful */ s->async->events |= COMEDI_CB_OVERFLOW; } if (cdio_status & CDO_FIFO_Empty_Bit) { -/* printk("cdio fifo empty\n"); */ + /* printk("cdio fifo empty\n"); */ ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit, M_Offset_CDIO_Command); -/* s->async->events |= COMEDI_CB_EOA; */ + /* s->async->events |= COMEDI_CB_EOA; */ } ni_event(dev, s); } @@ -3819,10 +3678,6 @@ static int ni_serial_insn_config(struct comedi_device *dev, switch (data[0]) { case INSN_CONFIG_SERIAL_CLOCK: - -#ifdef DEBUG_DIO - printk("SPI serial clock Config cd\n", data[1]); -#endif devpriv->serial_hw_mode = 1; devpriv->dio_control |= DIO_HW_Serial_Enable; @@ -3874,9 +3729,8 @@ static int ni_serial_insn_config(struct comedi_device *dev, case INSN_CONFIG_BIDIRECTIONAL_DATA: - if (devpriv->serial_interval_ns == 0) { + if (devpriv->serial_interval_ns == 0) return -EINVAL; - } byte_out = data[1] & 0xFF; @@ -3911,10 +3765,6 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev, unsigned int status1; int err = 0, count = 20; -#ifdef DEBUG_DIO - printk("ni_serial_hw_readwrite8: outputting 0x%x\n", data_out); -#endif - devpriv->dio_output &= ~DIO_Serial_Data_Mask; devpriv->dio_output |= DIO_Serial_Data_Out(data_out); devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register); @@ -3948,12 +3798,8 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev, DIO_Serial_IO_In_Progress_St goes high one bit too early. 
*/ udelay((devpriv->serial_interval_ns + 999) / 1000); - if (data_in != NULL) { + if (data_in != NULL) *data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register); -#ifdef DEBUG_DIO - printk("ni_serial_hw_readwrite8: inputted 0x%x\n", *data_in); -#endif - } Error: devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); @@ -3969,10 +3815,6 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev, struct ni_private *devpriv = dev->private; unsigned char mask, input = 0; -#ifdef DEBUG_DIO - printk("ni_serial_sw_readwrite8: outputting 0x%x\n", data_out); -#endif - /* Wait for one bit before transfer */ udelay((devpriv->serial_interval_ns + 999) / 1000); @@ -3981,9 +3823,8 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev, because it is a per-subdevice field, and serial is a separate subdevice from DIO. */ devpriv->dio_output &= ~DIO_SDOUT; - if (data_out & mask) { + if (data_out & mask) devpriv->dio_output |= DIO_SDOUT; - } devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register); @@ -4003,15 +3844,12 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev, /* Input current bit */ if (devpriv->stc_readw(dev, - DIO_Parallel_Input_Register) & DIO_SDIN) - { -/* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */ + DIO_Parallel_Input_Register) & DIO_SDIN) { + /* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */ input |= mask; } } -#ifdef DEBUG_DIO - printk("ni_serial_sw_readwrite8: inputted 0x%x\n", input); -#endif + if (data_in) *data_in = input; @@ -4023,9 +3861,8 @@ static void mio_common_detach(struct comedi_device *dev) struct ni_private *devpriv = dev->private; if (devpriv) { - if (devpriv->counter_dev) { + if (devpriv->counter_dev) ni_gpct_device_destroy(devpriv->counter_dev); - } } } @@ -4044,82 +3881,82 @@ static unsigned ni_gpct_to_stc_register(enum ni_gpct_register reg) { unsigned stc_register; switch (reg) { - case NITIO_G0_Autoincrement_Reg: + case NITIO_G0_AUTO_INC: stc_register = G_Autoincrement_Register(0); break; - case NITIO_G1_Autoincrement_Reg: + case NITIO_G1_AUTO_INC: stc_register = G_Autoincrement_Register(1); break; - case NITIO_G0_Command_Reg: + case NITIO_G0_CMD: stc_register = G_Command_Register(0); break; - case NITIO_G1_Command_Reg: + case NITIO_G1_CMD: stc_register = G_Command_Register(1); break; - case NITIO_G0_HW_Save_Reg: + case NITIO_G0_HW_SAVE: stc_register = G_HW_Save_Register(0); break; - case NITIO_G1_HW_Save_Reg: + case NITIO_G1_HW_SAVE: stc_register = G_HW_Save_Register(1); break; - case NITIO_G0_SW_Save_Reg: + case NITIO_G0_SW_SAVE: stc_register = G_Save_Register(0); break; - case NITIO_G1_SW_Save_Reg: + case NITIO_G1_SW_SAVE: stc_register = G_Save_Register(1); break; - case NITIO_G0_Mode_Reg: + case NITIO_G0_MODE: stc_register = G_Mode_Register(0); break; - case NITIO_G1_Mode_Reg: + case NITIO_G1_MODE: stc_register = G_Mode_Register(1); break; - case NITIO_G0_LoadA_Reg: + case NITIO_G0_LOADA: stc_register = G_Load_A_Register(0); break; - case NITIO_G1_LoadA_Reg: + case NITIO_G1_LOADA: stc_register = G_Load_A_Register(1); break; - case NITIO_G0_LoadB_Reg: + case NITIO_G0_LOADB: stc_register = G_Load_B_Register(0); break; - case NITIO_G1_LoadB_Reg: + case NITIO_G1_LOADB: stc_register = G_Load_B_Register(1); break; - case NITIO_G0_Input_Select_Reg: + case NITIO_G0_INPUT_SEL: stc_register = G_Input_Select_Register(0); break; - case NITIO_G1_Input_Select_Reg: + case NITIO_G1_INPUT_SEL: stc_register = G_Input_Select_Register(1); break; - 
case NITIO_G01_Status_Reg: + case NITIO_G01_STATUS: stc_register = G_Status_Register; break; - case NITIO_G01_Joint_Reset_Reg: + case NITIO_G01_RESET: stc_register = Joint_Reset_Register; break; - case NITIO_G01_Joint_Status1_Reg: + case NITIO_G01_STATUS1: stc_register = Joint_Status_1_Register; break; - case NITIO_G01_Joint_Status2_Reg: + case NITIO_G01_STATUS2: stc_register = Joint_Status_2_Register; break; - case NITIO_G0_Interrupt_Acknowledge_Reg: + case NITIO_G0_INT_ACK: stc_register = Interrupt_A_Ack_Register; break; - case NITIO_G1_Interrupt_Acknowledge_Reg: + case NITIO_G1_INT_ACK: stc_register = Interrupt_B_Ack_Register; break; - case NITIO_G0_Status_Reg: + case NITIO_G0_STATUS: stc_register = AI_Status_1_Register; break; - case NITIO_G1_Status_Reg: + case NITIO_G1_STATUS: stc_register = AO_Status_1_Register; break; - case NITIO_G0_Interrupt_Enable_Reg: + case NITIO_G0_INT_ENA: stc_register = Interrupt_A_Enable_Register; break; - case NITIO_G1_Interrupt_Enable_Reg: + case NITIO_G1_INT_ENA: stc_register = Interrupt_B_Enable_Register; break; default: @@ -4147,52 +3984,52 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits, switch (reg) { /* m-series-only registers */ - case NITIO_G0_Counting_Mode_Reg: + case NITIO_G0_CNT_MODE: ni_writew(bits, M_Offset_G0_Counting_Mode); break; - case NITIO_G1_Counting_Mode_Reg: + case NITIO_G1_CNT_MODE: ni_writew(bits, M_Offset_G1_Counting_Mode); break; - case NITIO_G0_Second_Gate_Reg: + case NITIO_G0_GATE2: ni_writew(bits, M_Offset_G0_Second_Gate); break; - case NITIO_G1_Second_Gate_Reg: + case NITIO_G1_GATE2: ni_writew(bits, M_Offset_G1_Second_Gate); break; - case NITIO_G0_DMA_Config_Reg: + case NITIO_G0_DMA_CFG: ni_writew(bits, M_Offset_G0_DMA_Config); break; - case NITIO_G1_DMA_Config_Reg: + case NITIO_G1_DMA_CFG: ni_writew(bits, M_Offset_G1_DMA_Config); break; - case NITIO_G0_ABZ_Reg: + case NITIO_G0_ABZ: ni_writew(bits, M_Offset_G0_MSeries_ABZ); break; - case NITIO_G1_ABZ_Reg: + case NITIO_G1_ABZ: ni_writew(bits, M_Offset_G1_MSeries_ABZ); break; /* 32 bit registers */ - case NITIO_G0_LoadA_Reg: - case NITIO_G1_LoadA_Reg: - case NITIO_G0_LoadB_Reg: - case NITIO_G1_LoadB_Reg: + case NITIO_G0_LOADA: + case NITIO_G1_LOADA: + case NITIO_G0_LOADB: + case NITIO_G1_LOADB: stc_register = ni_gpct_to_stc_register(reg); devpriv->stc_writel(dev, bits, stc_register); break; /* 16 bit registers */ - case NITIO_G0_Interrupt_Enable_Reg: + case NITIO_G0_INT_ENA: BUG_ON(bits & ~gpct_interrupt_a_enable_mask); ni_set_bitfield(dev, Interrupt_A_Enable_Register, gpct_interrupt_a_enable_mask, bits); break; - case NITIO_G1_Interrupt_Enable_Reg: + case NITIO_G1_INT_ENA: BUG_ON(bits & ~gpct_interrupt_b_enable_mask); ni_set_bitfield(dev, Interrupt_B_Enable_Register, gpct_interrupt_b_enable_mask, bits); break; - case NITIO_G01_Joint_Reset_Reg: + case NITIO_G01_RESET: BUG_ON(bits & ~gpct_joint_reset_mask); /* fall-through */ default: @@ -4210,21 +4047,18 @@ static unsigned ni_gpct_read_register(struct ni_gpct *counter, switch (reg) { /* m-series only registers */ - case NITIO_G0_DMA_Status_Reg: + case NITIO_G0_DMA_STATUS: return ni_readw(M_Offset_G0_DMA_Status); - break; - case NITIO_G1_DMA_Status_Reg: + case NITIO_G1_DMA_STATUS: return ni_readw(M_Offset_G1_DMA_Status); - break; /* 32 bit registers */ - case NITIO_G0_HW_Save_Reg: - case NITIO_G1_HW_Save_Reg: - case NITIO_G0_SW_Save_Reg: - case NITIO_G1_SW_Save_Reg: + case NITIO_G0_HW_SAVE: + case NITIO_G1_HW_SAVE: + case NITIO_G0_SW_SAVE: + case NITIO_G1_SW_SAVE: stc_register = 
ni_gpct_to_stc_register(reg); return devpriv->stc_readl(dev, stc_register); - break; /* 16 bit registers */ default: @@ -4391,11 +4225,10 @@ static int ni_E_init(struct comedi_device *dev) s->maxdata = (1 << board->aobits) - 1; s->range_table = board->ao_range_table; s->insn_read = &ni_ao_insn_read; - if (board->reg_type & ni_reg_6xxx_mask) { + if (board->reg_type & ni_reg_6xxx_mask) s->insn_write = &ni_ao_insn_write_671x; - } else { + else s->insn_write = &ni_ao_insn_write; - } s->insn_config = &ni_ao_insn_config; #ifdef PCIDMA if (board->n_aochan) { @@ -4429,7 +4262,7 @@ static int ni_E_init(struct comedi_device *dev) s->n_chan = board->num_p0_dio_channels; if (board->reg_type & ni_reg_m_series_mask) { s->subdev_flags |= - SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */ ; + SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */; s->insn_bits = &ni_m_series_dio_insn_bits; s->insn_config = &ni_m_series_dio_insn_config; s->do_cmd = &ni_cdio_cmd; @@ -4449,11 +4282,10 @@ static int ni_E_init(struct comedi_device *dev) /* 8255 device */ s = &dev->subdevices[NI_8255_DIO_SUBDEV]; - if (board->has_8255) { + if (board->has_8255) subdev_8255_init(dev, s, ni_8255_callback, (unsigned long)dev); - } else { + else s->type = COMEDI_SUBD_UNUSED; - } /* formerly general purpose counter/timer device, but no longer used */ s = &dev->subdevices[NI_UNUSED_SUBDEV]; @@ -4511,9 +4343,8 @@ static int ni_E_init(struct comedi_device *dev) s->n_chan = 10; } s->maxdata = 1; - if (board->reg_type & ni_reg_m_series_mask) { + if (board->reg_type & ni_reg_m_series_mask) s->insn_bits = &ni_pfi_insn_bits; - } s->insn_config = &ni_pfi_insn_config; ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0); @@ -4553,11 +4384,10 @@ static int ni_E_init(struct comedi_device *dev) s->insn_config = ni_rtsi_insn_config; ni_rtsi_init(dev); - if (board->reg_type & ni_reg_m_series_mask) { + if (board->reg_type & ni_reg_m_series_mask) counter_variant = ni_gpct_variant_m_series; - } else { + else counter_variant = ni_gpct_variant_e_series; - } devpriv->counter_dev = ni_gpct_device_construct(dev, &ni_gpct_write_register, &ni_gpct_read_register, @@ -4573,14 +4403,14 @@ static int ni_E_init(struct comedi_device *dev) s->maxdata = 0xffffffff; else s->maxdata = 0xffffff; - s->insn_read = &ni_gpct_insn_read; - s->insn_write = &ni_gpct_insn_write; - s->insn_config = &ni_gpct_insn_config; + s->insn_read = ni_tio_insn_read; + s->insn_write = ni_tio_insn_write; + s->insn_config = ni_tio_insn_config; #ifdef PCIDMA s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */; s->do_cmd = &ni_gpct_cmd; s->len_chanlist = 1; - s->do_cmdtest = &ni_gpct_cmdtest; + s->do_cmdtest = ni_tio_cmdtest; s->cancel = &ni_gpct_cancel; s->async_dma_dir = DMA_BIDIRECTIONAL; #endif @@ -4901,7 +4731,7 @@ static int pack_ad8842(int addr, int val, int *bitstring); struct caldac_struct { int n_chans; int n_bits; - int (*packbits) (int, int, int *); + int (*packbits)(int, int, int *); }; static struct caldac_struct caldacs[] = { @@ -4944,9 +4774,8 @@ static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s) if (diffbits) { unsigned int *maxdata_list; - if (n_chans > MAX_N_CALDACS) { + if (n_chans > MAX_N_CALDACS) printk("BUG! 
MAX_N_CALDACS too small\n"); - } s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list; chan = 0; for (i = 0; i < n_dacs; i++) { @@ -5143,36 +4972,11 @@ static void GPCT_Reset(struct comedi_device *dev, int chan) #endif -static int ni_gpct_insn_config(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - struct ni_gpct *counter = s->private; - return ni_tio_insn_config(counter, insn, data); -} - -static int ni_gpct_insn_read(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - struct ni_gpct *counter = s->private; - return ni_tio_rinsn(counter, insn, data); -} - -static int ni_gpct_insn_write(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - struct ni_gpct *counter = s->private; - return ni_tio_winsn(counter, insn, data); -} - #ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { - int retval; struct ni_gpct *counter = s->private; -/* const struct comedi_cmd *cmd = &s->async->cmd; */ + int retval; retval = ni_request_gpct_mite_channel(dev, counter->counter_index, COMEDI_INPUT); @@ -5183,19 +4987,8 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) } ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); ni_e_series_enable_second_irq(dev, counter->counter_index, 1); - retval = ni_tio_cmd(counter, s->async); - return retval; -} -#endif -#ifdef PCIDMA -static int ni_gpct_cmdtest(struct comedi_device *dev, - struct comedi_subdevice *s, struct comedi_cmd *cmd) -{ - struct ni_gpct *counter = s->private; - - return ni_tio_cmdtest(counter, cmd); - return -ENOTSUPP; + return ni_tio_cmd(dev, s); } #endif @@ -5330,9 +5123,8 @@ static int ni_config_filter(struct comedi_device *dev, unsigned pfi_channel, struct ni_private *devpriv __maybe_unused = dev->private; unsigned bits; - if ((board->reg_type & ni_reg_m_series_mask) == 0) { + if ((board->reg_type & ni_reg_m_series_mask) == 0) return -ENOTSUPP; - } bits = ni_readl(M_Offset_PFI_Filter); bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel); bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter); @@ -5413,9 +5205,8 @@ static void ni_rtsi_init(struct comedi_device *dev) /* Set clock mode to internal */ devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit; - if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) { + if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) printk("ni_set_master_clock failed, bug?"); - } /* default internal lines routing to RTSI bus lines */ devpriv->rtsi_trig_a_output_reg = RTSI_Trig_Output_Bits(0, @@ -5598,9 +5389,8 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev, devpriv->clock_source = source; /* it seems to typically take a few hundred microseconds for PLL to lock */ for (i = 0; i < timeout; ++i) { - if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) { + if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) break; - } udelay(1); } if (i == timeout) { @@ -5822,13 +5612,11 @@ static int cs5529_wait_for_idle(struct comedi_device *dev) for (i = 0; i < timeout; i++) { status = ni_ao_win_inw(dev, CAL_ADC_Status_67xx); - if ((status & CSS_ADC_BUSY) == 0) { + if ((status & CSS_ADC_BUSY) == 0) break; - } set_current_state(TASK_INTERRUPTIBLE); - if (schedule_timeout(1)) { + if (schedule_timeout(1)) return -EIO; - } } /* printk("looped %i times waiting for idle\n", i); */ if (i == timeout) { @@ -5854,9 +5642,8 @@ static 
void cs5529_command(struct comedi_device *dev, unsigned short value) udelay(1); } /* printk("looped %i times writing command to cs5529\n", i); */ - if (i == timeout) { + if (i == timeout) comedi_error(dev, "possible problem - never saw adc go busy?"); - } } /* write to cs5529 register */ @@ -5873,25 +5660,6 @@ static void cs5529_config_write(struct comedi_device *dev, unsigned int value, comedi_error(dev, "time or signal in cs5529_config_write()"); } -#ifdef NI_CS5529_DEBUG -/* read from cs5529 register */ -static unsigned int cs5529_config_read(struct comedi_device *dev, - unsigned int reg_select_bits) -{ - unsigned int value; - - reg_select_bits &= CSCMD_REGISTER_SELECT_MASK; - cs5529_command(dev, CSCMD_COMMAND | CSCMD_READ | reg_select_bits); - if (cs5529_wait_for_idle(dev)) - comedi_error(dev, "timeout or signal in cs5529_config_read()"); - value = (ni_ao_win_inw(dev, - CAL_ADC_Config_Data_High_Word_67xx) << 16) & - 0xff0000; - value |= ni_ao_win_inw(dev, CAL_ADC_Config_Data_Low_Word_67xx) & 0xffff; - return value; -} -#endif - static int cs5529_do_conversion(struct comedi_device *dev, unsigned short *data) { int retval; @@ -5967,13 +5735,6 @@ static int init_cs5529(struct comedi_device *dev) CSCMD_CONFIG_REGISTER); if (cs5529_wait_for_idle(dev)) comedi_error(dev, "timeout or signal in init_cs5529()\n"); -#endif -#ifdef NI_CS5529_DEBUG - printk("config: 0x%x\n", cs5529_config_read(dev, - CSCMD_CONFIG_REGISTER)); - printk("gain: 0x%x\n", cs5529_config_read(dev, CSCMD_GAIN_REGISTER)); - printk("offset: 0x%x\n", cs5529_config_read(dev, - CSCMD_OFFSET_REGISTER)); #endif return 0; } diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c index 229a273f201683..de421486b7585b 100644 --- a/drivers/staging/comedi/drivers/ni_mio_cs.c +++ b/drivers/staging/comedi/drivers/ni_mio_cs.c @@ -47,8 +47,6 @@ See the notes in the ni_atmio.o driver. #include #include -#undef DEBUG - #define ATMIO 1 #undef PCIMIO diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index e3a8fa96d9b398..30c46a3c1767db 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c @@ -47,8 +47,6 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org */ #define USE_DMA -/* #define DEBUG 1 */ -/* #define DEBUG_FLAGS */ #include #include @@ -60,13 +58,6 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org #include "comedi_fc.h" #include "mite.h" -#undef DPRINTK -#ifdef DEBUG -#define DPRINTK(format, args...) pr_debug(format, ## args) -#else -#define DPRINTK(format, args...) 
do { } while (0) -#endif - #define PCI_DIO_SIZE 4096 #define PCI_MITE_SIZE 4096 @@ -319,14 +310,6 @@ static int ni_pcidio_ns_to_timer(int *nanosec, int round_mode); static int setup_mite_dma(struct comedi_device *dev, struct comedi_subdevice *s); -#ifdef DEBUG_FLAGS -static void ni_pcidio_print_flags(unsigned int flags); -static void ni_pcidio_print_status(unsigned int status); -#else -#define ni_pcidio_print_flags(x) -#define ni_pcidio_print_status(x) -#endif - static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev) { struct nidio96_private *devpriv = dev->private; @@ -401,7 +384,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct nidio96_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; struct mite_struct *mite = devpriv->mite; @@ -427,19 +410,10 @@ static irqreturn_t nidio_interrupt(int irq, void *d) Interrupt_And_Window_Status); flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags); - DPRINTK("ni_pcidio_interrupt: status=0x%02x,flags=0x%02x\n", - status, flags); - ni_pcidio_print_flags(flags); - ni_pcidio_print_status(status); - spin_lock(&devpriv->mite_channel_lock); if (devpriv->di_mite_chan) m_status = mite_get_status(devpriv->di_mite_chan); -#ifdef MITE_DEBUG - mite_print_chsr(m_status); -#endif - /* mite_dump_regs(mite); */ if (m_status & CHSR_INT) { if (m_status & CHSR_LINKC) { writel(CHOR_CLRLC, @@ -450,7 +424,8 @@ static irqreturn_t nidio_interrupt(int irq, void *d) } if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY | CHSR_DRQ1 | CHSR_MRDY)) { - DPRINTK("unknown mite interrupt, disabling IRQ\n"); + dev_dbg(dev->class_dev, + "unknown mite interrupt, disabling IRQ\n"); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; disable_irq(dev->irq); } @@ -460,7 +435,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d) while (status & DataLeft) { work++; if (work > 20) { - DPRINTK("too much work in interrupt\n"); + dev_dbg(dev->class_dev, "too much work in interrupt\n"); writeb(0x00, devpriv->mite->daq_io_addr + Master_DMA_And_Interrupt_Control); @@ -470,11 +445,11 @@ static irqreturn_t nidio_interrupt(int irq, void *d) flags &= IntEn; if (flags & TransferReady) { - /* DPRINTK("TransferReady\n"); */ while (flags & TransferReady) { work++; if (work > 100) { - DPRINTK("too much work in interrupt\n"); + dev_dbg(dev->class_dev, + "too much work in interrupt\n"); writeb(0x00, devpriv->mite->daq_io_addr + Master_DMA_And_Interrupt_Control @@ -488,21 +463,13 @@ static irqreturn_t nidio_interrupt(int irq, void *d) data2 = (auxdata & 0xffff0000) >> 16; comedi_buf_put(async, data1); comedi_buf_put(async, data2); - /* DPRINTK("read:%d, %d\n",data1,data2); */ flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags); } - /* DPRINTK("buf_int_count: %d\n", - async->buf_int_count); */ - /* DPRINTK("1) IntEn=%d,flags=%d,status=%d\n", - IntEn,flags,status); */ - /* ni_pcidio_print_flags(flags); */ - /* ni_pcidio_print_status(status); */ async->events |= COMEDI_CB_BLOCK; } if (flags & CountExpired) { - DPRINTK("CountExpired\n"); writeb(ClearExpired, devpriv->mite->daq_io_addr + Group_1_Second_Clear); @@ -511,41 +478,26 @@ static irqreturn_t nidio_interrupt(int irq, void *d) writeb(0x00, devpriv->mite->daq_io_addr + OpMode); break; } else if (flags & Waited) { - DPRINTK("Waited\n"); writeb(ClearWaited, devpriv->mite->daq_io_addr + Group_1_First_Clear); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; 
break; } else if (flags & PrimaryTC) { - DPRINTK("PrimaryTC\n"); writeb(ClearPrimaryTC, devpriv->mite->daq_io_addr + Group_1_First_Clear); async->events |= COMEDI_CB_EOA; } else if (flags & SecondaryTC) { - DPRINTK("SecondaryTC\n"); writeb(ClearSecondaryTC, devpriv->mite->daq_io_addr + Group_1_First_Clear); async->events |= COMEDI_CB_EOA; } -#if 0 - else { - DPRINTK("ni_pcidio: unknown interrupt\n"); - async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; - writeb(0x00, - devpriv->mite->daq_io_addr + - Master_DMA_And_Interrupt_Control); - } -#endif + flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags); status = readb(devpriv->mite->daq_io_addr + Interrupt_And_Window_Status); - /* DPRINTK("loop end: IntEn=0x%02x,flags=0x%02x," - "status=0x%02x\n", IntEn, flags, status); */ - /* ni_pcidio_print_flags(flags); */ - /* ni_pcidio_print_status(status); */ } out: @@ -562,82 +514,6 @@ static irqreturn_t nidio_interrupt(int irq, void *d) return IRQ_HANDLED; } -#ifdef DEBUG_FLAGS -static const char *bit_set_string(unsigned int bits, unsigned int bit, - const char *const strings[]) -{ - return (bits & (1U << bit)) ? strings[bit] : ""; -} - -static const char *const flags_strings[] = { - " TransferReady", " CountExpired", " 2", " 3", - " 4", " Waited", " PrimaryTC", " SecondaryTC", -}; - - -static void ni_pcidio_print_flags(unsigned int flags) -{ - pr_debug("group_1_flags:%s%s%s%s%s%s%s%s\n", - bit_set_string(flags, 7, flags_strings), - bit_set_string(flags, 6, flags_strings), - bit_set_string(flags, 5, flags_strings), - bit_set_string(flags, 4, flags_strings), - bit_set_string(flags, 3, flags_strings), - bit_set_string(flags, 2, flags_strings), - bit_set_string(flags, 1, flags_strings), - bit_set_string(flags, 0, flags_strings)); -} - -static const char *const status_strings[] = { - " DataLeft1", " Reserved1", " Req1", " StopTrig1", - " DataLeft2", " Reserved2", " Req2", " StopTrig2", -}; - -static void ni_pcidio_print_status(unsigned int flags) -{ - pr_debug("group_status:%s%s%s%s%s%s%s%s\n", - bit_set_string(flags, 7, status_strings), - bit_set_string(flags, 6, status_strings), - bit_set_string(flags, 5, status_strings), - bit_set_string(flags, 4, status_strings), - bit_set_string(flags, 3, status_strings), - bit_set_string(flags, 2, status_strings), - bit_set_string(flags, 1, status_strings), - bit_set_string(flags, 0, status_strings)); -} -#endif - -#ifdef unused -static void debug_int(struct comedi_device *dev) -{ - struct nidio96_private *devpriv = dev->private; - int a, b; - static int n_int; - struct timeval tv; - - do_gettimeofday(&tv); - a = readb(devpriv->mite->daq_io_addr + Group_Status); - b = readb(devpriv->mite->daq_io_addr + Group_1_Flags); - - if (n_int < 10) { - DPRINTK("status 0x%02x flags 0x%02x time %06d\n", a, b, - (int)tv.tv_usec); - } - - while (b & 1) { - writew(0xff, devpriv->mite->daq_io_addr + Group_1_FIFO); - b = readb(devpriv->mite->daq_io_addr + Group_1_Flags); - } - - b = readb(devpriv->mite->daq_io_addr + Group_1_Flags); - - if (n_int < 10) { - DPRINTK("new status 0x%02x\n", b); - n_int++; - } -} -#endif - static int ni_pcidio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, @@ -883,7 +759,6 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) s->async->inttrig = ni_pcidio_inttrig; } - DPRINTK("ni_pcidio: command started\n"); return 0; } @@ -1074,6 +949,19 @@ static int pci_6534_upload_firmware(struct comedi_device *dev) return ret; } +static void nidio_reset_board(struct comedi_device *dev) +{ 
+ struct nidio96_private *devpriv = dev->private; + void __iomem *daq_mmio = devpriv->mite->daq_io_addr; + + writel(0, daq_mmio + Port_IO(0)); + writel(0, daq_mmio + Port_Pin_Directions(0)); + writel(0, daq_mmio + Port_Pin_Mask(0)); + + /* disable interrupts on board */ + writeb(0, daq_mmio + Master_DMA_And_Interrupt_Control); +} + static int nidio_auto_attach(struct comedi_device *dev, unsigned long context) { @@ -1115,13 +1003,14 @@ static int nidio_auto_attach(struct comedi_device *dev, if (devpriv->di_mite_ring == NULL) return -ENOMEM; - irq = mite_irq(devpriv->mite); if (board->uses_firmware) { ret = pci_6534_upload_firmware(dev); if (ret < 0) return ret; } + nidio_reset_board(dev); + ret = comedi_alloc_subdevices(dev, 1); if (ret) return ret; @@ -1149,21 +1038,13 @@ static int nidio_auto_attach(struct comedi_device *dev, s->async_dma_dir = DMA_BIDIRECTIONAL; s->poll = &ni_pcidio_poll; - writel(0, devpriv->mite->daq_io_addr + Port_IO(0)); - writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0)); - writel(0, devpriv->mite->daq_io_addr + Port_Pin_Mask(0)); - - /* disable interrupts on board */ - writeb(0x00, - devpriv->mite->daq_io_addr + - Master_DMA_And_Interrupt_Control); - - ret = request_irq(irq, nidio_interrupt, IRQF_SHARED, - "ni_pcidio", dev); - if (ret < 0) - dev_warn(dev->class_dev, "irq not available\n"); - - dev->irq = irq; + irq = mite_irq(devpriv->mite); + if (irq) { + ret = request_irq(irq, nidio_interrupt, IRQF_SHARED, + dev->board_name, dev); + if (ret == 0) + dev->irq = irq; + } return 0; } @@ -1200,7 +1081,7 @@ static int ni_pcidio_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &ni_pcidio_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(ni_pcidio_pci_table) = { +static const struct pci_device_id ni_pcidio_pci_table[] = { { PCI_VDEVICE(NI, 0x1150), BOARD_PCIDIO_32HS }, { PCI_VDEVICE(NI, 0x12b0), BOARD_PCI6534 }, { PCI_VDEVICE(NI, 0x1320), BOARD_PXI6533 }, diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c index 536be83af54949..0ed9804558753a 100644 --- a/drivers/staging/comedi/drivers/ni_pcimio.c +++ b/drivers/staging/comedi/drivers/ni_pcimio.c @@ -116,8 +116,6 @@ SCXI is probably broken for m-series boards. #include "ni_stc.h" #include "mite.h" -/* #define PCI_DEBUG */ - #define PCIDMA #define PCIMIO 1 @@ -134,24 +132,26 @@ SCXI is probably broken for m-series boards. 63 different possibilities. An AO channel can not act as it's own OFFSET or REFERENCE. 
*/ -static const struct comedi_lrange range_ni_M_628x_ao = { 8, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE(-2, 2), - RANGE(-1, 1), - RANGE(-5, 15), - RANGE(0, 10), - RANGE(3, 7), - RANGE(4, 6), - RANGE_ext(-1, 1) - } +static const struct comedi_lrange range_ni_M_628x_ao = { + 8, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2), + BIP_RANGE(1), + RANGE(-5, 15), + UNI_RANGE(10), + RANGE(3, 7), + RANGE(4, 6), + RANGE_ext(-1, 1) + } }; -static const struct comedi_lrange range_ni_M_625x_ao = { 3, { - RANGE(-10, 10), - RANGE(-5, 5), - RANGE_ext(-1, 1) - } +static const struct comedi_lrange range_ni_M_625x_ao = { + 3, { + BIP_RANGE(10), + BIP_RANGE(5), + RANGE_ext(-1, 1) + } }; enum ni_pcimio_boardid { @@ -1178,9 +1178,9 @@ static void m_series_stc_writew(struct comedi_device *dev, uint16_t data, offset = M_Offset_AO_FIFO_Clear; break; case DIO_Control_Register: - printk - ("%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n", - __func__, reg); + dev_dbg(dev->class_dev, + "%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n", + __func__, reg); return; break; case G_Autoincrement_Register(0): @@ -1471,6 +1471,7 @@ static int pcimio_auto_attach(struct comedi_device *dev, struct pci_dev *pcidev = comedi_to_pci_dev(dev); const struct ni_board_struct *board = NULL; struct ni_private *devpriv; + unsigned int irq; int ret; if (context < ARRAY_SIZE(ni_boards)) @@ -1532,18 +1533,12 @@ static int pcimio_auto_attach(struct comedi_device *dev, if (board->reg_type == ni_reg_6143) init_6143(dev); - dev->irq = mite_irq(devpriv->mite); - - if (dev->irq == 0) { - pr_warn("unknown irq (bad)\n"); - } else { - pr_debug("( irq = %u )\n", dev->irq); - ret = request_irq(dev->irq, ni_E_interrupt, NI_E_IRQ_FLAGS, - DRV_NAME, dev); - if (ret < 0) { - pr_warn("irq not available\n"); - dev->irq = 0; - } + irq = mite_irq(devpriv->mite); + if (irq) { + ret = request_irq(irq, ni_E_interrupt, NI_E_IRQ_FLAGS, + dev->board_name, dev); + if (ret == 0) + dev->irq = irq; } ret = ni_E_init(dev); @@ -1639,7 +1634,7 @@ static int ni_pcimio_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &ni_pcimio_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(ni_pcimio_pci_table) = { +static const struct pci_device_id ni_pcimio_pci_table[] = { { PCI_VDEVICE(NI, 0x0162), BOARD_PCIMIO_16XE_50 }, /* 0x1620? 
*/ { PCI_VDEVICE(NI, 0x1170), BOARD_PCIMIO_16XE_10 }, { PCI_VDEVICE(NI, 0x1180), BOARD_PCIMIO_16E_1 }, diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c index 9b120c77d83a86..92691b491c2477 100644 --- a/drivers/staging/comedi/drivers/ni_tio.c +++ b/drivers/staging/comedi/drivers/ni_tio.c @@ -53,10 +53,6 @@ static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter, unsigned generic_clock_source); static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter); -MODULE_AUTHOR("Comedi "); -MODULE_DESCRIPTION("Comedi support for NI general-purpose counters"); -MODULE_LICENSE("GPL"); - static inline enum Gi_Counting_Mode_Reg_Bits Gi_Alternate_Sync_Bit(enum ni_gpct_variant variant) @@ -277,19 +273,6 @@ static inline unsigned NI_660x_RTSI_Second_Gate_Select(unsigned n) static const unsigned int counter_status_mask = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING; -static int __init ni_tio_init_module(void) -{ - return 0; -} - -module_init(ni_tio_init_module); - -static void __exit ni_tio_cleanup_module(void) -{ -} - -module_exit(ni_tio_cleanup_module); - struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device *dev, void (*write_register) (struct ni_gpct @@ -362,74 +345,64 @@ static int ni_tio_second_gate_registers_present(const struct ni_gpct_device static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter) { - write_register(counter, Gi_Reset_Bit(counter->counter_index), - NITIO_Gxx_Joint_Reset_Reg(counter->counter_index)); + unsigned cidx = counter->counter_index; + + write_register(counter, Gi_Reset_Bit(cidx), NITIO_RESET_REG(cidx)); } void ni_tio_init_counter(struct ni_gpct *counter) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned cidx = counter->counter_index; ni_tio_reset_count_and_disarm(counter); + /* initialize counter registers */ - counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] = - 0x0; - write_register(counter, - counter_dev-> - regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)], - NITIO_Gi_Autoincrement_Reg(counter->counter_index)); - ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), + counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0; + write_register(counter, counter_dev->regs[NITIO_AUTO_INC_REG(cidx)], + NITIO_AUTO_INC_REG(cidx)); + + ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), ~0, Gi_Synchronize_Gate_Bit); - ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0, - 0); - counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0; - write_register(counter, - counter_dev-> - regs[NITIO_Gi_LoadA_Reg(counter->counter_index)], - NITIO_Gi_LoadA_Reg(counter->counter_index)); - counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0; - write_register(counter, - counter_dev-> - regs[NITIO_Gi_LoadB_Reg(counter->counter_index)], - NITIO_Gi_LoadB_Reg(counter->counter_index)); - ni_tio_set_bits(counter, - NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0, - 0); - if (ni_tio_counting_mode_registers_present(counter_dev)) { - ni_tio_set_bits(counter, - NITIO_Gi_Counting_Mode_Reg(counter-> - counter_index), ~0, - 0); - } + + ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0); + + counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0; + write_register(counter, counter_dev->regs[NITIO_LOADA_REG(cidx)], + NITIO_LOADA_REG(cidx)); + + counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0; + write_register(counter, counter_dev->regs[NITIO_LOADB_REG(cidx)], + NITIO_LOADB_REG(cidx)); + + 
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0); + + if (ni_tio_counting_mode_registers_present(counter_dev)) + ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), ~0, 0); + if (ni_tio_second_gate_registers_present(counter_dev)) { - counter_dev-> - regs[NITIO_Gi_Second_Gate_Reg(counter->counter_index)] = - 0x0; + counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0; write_register(counter, - counter_dev-> - regs[NITIO_Gi_Second_Gate_Reg - (counter->counter_index)], - NITIO_Gi_Second_Gate_Reg(counter-> - counter_index)); + counter_dev->regs[NITIO_GATE2_REG(cidx)], + NITIO_GATE2_REG(cidx)); } - ni_tio_set_bits(counter, - NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0, - 0x0); - ni_tio_set_bits(counter, - NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), - ~0, 0x0); + + ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0); + + ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx), ~0, 0x0); } EXPORT_SYMBOL_GPL(ni_tio_init_counter); static unsigned int ni_tio_counter_status(struct ni_gpct *counter) { - unsigned int status = 0; + unsigned cidx = counter->counter_index; const unsigned bits = read_register(counter, - NITIO_Gxx_Status_Reg(counter-> - counter_index)); - if (bits & Gi_Armed_Bit(counter->counter_index)) { + NITIO_SHARED_STATUS_REG(cidx)); + unsigned int status = 0; + + if (bits & Gi_Armed_Bit(cidx)) { status |= COMEDI_COUNTER_ARMED; - if (bits & Gi_Counting_Bit(counter->counter_index)) + if (bits & Gi_Counting_Bit(cidx)) status |= COMEDI_COUNTER_COUNTING; } return status; @@ -438,8 +411,8 @@ static unsigned int ni_tio_counter_status(struct ni_gpct *counter) static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned counting_mode_reg = - NITIO_Gi_Counting_Mode_Reg(counter->counter_index); + unsigned cidx = counter->counter_index; + const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx); static const uint64_t min_normal_sync_period_ps = 25000; const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter, ni_tio_generic_clock_src_select @@ -476,6 +449,7 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync) static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned cidx = counter->counter_index; unsigned mode_reg_mask; unsigned mode_reg_values; unsigned input_select_bits = 0; @@ -502,7 +476,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode) default: break; } - ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), mode_reg_mask, mode_reg_values); if (ni_tio_counting_mode_registers_present(counter_dev)) { @@ -515,15 +489,13 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode) Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask; if (mode & NI_GPCT_INDEX_ENABLE_BIT) counting_mode_bits |= Gi_Index_Mode_Bit; - ni_tio_set_bits(counter, - NITIO_Gi_Counting_Mode_Reg(counter-> - counter_index), + ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask | Gi_Index_Mode_Bit, counting_mode_bits); ni_tio_set_sync_mode(counter, 0); } - ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), Gi_Up_Down_Mask, (mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) << Gi_Up_Down_Shift); @@ -532,8 +504,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned 
mode) input_select_bits |= Gi_Or_Gate_Bit; if (mode & NI_GPCT_INVERT_OUTPUT_BIT) input_select_bits |= Gi_Output_Polarity_Bit; - ni_tio_set_bits(counter, - NITIO_Gi_Input_Select_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit | Gi_Output_Polarity_Bit, input_select_bits); @@ -543,7 +514,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode) int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger) { struct ni_gpct_device *counter_dev = counter->counter_dev; - + unsigned cidx = counter->counter_index; unsigned command_transient_bits = 0; if (arm) { @@ -581,9 +552,7 @@ int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger) } break; } - ni_tio_set_bits(counter, - NITIO_Gi_Counting_Mode_Reg - (counter->counter_index), + ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), Gi_HW_Arm_Select_Mask (counter_dev->variant) | Gi_HW_Arm_Enable_Bit, @@ -592,8 +561,7 @@ int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger) } else { command_transient_bits |= Gi_Disarm_Bit; } - ni_tio_set_bits_transient(counter, - NITIO_Gi_Command_Reg(counter->counter_index), + ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx), 0, 0, command_transient_bits); return 0; } @@ -717,8 +685,8 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter, unsigned int clock_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned second_gate_reg = - NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned cidx = counter->counter_index; + const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); if (counter_dev->variant != ni_gpct_variant_m_series) return; @@ -747,6 +715,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter, unsigned int period_ns) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned cidx = counter->counter_index; unsigned input_select_bits = 0; static const uint64_t pico_per_nano = 1000; @@ -766,8 +735,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter, } if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT) input_select_bits |= Gi_Source_Polarity_Bit; - ni_tio_set_bits(counter, - NITIO_Gi_Input_Select_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), Gi_Source_Select_Mask | Gi_Source_Polarity_Bit, input_select_bits); ni_tio_set_source_subselect(counter, clock_source); @@ -791,9 +759,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter, return -EINVAL; break; } - ni_tio_set_bits(counter, - NITIO_Gi_Counting_Mode_Reg(counter-> - counter_index), + ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), Gi_Prescale_X2_Bit(counter_dev->variant) | Gi_Prescale_X8_Bit(counter_dev->variant), counting_mode_bits); @@ -806,15 +772,12 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter, static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter, - NITIO_Gi_Counting_Mode_Reg - (counter-> - counter_index)); + unsigned cidx = counter->counter_index; + const unsigned counting_mode_bits = + ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx)); unsigned bits = 0; - if (ni_tio_get_soft_copy(counter, - NITIO_Gi_Input_Select_Reg - (counter->counter_index)) & + if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) & Gi_Source_Polarity_Bit) bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT; if (counting_mode_bits & 
Gi_Prescale_X2_Bit(counter_dev->variant)) @@ -827,15 +790,13 @@ static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter) static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned second_gate_reg = - NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned cidx = counter->counter_index; + const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); unsigned clock_source = 0; unsigned i; - const unsigned input_select = (ni_tio_get_soft_copy(counter, - NITIO_Gi_Input_Select_Reg - (counter->counter_index)) - & Gi_Source_Select_Mask) >> - Gi_Source_Select_Shift; + const unsigned input_select = + (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) & + Gi_Source_Select_Mask) >> Gi_Source_Select_Shift; switch (input_select) { case NI_M_Series_Timebase_1_Clock: @@ -895,12 +856,11 @@ static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter) static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter) { unsigned clock_source = 0; + unsigned cidx = counter->counter_index; + const unsigned input_select = + (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) & + Gi_Source_Select_Mask) >> Gi_Source_Select_Shift; unsigned i; - const unsigned input_select = (ni_tio_get_soft_copy(counter, - NITIO_Gi_Input_Select_Reg - (counter->counter_index)) - & Gi_Source_Select_Mask) >> - Gi_Source_Select_Shift; switch (input_select) { case NI_660x_Timebase_1_Clock: @@ -1022,6 +982,7 @@ static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter, unsigned int gate_source) { const unsigned mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask; + unsigned cidx = counter->counter_index; unsigned mode_values = 0; if (gate_source & CR_INVERT) @@ -1030,7 +991,7 @@ static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter, mode_values |= Gi_Rising_Edge_Gating_Bits; else mode_values |= Gi_Level_Gating_Bits; - ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), mode_mask, mode_values); } @@ -1038,6 +999,7 @@ static int ni_660x_set_first_gate(struct ni_gpct *counter, unsigned int gate_source) { const unsigned selected_gate = CR_CHAN(gate_source); + unsigned cidx = counter->counter_index; /* bits of selected_gate that may be meaningful to input select register */ const unsigned selected_gate_mask = 0x1f; unsigned ni_660x_gate_select; @@ -1075,8 +1037,7 @@ static int ni_660x_set_first_gate(struct ni_gpct *counter, return -EINVAL; break; } - ni_tio_set_bits(counter, - NITIO_Gi_Input_Select_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_660x_gate_select)); return 0; @@ -1086,6 +1047,7 @@ static int ni_m_series_set_first_gate(struct ni_gpct *counter, unsigned int gate_source) { const unsigned selected_gate = CR_CHAN(gate_source); + unsigned cidx = counter->counter_index; /* bits of selected_gate that may be meaningful to input select register */ const unsigned selected_gate_mask = 0x1f; unsigned ni_m_series_gate_select; @@ -1124,8 +1086,7 @@ static int ni_m_series_set_first_gate(struct ni_gpct *counter, return -EINVAL; break; } - ni_tio_set_bits(counter, - NITIO_Gi_Input_Select_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_m_series_gate_select)); return 0; @@ -1135,8 +1096,8 @@ static int ni_660x_set_second_gate(struct ni_gpct *counter, unsigned 
int gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned second_gate_reg = - NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned cidx = counter->counter_index; + const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); const unsigned selected_second_gate = CR_CHAN(gate_source); /* bits of second_gate that may be meaningful to second gate register */ static const unsigned selected_second_gate_mask = 0x1f; @@ -1194,8 +1155,8 @@ static int ni_m_series_set_second_gate(struct ni_gpct *counter, unsigned int gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned second_gate_reg = - NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned cidx = counter->counter_index; + const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); const unsigned selected_second_gate = CR_CHAN(gate_source); /* bits of second_gate that may be meaningful to second gate register */ static const unsigned selected_second_gate_mask = 0x1f; @@ -1222,15 +1183,13 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index, unsigned int gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned second_gate_reg = - NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned cidx = counter->counter_index; + const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); switch (gate_index) { case 0: if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) { - ni_tio_set_bits(counter, - NITIO_Gi_Mode_Reg(counter-> - counter_index), + ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), Gi_Gating_Mode_Mask, Gi_Gating_Disabled_Bits); return 0; @@ -1292,11 +1251,12 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index, unsigned int source) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned cidx = counter->counter_index; if (counter_dev->variant == ni_gpct_variant_m_series) { unsigned int abz_reg, shift, mask; - abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index); + abz_reg = NITIO_ABZ_REG(cidx); switch (index) { case NI_GPCT_SOURCE_ENCODER_A: shift = 10; @@ -1319,7 +1279,6 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index, counter_dev->regs[abz_reg] &= ~mask; counter_dev->regs[abz_reg] |= (source << shift) & mask; write_register(counter, counter_dev->regs[abz_reg], abz_reg); -/* printk("%s %x %d %d\n", __func__, counter_dev->regs[abz_reg], index, source); */ return 0; } return -EINVAL; @@ -1491,12 +1450,10 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index, unsigned int *gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; - const unsigned mode_bits = ni_tio_get_soft_copy(counter, - NITIO_Gi_Mode_Reg - (counter-> - counter_index)); - const unsigned second_gate_reg = - NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned cidx = counter->counter_index; + const unsigned mode_bits = + ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)); + const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); unsigned gate_select_bits; switch (gate_index) { @@ -1508,8 +1465,7 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index, } else { gate_select_bits = (ni_tio_get_soft_copy(counter, - NITIO_Gi_Input_Select_Reg - (counter->counter_index)) & + NITIO_INPUT_SEL_REG(cidx)) & Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift; } switch (counter_dev->variant) { @@ -1577,9 +1533,13 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index, return 0; } -int ni_tio_insn_config(struct 
ni_gpct *counter, - struct comedi_insn *insn, unsigned int *data) +int ni_tio_insn_config(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, + unsigned int *data) { + struct ni_gpct *counter = s->private; + switch (data[0]) { case INSN_CONFIG_SET_COUNTER_MODE: return ni_tio_set_counter_mode(counter, data[1]); @@ -1623,11 +1583,15 @@ int ni_tio_insn_config(struct ni_gpct *counter, } EXPORT_SYMBOL_GPL(ni_tio_insn_config); -int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn, - unsigned int *data) +int ni_tio_insn_read(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, + unsigned int *data) { + struct ni_gpct *counter = s->private; struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned channel = CR_CHAN(insn->chanspec); + unsigned cidx = counter->counter_index; unsigned first_read; unsigned second_read; unsigned correct_read; @@ -1636,65 +1600,57 @@ int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn, return 0; switch (channel) { case 0: - ni_tio_set_bits(counter, - NITIO_Gi_Command_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), Gi_Save_Trace_Bit, 0); - ni_tio_set_bits(counter, - NITIO_Gi_Command_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), Gi_Save_Trace_Bit, Gi_Save_Trace_Bit); /* The count doesn't get latched until the next clock edge, so it is possible the count may change (once) while we are reading. Since the read of the SW_Save_Reg isn't atomic (apparently even when it's a 32 bit register according to 660x docs), we need to read twice and make sure the reading hasn't changed. If it has, a third read will be correct since the count value will definitely have latched by then. 
*/ - first_read = - read_register(counter, - NITIO_Gi_SW_Save_Reg(counter->counter_index)); - second_read = - read_register(counter, - NITIO_Gi_SW_Save_Reg(counter->counter_index)); + first_read = read_register(counter, NITIO_SW_SAVE_REG(cidx)); + second_read = read_register(counter, NITIO_SW_SAVE_REG(cidx)); if (first_read != second_read) correct_read = - read_register(counter, - NITIO_Gi_SW_Save_Reg(counter-> - counter_index)); + read_register(counter, NITIO_SW_SAVE_REG(cidx)); else correct_read = first_read; data[0] = correct_read; return 0; break; case 1: - data[0] = - counter_dev-> - regs[NITIO_Gi_LoadA_Reg(counter->counter_index)]; + data[0] = counter_dev->regs[NITIO_LOADA_REG(cidx)]; break; case 2: - data[0] = - counter_dev-> - regs[NITIO_Gi_LoadB_Reg(counter->counter_index)]; + data[0] = counter_dev->regs[NITIO_LOADB_REG(cidx)]; break; } return 0; } -EXPORT_SYMBOL_GPL(ni_tio_rinsn); +EXPORT_SYMBOL_GPL(ni_tio_insn_read); static unsigned ni_tio_next_load_register(struct ni_gpct *counter) { - const unsigned bits = read_register(counter, - NITIO_Gxx_Status_Reg(counter-> - counter_index)); + unsigned cidx = counter->counter_index; + const unsigned bits = + read_register(counter, NITIO_SHARED_STATUS_REG(cidx)); - if (bits & Gi_Next_Load_Source_Bit(counter->counter_index)) - return NITIO_Gi_LoadB_Reg(counter->counter_index); + if (bits & Gi_Next_Load_Source_Bit(cidx)) + return NITIO_LOADB_REG(cidx); else - return NITIO_Gi_LoadA_Reg(counter->counter_index); + return NITIO_LOADA_REG(cidx); } -int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn, - unsigned int *data) +int ni_tio_insn_write(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, + unsigned int *data) { + struct ni_gpct *counter = s->private; struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned channel = CR_CHAN(insn->chanspec); + unsigned cidx = counter->counter_index; unsigned load_reg; if (insn->n < 1) @@ -1705,24 +1661,18 @@ int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn, /* Don't disturb load source select, just use whichever load register is already selected. 
*/ load_reg = ni_tio_next_load_register(counter); write_register(counter, data[0], load_reg); - ni_tio_set_bits_transient(counter, - NITIO_Gi_Command_Reg(counter-> - counter_index), + ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx), 0, 0, Gi_Load_Bit); /* restore state of load reg to whatever the user set last set it to */ write_register(counter, counter_dev->regs[load_reg], load_reg); break; case 1: - counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = - data[0]; - write_register(counter, data[0], - NITIO_Gi_LoadA_Reg(counter->counter_index)); + counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0]; + write_register(counter, data[0], NITIO_LOADA_REG(cidx)); break; case 2: - counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = - data[0]; - write_register(counter, data[0], - NITIO_Gi_LoadB_Reg(counter->counter_index)); + counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0]; + write_register(counter, data[0], NITIO_LOADB_REG(cidx)); break; default: return -EINVAL; @@ -1730,4 +1680,19 @@ int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn, } return 0; } -EXPORT_SYMBOL_GPL(ni_tio_winsn); +EXPORT_SYMBOL_GPL(ni_tio_insn_write); + +static int __init ni_tio_init_module(void) +{ + return 0; +} +module_init(ni_tio_init_module); + +static void __exit ni_tio_cleanup_module(void) +{ +} +module_exit(ni_tio_cleanup_module); + +MODULE_AUTHOR("Comedi "); +MODULE_DESCRIPTION("Comedi support for NI general-purpose counters"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h index 7e13697b32544a..68378dab4e70e3 100644 --- a/drivers/staging/comedi/drivers/ni_tio.h +++ b/drivers/staging/comedi/drivers/ni_tio.h @@ -25,77 +25,77 @@ struct mite_struct; struct ni_gpct_device; enum ni_gpct_register { - NITIO_G0_Autoincrement_Reg, - NITIO_G1_Autoincrement_Reg, - NITIO_G2_Autoincrement_Reg, - NITIO_G3_Autoincrement_Reg, - NITIO_G0_Command_Reg, - NITIO_G1_Command_Reg, - NITIO_G2_Command_Reg, - NITIO_G3_Command_Reg, - NITIO_G0_HW_Save_Reg, - NITIO_G1_HW_Save_Reg, - NITIO_G2_HW_Save_Reg, - NITIO_G3_HW_Save_Reg, - NITIO_G0_SW_Save_Reg, - NITIO_G1_SW_Save_Reg, - NITIO_G2_SW_Save_Reg, - NITIO_G3_SW_Save_Reg, - NITIO_G0_Mode_Reg, - NITIO_G1_Mode_Reg, - NITIO_G2_Mode_Reg, - NITIO_G3_Mode_Reg, - NITIO_G0_LoadA_Reg, - NITIO_G1_LoadA_Reg, - NITIO_G2_LoadA_Reg, - NITIO_G3_LoadA_Reg, - NITIO_G0_LoadB_Reg, - NITIO_G1_LoadB_Reg, - NITIO_G2_LoadB_Reg, - NITIO_G3_LoadB_Reg, - NITIO_G0_Input_Select_Reg, - NITIO_G1_Input_Select_Reg, - NITIO_G2_Input_Select_Reg, - NITIO_G3_Input_Select_Reg, - NITIO_G0_Counting_Mode_Reg, - NITIO_G1_Counting_Mode_Reg, - NITIO_G2_Counting_Mode_Reg, - NITIO_G3_Counting_Mode_Reg, - NITIO_G0_Second_Gate_Reg, - NITIO_G1_Second_Gate_Reg, - NITIO_G2_Second_Gate_Reg, - NITIO_G3_Second_Gate_Reg, - NITIO_G01_Status_Reg, - NITIO_G23_Status_Reg, - NITIO_G01_Joint_Reset_Reg, - NITIO_G23_Joint_Reset_Reg, - NITIO_G01_Joint_Status1_Reg, - NITIO_G23_Joint_Status1_Reg, - NITIO_G01_Joint_Status2_Reg, - NITIO_G23_Joint_Status2_Reg, - NITIO_G0_DMA_Config_Reg, - NITIO_G1_DMA_Config_Reg, - NITIO_G2_DMA_Config_Reg, - NITIO_G3_DMA_Config_Reg, - NITIO_G0_DMA_Status_Reg, - NITIO_G1_DMA_Status_Reg, - NITIO_G2_DMA_Status_Reg, - NITIO_G3_DMA_Status_Reg, - NITIO_G0_ABZ_Reg, - NITIO_G1_ABZ_Reg, - NITIO_G0_Interrupt_Acknowledge_Reg, - NITIO_G1_Interrupt_Acknowledge_Reg, - NITIO_G2_Interrupt_Acknowledge_Reg, - NITIO_G3_Interrupt_Acknowledge_Reg, - NITIO_G0_Status_Reg, - NITIO_G1_Status_Reg, - NITIO_G2_Status_Reg, - 
NITIO_G3_Status_Reg, - NITIO_G0_Interrupt_Enable_Reg, - NITIO_G1_Interrupt_Enable_Reg, - NITIO_G2_Interrupt_Enable_Reg, - NITIO_G3_Interrupt_Enable_Reg, - NITIO_Num_Registers, + NITIO_G0_AUTO_INC, + NITIO_G1_AUTO_INC, + NITIO_G2_AUTO_INC, + NITIO_G3_AUTO_INC, + NITIO_G0_CMD, + NITIO_G1_CMD, + NITIO_G2_CMD, + NITIO_G3_CMD, + NITIO_G0_HW_SAVE, + NITIO_G1_HW_SAVE, + NITIO_G2_HW_SAVE, + NITIO_G3_HW_SAVE, + NITIO_G0_SW_SAVE, + NITIO_G1_SW_SAVE, + NITIO_G2_SW_SAVE, + NITIO_G3_SW_SAVE, + NITIO_G0_MODE, + NITIO_G1_MODE, + NITIO_G2_MODE, + NITIO_G3_MODE, + NITIO_G0_LOADA, + NITIO_G1_LOADA, + NITIO_G2_LOADA, + NITIO_G3_LOADA, + NITIO_G0_LOADB, + NITIO_G1_LOADB, + NITIO_G2_LOADB, + NITIO_G3_LOADB, + NITIO_G0_INPUT_SEL, + NITIO_G1_INPUT_SEL, + NITIO_G2_INPUT_SEL, + NITIO_G3_INPUT_SEL, + NITIO_G0_CNT_MODE, + NITIO_G1_CNT_MODE, + NITIO_G2_CNT_MODE, + NITIO_G3_CNT_MODE, + NITIO_G0_GATE2, + NITIO_G1_GATE2, + NITIO_G2_GATE2, + NITIO_G3_GATE2, + NITIO_G01_STATUS, + NITIO_G23_STATUS, + NITIO_G01_RESET, + NITIO_G23_RESET, + NITIO_G01_STATUS1, + NITIO_G23_STATUS1, + NITIO_G01_STATUS2, + NITIO_G23_STATUS2, + NITIO_G0_DMA_CFG, + NITIO_G1_DMA_CFG, + NITIO_G2_DMA_CFG, + NITIO_G3_DMA_CFG, + NITIO_G0_DMA_STATUS, + NITIO_G1_DMA_STATUS, + NITIO_G2_DMA_STATUS, + NITIO_G3_DMA_STATUS, + NITIO_G0_ABZ, + NITIO_G1_ABZ, + NITIO_G0_INT_ACK, + NITIO_G1_INT_ACK, + NITIO_G2_INT_ACK, + NITIO_G3_INT_ACK, + NITIO_G0_STATUS, + NITIO_G1_STATUS, + NITIO_G2_STATUS, + NITIO_G3_STATUS, + NITIO_G0_INT_ENA, + NITIO_G1_INT_ENA, + NITIO_G2_INT_ENA, + NITIO_G3_INT_ENA, + NITIO_NUM_REGS, }; enum ni_gpct_variant { @@ -122,48 +122,35 @@ struct ni_gpct_device { enum ni_gpct_variant variant; struct ni_gpct *counters; unsigned num_counters; - unsigned regs[NITIO_Num_Registers]; + unsigned regs[NITIO_NUM_REGS]; spinlock_t regs_lock; }; -extern struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device - *dev, - void (*write_register) - (struct ni_gpct * - counter, unsigned bits, - enum ni_gpct_register - reg), - unsigned (*read_register) - (struct ni_gpct * - counter, - enum ni_gpct_register - reg), - enum ni_gpct_variant - variant, - unsigned num_counters); -extern void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev); -extern void ni_tio_init_counter(struct ni_gpct *counter); -extern int ni_tio_rinsn(struct ni_gpct *counter, - struct comedi_insn *insn, unsigned int *data); -extern int ni_tio_insn_config(struct ni_gpct *counter, - struct comedi_insn *insn, unsigned int *data); -extern int ni_tio_winsn(struct ni_gpct *counter, - struct comedi_insn *insn, unsigned int *data); -extern int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async); -extern int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd); -extern int ni_tio_cancel(struct ni_gpct *counter); -extern void ni_tio_handle_interrupt(struct ni_gpct *counter, - struct comedi_subdevice *s); -extern void ni_tio_set_mite_channel(struct ni_gpct *counter, - struct mite_channel *mite_chan); -extern void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, - int *gate_error, int *tc_error, - int *perm_stale_data, - int *stale_data); - -static inline struct ni_gpct *subdev_to_counter(struct comedi_subdevice *s) -{ - return s->private; -} +struct ni_gpct_device * +ni_gpct_device_construct(struct comedi_device *, + void (*write_register)(struct ni_gpct *, + unsigned bits, + enum ni_gpct_register), + unsigned (*read_register)(struct ni_gpct *, + enum ni_gpct_register), + enum ni_gpct_variant, + unsigned num_counters); +void ni_gpct_device_destroy(struct 
ni_gpct_device *); +void ni_tio_init_counter(struct ni_gpct *); +int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *, + struct comedi_insn *, unsigned int *data); +int ni_tio_insn_config(struct comedi_device *, struct comedi_subdevice *, + struct comedi_insn *, unsigned int *data); +int ni_tio_insn_write(struct comedi_device *, struct comedi_subdevice *, + struct comedi_insn *, unsigned int *data); +int ni_tio_cmd(struct comedi_device *, struct comedi_subdevice *); +int ni_tio_cmdtest(struct comedi_device *, struct comedi_subdevice *, + struct comedi_cmd *); +int ni_tio_cancel(struct ni_gpct *); +void ni_tio_handle_interrupt(struct ni_gpct *, struct comedi_subdevice *); +void ni_tio_set_mite_channel(struct ni_gpct *, struct mite_channel *); +void ni_tio_acknowledge_and_confirm(struct ni_gpct *, + int *gate_error, int *tc_error, + int *perm_stale_data, int *stale_data); #endif /* _COMEDI_NI_TIO_H */ diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h index b009876754a8a1..15b81b8fc5c49b 100644 --- a/drivers/staging/comedi/drivers/ni_tio_internal.h +++ b/drivers/staging/comedi/drivers/ni_tio_internal.h @@ -21,409 +21,26 @@ #include "ni_tio.h" -static inline enum ni_gpct_register NITIO_Gi_Autoincrement_Reg(unsigned - counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Autoincrement_Reg; - break; - case 1: - return NITIO_G1_Autoincrement_Reg; - break; - case 2: - return NITIO_G2_Autoincrement_Reg; - break; - case 3: - return NITIO_G3_Autoincrement_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Command_Reg(unsigned counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Command_Reg; - break; - case 1: - return NITIO_G1_Command_Reg; - break; - case 2: - return NITIO_G2_Command_Reg; - break; - case 3: - return NITIO_G3_Command_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Counting_Mode_Reg(unsigned - counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Counting_Mode_Reg; - break; - case 1: - return NITIO_G1_Counting_Mode_Reg; - break; - case 2: - return NITIO_G2_Counting_Mode_Reg; - break; - case 3: - return NITIO_G3_Counting_Mode_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Input_Select_Reg(unsigned - counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Input_Select_Reg; - break; - case 1: - return NITIO_G1_Input_Select_Reg; - break; - case 2: - return NITIO_G2_Input_Select_Reg; - break; - case 3: - return NITIO_G3_Input_Select_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gxx_Joint_Reset_Reg(unsigned - counter_index) -{ - switch (counter_index) { - case 0: - case 1: - return NITIO_G01_Joint_Reset_Reg; - break; - case 2: - case 3: - return NITIO_G23_Joint_Reset_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gxx_Joint_Status1_Reg(unsigned - counter_index) -{ - switch (counter_index) { - case 0: - case 1: - return NITIO_G01_Joint_Status1_Reg; - break; - case 2: - case 3: - return NITIO_G23_Joint_Status1_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gxx_Joint_Status2_Reg(unsigned - counter_index) -{ - switch (counter_index) { - case 0: - case 1: - return 
NITIO_G01_Joint_Status2_Reg; - break; - case 2: - case 3: - return NITIO_G23_Joint_Status2_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gxx_Status_Reg(unsigned counter_index) -{ - switch (counter_index) { - case 0: - case 1: - return NITIO_G01_Status_Reg; - break; - case 2: - case 3: - return NITIO_G23_Status_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_LoadA_Reg(unsigned counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_LoadA_Reg; - break; - case 1: - return NITIO_G1_LoadA_Reg; - break; - case 2: - return NITIO_G2_LoadA_Reg; - break; - case 3: - return NITIO_G3_LoadA_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_LoadB_Reg(unsigned counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_LoadB_Reg; - break; - case 1: - return NITIO_G1_LoadB_Reg; - break; - case 2: - return NITIO_G2_LoadB_Reg; - break; - case 3: - return NITIO_G3_LoadB_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Mode_Reg(unsigned counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Mode_Reg; - break; - case 1: - return NITIO_G1_Mode_Reg; - break; - case 2: - return NITIO_G2_Mode_Reg; - break; - case 3: - return NITIO_G3_Mode_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_SW_Save_Reg(int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_SW_Save_Reg; - break; - case 1: - return NITIO_G1_SW_Save_Reg; - break; - case 2: - return NITIO_G2_SW_Save_Reg; - break; - case 3: - return NITIO_G3_SW_Save_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Second_Gate_Reg(int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Second_Gate_Reg; - break; - case 1: - return NITIO_G1_Second_Gate_Reg; - break; - case 2: - return NITIO_G2_Second_Gate_Reg; - break; - case 3: - return NITIO_G3_Second_Gate_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_DMA_Config_Reg; - break; - case 1: - return NITIO_G1_DMA_Config_Reg; - break; - case 2: - return NITIO_G2_DMA_Config_Reg; - break; - case 3: - return NITIO_G3_DMA_Config_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_DMA_Status_Reg(int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_DMA_Status_Reg; - break; - case 1: - return NITIO_G1_DMA_Status_Reg; - break; - case 2: - return NITIO_G2_DMA_Status_Reg; - break; - case 3: - return NITIO_G3_DMA_Status_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_ABZ_Reg(int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_ABZ_Reg; - break; - case 1: - return NITIO_G1_ABZ_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg( - int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Interrupt_Acknowledge_Reg; - break; - case 1: - return NITIO_G1_Interrupt_Acknowledge_Reg; - break; - case 2: - return NITIO_G2_Interrupt_Acknowledge_Reg; - break; - case 3: - return 
NITIO_G3_Interrupt_Acknowledge_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Status_Reg; - break; - case 1: - return NITIO_G1_Status_Reg; - break; - case 2: - return NITIO_G2_Status_Reg; - break; - case 3: - return NITIO_G3_Status_Reg; - break; - default: - BUG(); - break; - } - return 0; -} - -static inline enum ni_gpct_register NITIO_Gi_Interrupt_Enable_Reg( - int counter_index) -{ - switch (counter_index) { - case 0: - return NITIO_G0_Interrupt_Enable_Reg; - break; - case 1: - return NITIO_G1_Interrupt_Enable_Reg; - break; - case 2: - return NITIO_G2_Interrupt_Enable_Reg; - break; - case 3: - return NITIO_G3_Interrupt_Enable_Reg; - break; - default: - BUG(); - break; - } - return 0; -} +#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x)) +#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x)) +#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x)) +#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x)) +#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x)) +#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x)) +#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x)) +#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x)) +#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x)) +#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x)) +#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2)) +#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2)) +#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2)) +#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2)) +#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x)) +#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x)) +#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x)) +#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x)) +#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x)) +#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x)) enum Gi_Auto_Increment_Reg_Bits { Gi_Auto_Increment_Mask = 0xff @@ -699,14 +316,14 @@ static inline unsigned Gi_Gate_Interrupt_Enable_Bit(unsigned counter_index) static inline void write_register(struct ni_gpct *counter, unsigned bits, enum ni_gpct_register reg) { - BUG_ON(reg >= NITIO_Num_Registers); + BUG_ON(reg >= NITIO_NUM_REGS); counter->counter_dev->write_register(counter, bits, reg); } static inline unsigned read_register(struct ni_gpct *counter, enum ni_gpct_register reg) { - BUG_ON(reg >= NITIO_Num_Registers); + BUG_ON(reg >= NITIO_NUM_REGS); return counter->counter_dev->read_register(counter, reg); } @@ -738,7 +355,7 @@ static inline void ni_tio_set_bits_transient(struct ni_gpct *counter, struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned long flags; - BUG_ON(register_index >= NITIO_Num_Registers); + BUG_ON(register_index >= NITIO_NUM_REGS); spin_lock_irqsave(&counter_dev->regs_lock, flags); counter_dev->regs[register_index] &= ~bit_mask; counter_dev->regs[register_index] |= (bit_values & bit_mask); @@ -773,7 +390,7 @@ static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter, unsigned long flags; unsigned value; - BUG_ON(register_index >= NITIO_Num_Registers); + BUG_ON(register_index >= NITIO_NUM_REGS); spin_lock_irqsave(&counter_dev->regs_lock, flags); value = counter_dev->regs[register_index]; spin_unlock_irqrestore(&counter_dev->regs_lock, flags); diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c index 45691efefd056a..7d64f8892f0819 100644 --- 
a/drivers/staging/comedi/drivers/ni_tiocmd.c +++ b/drivers/staging/comedi/drivers/ni_tiocmd.c @@ -49,14 +49,11 @@ DAQ 6601/6602 User Manual (NI 322137B-01) #include "ni_tio_internal.h" #include "mite.h" -MODULE_AUTHOR("Comedi "); -MODULE_DESCRIPTION("Comedi command support for NI general-purpose counters"); -MODULE_LICENSE("GPL"); - static void ni_tio_configure_dma(struct ni_gpct *counter, short enable, short read_not_write) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned cidx = counter->counter_index; unsigned input_select_bits = 0; if (enable) { @@ -65,8 +62,7 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable, else input_select_bits |= Gi_Write_Acknowledges_Irq; } - ni_tio_set_bits(counter, - NITIO_Gi_Input_Select_Reg(counter->counter_index), + ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), Gi_Read_Acknowledges_Irq | Gi_Write_Acknowledges_Irq, input_select_bits); switch (counter_dev->variant) { @@ -83,9 +79,7 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable, } if (read_not_write == 0) gi_dma_config_bits |= Gi_DMA_Write_Bit; - ni_tio_set_bits(counter, - NITIO_Gi_DMA_Config_Reg(counter-> - counter_index), + ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), Gi_DMA_Enable_Bit | Gi_DMA_Int_Bit | Gi_DMA_Write_Bit, gi_dma_config_bits); } @@ -122,6 +116,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev, static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned cidx = counter->counter_index; struct comedi_cmd *cmd = &async->cmd; int retval = 0; @@ -140,8 +135,7 @@ static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async) BUG(); break; } - ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), - Gi_Save_Trace_Bit, 0); + ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), Gi_Save_Trace_Bit, 0); ni_tio_configure_dma(counter, 1, 1); switch (cmd->start_src) { case TRIG_NOW: @@ -185,6 +179,7 @@ static int ni_tio_output_cmd(struct ni_gpct *counter, static int ni_tio_cmd_setup(struct ni_gpct *counter, struct comedi_async *async) { struct comedi_cmd *cmd = &async->cmd; + unsigned cidx = counter->counter_index; int set_gate_source = 0; unsigned gate_source; int retval = 0; @@ -199,19 +194,17 @@ static int ni_tio_cmd_setup(struct ni_gpct *counter, struct comedi_async *async) if (set_gate_source) retval = ni_tio_set_gate_src(counter, 0, gate_source); if (cmd->flags & TRIG_WAKE_EOS) { - ni_tio_set_bits(counter, - NITIO_Gi_Interrupt_Enable_Reg(counter-> - counter_index), - Gi_Gate_Interrupt_Enable_Bit(counter-> - counter_index), - Gi_Gate_Interrupt_Enable_Bit(counter-> - counter_index)); + ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx), + Gi_Gate_Interrupt_Enable_Bit(cidx), + Gi_Gate_Interrupt_Enable_Bit(cidx)); } return retval; } -int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async) +int ni_tio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { + struct ni_gpct *counter = s->private; + struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; int retval = 0; unsigned long flags; @@ -237,8 +230,11 @@ int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async) } EXPORT_SYMBOL_GPL(ni_tio_cmd); -int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd) +int ni_tio_cmdtest(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_cmd *cmd) { + struct ni_gpct *counter = s->private; int err = 0; unsigned int sources; @@ 
-301,6 +297,7 @@ EXPORT_SYMBOL_GPL(ni_tio_cmdtest); int ni_tio_cancel(struct ni_gpct *counter) { + unsigned cidx = counter->counter_index; unsigned long flags; ni_tio_arm(counter, 0, 0); @@ -310,10 +307,8 @@ int ni_tio_cancel(struct ni_gpct *counter) spin_unlock_irqrestore(&counter->lock, flags); ni_tio_configure_dma(counter, 0, 0); - ni_tio_set_bits(counter, - NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), - Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), - 0x0); + ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx), + Gi_Gate_Interrupt_Enable_Bit(cidx), 0x0); return 0; } EXPORT_SYMBOL_GPL(ni_tio_cancel); @@ -353,14 +348,11 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error, int *tc_error, int *perm_stale_data, int *stale_data) { + unsigned cidx = counter->counter_index; const unsigned short gxx_status = read_register(counter, - NITIO_Gxx_Status_Reg - (counter-> - counter_index)); + NITIO_SHARED_STATUS_REG(cidx)); const unsigned short gi_status = read_register(counter, - NITIO_Gi_Status_Reg - (counter-> - counter_index)); + NITIO_STATUS_REG(cidx)); unsigned ack = 0; if (gate_error) @@ -372,8 +364,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error, if (stale_data) *stale_data = 0; - if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) { - ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index); + if (gxx_status & Gi_Gate_Error_Bit(cidx)) { + ack |= Gi_Gate_Error_Confirm_Bit(cidx); if (gate_error) { /*660x don't support automatic acknowledgement of gate interrupt via dma read/write @@ -384,8 +376,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error, } } } - if (gxx_status & Gi_TC_Error_Bit(counter->counter_index)) { - ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index); + if (gxx_status & Gi_TC_Error_Bit(cidx)) { + ack |= Gi_TC_Error_Confirm_Bit(cidx); if (tc_error) *tc_error = 1; } @@ -396,21 +388,15 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error, ack |= Gi_Gate_Interrupt_Ack_Bit; } if (ack) - write_register(counter, ack, - NITIO_Gi_Interrupt_Acknowledge_Reg - (counter->counter_index)); - if (ni_tio_get_soft_copy - (counter, - NITIO_Gi_Mode_Reg(counter->counter_index)) & + write_register(counter, ack, NITIO_INT_ACK_REG(cidx)); + if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) & Gi_Loading_On_Gate_Bit) { - if (gxx_status & Gi_Stale_Data_Bit(counter->counter_index)) { + if (gxx_status & Gi_Stale_Data_Bit(cidx)) { if (stale_data) *stale_data = 1; } - if (read_register(counter, - NITIO_Gxx_Joint_Status2_Reg - (counter->counter_index)) & - Gi_Permanent_Stale_Bit(counter->counter_index)) { + if (read_register(counter, NITIO_STATUS2_REG(cidx)) & + Gi_Permanent_Stale_Bit(cidx)) { dev_info(counter->counter_dev->dev->class_dev, "%s: Gi_Permanent_Stale_Data detected.\n", __func__); @@ -424,6 +410,7 @@ EXPORT_SYMBOL_GPL(ni_tio_acknowledge_and_confirm); void ni_tio_handle_interrupt(struct ni_gpct *counter, struct comedi_subdevice *s) { + unsigned cidx = counter->counter_index; unsigned gpct_mite_status; unsigned long flags; int gate_error; @@ -442,9 +429,8 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter, switch (counter->counter_dev->variant) { case ni_gpct_variant_m_series: case ni_gpct_variant_660x: - if (read_register(counter, - NITIO_Gi_DMA_Status_Reg - (counter->counter_index)) & Gi_DRQ_Error_Bit) { + if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) & + Gi_DRQ_Error_Bit) { dev_notice(counter->counter_dev->dev->class_dev, "%s: 
Gi_DRQ_Error detected.\n", __func__); s->async->events |= COMEDI_CB_OVERFLOW; @@ -484,11 +470,13 @@ static int __init ni_tiocmd_init_module(void) { return 0; } - module_init(ni_tiocmd_init_module); static void __exit ni_tiocmd_cleanup_module(void) { } - module_exit(ni_tiocmd_cleanup_module); + +MODULE_AUTHOR("Comedi "); +MODULE_DESCRIPTION("Comedi command support for NI general-purpose counters"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c index 03315abcca1918..53613b385f35c1 100644 --- a/drivers/staging/comedi/drivers/pcl812.c +++ b/drivers/staging/comedi/drivers/pcl812.c @@ -162,156 +162,172 @@ #define MAX_CHANLIST_LEN 256 /* length of scan list */ -static const struct comedi_lrange range_pcl812pg_ai = { 5, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - BIP_RANGE(0.3125), - } +static const struct comedi_lrange range_pcl812pg_ai = { + 5, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + BIP_RANGE(0.3125) + } }; -static const struct comedi_lrange range_pcl812pg2_ai = { 5, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - } +static const struct comedi_lrange range_pcl812pg2_ai = { + 5, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) + } }; -static const struct comedi_lrange range812_bipolar1_25 = { 1, { - BIP_RANGE(1.25), - } +static const struct comedi_lrange range812_bipolar1_25 = { + 1, { + BIP_RANGE(1.25) + } }; -static const struct comedi_lrange range812_bipolar0_625 = { 1, { - BIP_RANGE - (0.625), - } +static const struct comedi_lrange range812_bipolar0_625 = { + 1, { + BIP_RANGE(0.625) + } }; -static const struct comedi_lrange range812_bipolar0_3125 = { 1, { - BIP_RANGE - (0.3125), - } +static const struct comedi_lrange range812_bipolar0_3125 = { + 1, { + BIP_RANGE(0.3125) + } }; -static const struct comedi_lrange range_pcl813b_ai = { 4, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - } +static const struct comedi_lrange range_pcl813b_ai = { + 4, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) + } }; -static const struct comedi_lrange range_pcl813b2_ai = { 4, { - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - } +static const struct comedi_lrange range_pcl813b2_ai = { + 4, { + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; -static const struct comedi_lrange range_iso813_1_ai = { 5, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - BIP_RANGE(0.3125), - } +static const struct comedi_lrange range_iso813_1_ai = { + 5, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + BIP_RANGE(0.3125) + } }; -static const struct comedi_lrange range_iso813_1_2_ai = { 5, { - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - UNI_RANGE(0.625), - } +static const struct comedi_lrange range_iso813_1_2_ai = { + 5, { + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25), + UNI_RANGE(0.625) + } }; -static const struct comedi_lrange range_iso813_2_ai = { 4, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - } +static const struct comedi_lrange range_iso813_2_ai = { + 4, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) + } }; -static const struct comedi_lrange range_iso813_2_2_ai = { 4, { - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - } 
+static const struct comedi_lrange range_iso813_2_2_ai = { + 4, { + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; -static const struct comedi_lrange range_acl8113_1_ai = { 4, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - } +static const struct comedi_lrange range_acl8113_1_ai = { + 4, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) + } }; -static const struct comedi_lrange range_acl8113_1_2_ai = { 4, { - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - } +static const struct comedi_lrange range_acl8113_1_2_ai = { + 4, { + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; -static const struct comedi_lrange range_acl8113_2_ai = { 3, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - } +static const struct comedi_lrange range_acl8113_2_ai = { + 3, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25) + } }; -static const struct comedi_lrange range_acl8113_2_2_ai = { 3, { - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - } +static const struct comedi_lrange range_acl8113_2_2_ai = { + 3, { + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5) + } }; -static const struct comedi_lrange range_acl8112dg_ai = { 9, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - BIP_RANGE(10), - } +static const struct comedi_lrange range_acl8112dg_ai = { + 9, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25), + BIP_RANGE(10) + } }; -static const struct comedi_lrange range_acl8112hg_ai = { 12, { - BIP_RANGE(5), - BIP_RANGE(0.5), - BIP_RANGE(0.05), - BIP_RANGE(0.005), - UNI_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(0.1), - UNI_RANGE(0.01), - BIP_RANGE(10), - BIP_RANGE(1), - BIP_RANGE(0.1), - BIP_RANGE(0.01), - } +static const struct comedi_lrange range_acl8112hg_ai = { + 12, { + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.005), + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.01), + BIP_RANGE(10), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.01) + } }; -static const struct comedi_lrange range_a821pgh_ai = { 4, { - BIP_RANGE(5), - BIP_RANGE(0.5), - BIP_RANGE(0.05), - BIP_RANGE(0.005), - } +static const struct comedi_lrange range_a821pgh_ai = { + 4, { + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.005) + } }; struct pcl812_board { @@ -404,9 +420,7 @@ static int pcl812_ai_insn_read(struct comedi_device *dev, goto conv_finish; udelay(1); } - printk - ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n", - dev->minor, dev->board_name, dev->iobase); + dev_dbg(dev->class_dev, "A/D insn read timeout\n"); outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE); return -ETIME; @@ -441,9 +455,7 @@ static int acl8216_ai_insn_read(struct comedi_device *dev, goto conv_finish; udelay(1); } - printk - ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n", - dev->minor, dev->board_name, dev->iobase); + dev_dbg(dev->class_dev, "A/D insn read timeout\n"); outb(0, dev->iobase + PCL812_MODE); return -ETIME; @@ -759,7 +771,7 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d) unsigned int mask, timeout; struct comedi_device *dev = d; struct pcl812_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; unsigned int next_chan; s->async->events = 0; @@ -786,10 
+798,7 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d) } if (err) { - printk - ("comedi%d: pcl812: (%s at 0x%lx) " - "A/D cmd IRQ without DRDY!\n", - dev->minor, dev->board_name, dev->iobase); + dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n"); pcl812_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); @@ -865,7 +874,7 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d) { struct comedi_device *dev = d; struct pcl812_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; unsigned long dma_flags; int len, bufptr; unsigned short *ptr; @@ -1095,7 +1104,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) const struct pcl812_board *board = comedi_board(dev); struct pcl812_private *devpriv; int ret, subdev; - unsigned int irq; unsigned int dma; unsigned long pages; struct comedi_subdevice *s; @@ -1109,31 +1117,13 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (!devpriv) return -ENOMEM; - irq = 0; - if (board->IRQbits != 0) { /* board support IRQ */ - irq = it->options[1]; - if (irq) { /* we want to use IRQ */ - if (((1 << irq) & board->IRQbits) == 0) { - printk - (", IRQ %u is out of allowed range, " - "DISABLING IT", irq); - irq = 0; /* Bad IRQ */ - } else { - if (request_irq(irq, interrupt_pcl812, 0, - dev->board_name, dev)) { - printk - (", unable to allocate IRQ %u, " - "DISABLING IT", irq); - irq = 0; /* Can't use IRQ */ - } else { - printk(KERN_INFO ", irq=%u", irq); - } - } - } + if ((1 << it->options[1]) & board->IRQbits) { + ret = request_irq(it->options[1], interrupt_pcl812, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; } - dev->irq = irq; - dma = 0; devpriv->dma = dma; if (!dev->irq) @@ -1141,21 +1131,22 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (board->DMAbits != 0) { /* board support DMA */ dma = it->options[2]; if (((1 << dma) & board->DMAbits) == 0) { - printk(", DMA is out of allowed range, FAIL!\n"); + dev_err(dev->class_dev, + "DMA is out of allowed range, FAIL!\n"); return -EINVAL; /* Bad DMA */ } ret = request_dma(dma, dev->board_name); if (ret) { - printk(KERN_ERR ", unable to allocate DMA %u, FAIL!\n", - dma); + dev_err(dev->class_dev, + "unable to allocate DMA %u, FAIL!\n", dma); return -EBUSY; /* DMA isn't free */ } devpriv->dma = dma; - printk(KERN_INFO ", dma=%u", dma); pages = 1; /* we want 8KB */ devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages); if (!devpriv->dmabuf[0]) { - printk(", unable to allocate DMA buffer, FAIL!\n"); + dev_err(dev->class_dev, + "unable to allocate DMA buffer, FAIL!\n"); /* * maybe experiment with try_to_free_pages() * will help .... 
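The attach hunks above collapse the old IRQ setup into a single bitmask test: bit n of board->IRQbits is set when the board can use IRQ n, so (1 << it->options[1]) & board->IRQbits is non-zero only for a supported line, and request_irq() is attempted only in that case. The following stand-alone sketch just illustrates that mask test; the mask value and the board_irq_ok() helper are invented for the example and are not part of the driver.

#include <stdio.h>

/* Example only: pretend the board supports IRQs 2, 3, 5 and 7. */
#define EXAMPLE_IRQBITS ((1u << 2) | (1u << 3) | (1u << 5) | (1u << 7))

static int board_irq_ok(unsigned int irq, unsigned int irqbits)
{
	/* Same test as the driver: is the requested line in the mask? */
	return ((1u << irq) & irqbits) != 0;
}

int main(void)
{
	unsigned int irq;

	for (irq = 0; irq < 16; irq++)
		printf("IRQ %2u: %s\n", irq,
		       board_irq_ok(irq, EXAMPLE_IRQBITS) ?
		       "allowed" : "rejected");
	return 0;
}

In the driver itself the same test simply gates the request_irq() call, and dev->irq is only set when that call succeeds.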
@@ -1167,7 +1158,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) devpriv->hwdmasize[0] = PAGE_SIZE * (1 << pages); devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages); if (!devpriv->dmabuf[1]) { - printk(KERN_ERR ", unable to allocate DMA buffer, FAIL!\n"); + dev_err(dev->class_dev, + "unable to allocate DMA buffer, FAIL!\n"); return -EBUSY; } devpriv->dmapages[1] = pages; @@ -1225,7 +1217,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) break; } s->maxdata = board->ai_maxdata; - s->len_chanlist = MAX_CHANLIST_LEN; s->range_table = board->rangelist_ai; if (board->board_type == boardACL8216) s->insn_read = acl8216_ai_insn_read; @@ -1233,13 +1224,14 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->insn_read = pcl812_ai_insn_read; devpriv->use_MPC = board->haveMPC508; - s->cancel = pcl812_ai_cancel; if (dev->irq) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = MAX_CHANLIST_LEN; s->do_cmdtest = pcl812_ai_cmdtest; s->do_cmd = pcl812_ai_cmd; s->poll = pcl812_ai_poll; + s->cancel = pcl812_ai_cancel; } switch (board->board_type) { case boardPCL812PG: @@ -1269,10 +1261,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) default: s->range_table = &range_bipolar10; break; - printk - (", incorrect range number %d, changing " - "to 0 (+/-10V)", it->options[4]); - break; } break; break; @@ -1299,10 +1287,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) default: s->range_table = &range_iso813_1_ai; break; - printk - (", incorrect range number %d, " - "changing to 0 ", it->options[1]); - break; } break; case boardACL8113: @@ -1324,10 +1308,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) default: s->range_table = &range_acl8113_1_ai; break; - printk - (", incorrect range number %d, " - "changing to 0 ", it->options[1]); - break; } break; } @@ -1341,7 +1321,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->subdev_flags = SDF_WRITABLE | SDF_GROUND; s->n_chan = board->n_aochan; s->maxdata = 0xfff; - s->len_chanlist = 1; s->range_table = board->rangelist_ao; s->insn_read = pcl812_ao_insn_read; s->insn_write = pcl812_ao_insn_write; @@ -1370,7 +1349,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->subdev_flags = SDF_READABLE; s->n_chan = board->n_dichan; s->maxdata = 1; - s->len_chanlist = board->n_dichan; s->range_table = &range_digital; s->insn_bits = pcl812_di_insn_bits; subdev++; @@ -1383,7 +1361,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->subdev_flags = SDF_WRITABLE; s->n_chan = board->n_dochan; s->maxdata = 1; - s->len_chanlist = board->n_dochan; s->range_table = &range_digital; s->insn_bits = pcl812_do_insn_bits; subdev++; @@ -1402,7 +1379,7 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) break; case boardA821: devpriv->max_812_ai_mode0_rangewait = 1; - devpriv->mode_reg_int = (irq << 4) & 0xf0; + devpriv->mode_reg_int = (dev->irq << 4) & 0xf0; break; case boardPCL813B: case boardPCL813: @@ -1413,7 +1390,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) break; } - printk(KERN_INFO "\n"); devpriv->valid = 1; pcl812_reset(dev); diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c index ab9d2bd26a207b..e9d47045993308 100644 
--- a/drivers/staging/comedi/drivers/pcl816.c +++ b/drivers/staging/comedi/drivers/pcl816.c @@ -44,14 +44,10 @@ Configuration Options: #include "comedi_fc.h" #include "8253.h" -#define DEBUG(x) x - /* boards constants */ /* IO space len */ #define PCLx1x_RANGE 16 -/* #define outb(x,y) printk("OUTB(%x, 200+%d)\n", x,y-0x200); outb(x,y) */ - /* INTEL 8254 counters */ #define PCL816_CTR0 4 #define PCL816_CTR1 5 @@ -85,16 +81,17 @@ Configuration Options: #define MAGIC_DMA_WORD 0x5a5a -static const struct comedi_lrange range_pcl816 = { 8, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - } +static const struct comedi_lrange range_pcl816 = { + 8, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25) + } }; struct pcl816_board { @@ -132,7 +129,6 @@ struct pcl816_private { unsigned int ai_scans; /* len of scanlist */ unsigned char ai_neverending; /* if=1, then we do neverending record (you must use cancel()) */ - int irq_free; /* 1=have allocated IRQ */ int irq_blocked; /* 1=IRQ now uses any subdev */ int irq_was_now_closed; /* when IRQ finish, there's stored int816_mode for last interrupt */ int int816_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */ @@ -143,7 +139,6 @@ struct pcl816_private { unsigned int ai_act_chanlist_pos; /* actual position in MUX list */ unsigned int ai_n_chan; /* how many channels per scan */ unsigned int ai_poll_ptr; /* how many sampes transfer poll */ - struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */ }; /* @@ -176,7 +171,6 @@ static int pcl816_ai_insn_read(struct comedi_device *dev, int n; int timeout; - DPRINTK("mode 0 analog input\n"); /* software trigger, DMA and INT off */ outb(0, dev->iobase + PCL816_CONTROL); /* clear INT (conversion end) flag */ @@ -228,7 +222,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d) { struct comedi_device *dev = d; struct pcl816_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; unsigned char low, hi; int timeout = 50; /* wait max 50us */ @@ -322,7 +316,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d) { struct comedi_device *dev = d; struct pcl816_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; int len, bufptr, this_dma_buf; unsigned long dma_flags; unsigned short *ptr; @@ -372,8 +366,6 @@ static irqreturn_t interrupt_pcl816(int irq, void *d) struct comedi_device *dev = d; struct pcl816_private *devpriv = dev->private; - DPRINTK(""); - if (!dev->attached) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; @@ -389,8 +381,7 @@ static irqreturn_t interrupt_pcl816(int irq, void *d) } outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */ - if (!dev->irq || !devpriv->irq_free || !devpriv->irq_blocked || - !devpriv->int816_mode) { + if (!dev->irq || !devpriv->irq_blocked || !devpriv->int816_mode) { if (devpriv->irq_was_now_closed) { devpriv->irq_was_now_closed = 0; /* comedi_error(dev,"last IRQ.."); */ @@ -403,22 +394,6 @@ static irqreturn_t interrupt_pcl816(int irq, void *d) return IRQ_NONE; } -/* -============================================================================== - COMMAND MODE -*/ -static void pcl816_cmdtest_out(int e, struct comedi_cmd *cmd) -{ - printk(KERN_INFO "pcl816 e=%d 
startsrc=%x scansrc=%x convsrc=%x\n", e, - cmd->start_src, cmd->scan_begin_src, cmd->convert_src); - printk(KERN_INFO "pcl816 e=%d startarg=%d scanarg=%d convarg=%d\n", e, - cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg); - printk(KERN_INFO "pcl816 e=%d stopsrc=%x scanend=%x\n", e, - cmd->stop_src, cmd->scan_end_src); - printk(KERN_INFO "pcl816 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n", - e, cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len); -} - /* ============================================================================== */ @@ -429,10 +404,6 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev, int err = 0; int tmp, divisor1 = 0, divisor2 = 0; - DEBUG(printk(KERN_INFO "pcl816 pcl812_ai_cmdtest\n"); - pcl816_cmdtest_out(-1, cmd); - ); - /* Step 1 : check if triggers are trivially valid */ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW); @@ -566,15 +537,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->ai_neverending = 1; } - /* don't we want wake up every scan? */ - if ((cmd->flags & TRIG_WAKE_EOS)) { - printk(KERN_INFO - "pl816: You wankt WAKE_EOS but I dont want handle it"); - /* devpriv->ai_eos=1; */ - /* if (devpriv->ai_n_chan==1) */ - /* devpriv->dma=0; // DMA is useless for this situation */ - } - if (devpriv->dma) { bytes = devpriv->hwdmasize[0]; if (!devpriv->ai_neverending) { @@ -630,7 +592,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) break; } - DPRINTK("pcl816 END: pcl812_ai_cmd()\n"); return 0; } @@ -685,8 +646,6 @@ static int pcl816_ai_cancel(struct comedi_device *dev, { struct pcl816_private *devpriv = dev->private; -/* DEBUG(printk("pcl816_ai_cancel()\n");) */ - if (devpriv->irq_blocked > 0) { switch (devpriv->int816_mode) { case INT_TYPE_AI1_DMA: @@ -719,9 +678,7 @@ static int pcl816_ai_cancel(struct comedi_device *dev, break; } } - - DEBUG(printk("comedi: pcl816_ai_cancel() successful\n");) - return 0; + return 0; } /* @@ -788,8 +745,8 @@ start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1, udelay(1); if (mode == 1) { - DPRINTK("mode %d, divisor1 %d, divisor2 %d\n", mode, divisor1, - divisor2); + dev_dbg(dev->class_dev, "mode %d, divisor1 %d, divisor2 %d\n", + mode, divisor1, divisor2); outb(divisor2 & 0xff, dev->iobase + PCL816_CTR2); outb((divisor2 >> 8) & 0xff, dev->iobase + PCL816_CTR2); outb(divisor1 & 0xff, dev->iobase + PCL816_CTR1); @@ -823,11 +780,6 @@ check_channel_list(struct comedi_device *dev, /* first channel is every time ok */ chansegment[0] = chanlist[0]; for (i = 1, seglen = 1; i < chanlen; i++, seglen++) { - /* build part of chanlist */ - DEBUG(printk(KERN_INFO "%d. %d %d\n", i, - CR_CHAN(chanlist[i]), - CR_RANGE(chanlist[i]));) - /* we detect loop, this must by finish */ if (chanlist[0] == chanlist[i]) break; @@ -835,12 +787,10 @@ check_channel_list(struct comedi_device *dev, (CR_CHAN(chansegment[i - 1]) + 1) % chanlen; if (nowmustbechan != CR_CHAN(chanlist[i])) { /* channel list isn't continuous :-( */ - printk(KERN_WARNING - "comedi%d: pcl816: channel list must " - "be continuous! chanlist[%i]=%d but " - "must be %d or %d!\n", dev->minor, - i, CR_CHAN(chanlist[i]), nowmustbechan, - CR_CHAN(chanlist[0])); + dev_dbg(dev->class_dev, + "channel list must be continuous! 
chanlist[%i]=%d but must be %d or %d!\n", + i, CR_CHAN(chanlist[i]), nowmustbechan, + CR_CHAN(chanlist[0])); return 0; } /* well, this is next correct channel in list */ @@ -849,22 +799,15 @@ check_channel_list(struct comedi_device *dev, /* check whole chanlist */ for (i = 0, segpos = 0; i < chanlen; i++) { - DEBUG(printk("%d %d=%d %d\n", - CR_CHAN(chansegment[i % seglen]), - CR_RANGE(chansegment[i % seglen]), - CR_CHAN(chanlist[i]), - CR_RANGE(chanlist[i]));) if (chanlist[i] != chansegment[i % seglen]) { - printk(KERN_WARNING - "comedi%d: pcl816: bad channel or range" - " number! chanlist[%i]=%d,%d,%d and not" - " %d,%d,%d!\n", dev->minor, i, - CR_CHAN(chansegment[i]), - CR_RANGE(chansegment[i]), - CR_AREF(chansegment[i]), - CR_CHAN(chanlist[i % seglen]), - CR_RANGE(chanlist[i % seglen]), - CR_AREF(chansegment[i % seglen])); + dev_dbg(dev->class_dev, + "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n", + i, CR_CHAN(chansegment[i]), + CR_RANGE(chansegment[i]), + CR_AREF(chansegment[i]), + CR_CHAN(chanlist[i % seglen]), + CR_RANGE(chanlist[i % seglen]), + CR_AREF(chansegment[i % seglen])); return 0; /* chan/gain list is strange */ } } @@ -909,7 +852,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it) const struct pcl816_board *board = comedi_board(dev); struct pcl816_private *devpriv; int ret; - unsigned int irq, dma; + unsigned int dma; unsigned long pages; /* int i; */ struct comedi_subdevice *s; @@ -919,7 +862,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it) return ret; if (pcl816_check(dev->iobase)) { - printk(KERN_ERR ", I cann't detect board. FAIL!\n"); + dev_err(dev->class_dev, "I can't detect board. FAIL!\n"); return -EIO; } @@ -927,43 +870,20 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (!devpriv) return -ENOMEM; - /* grab our IRQ */ - irq = 0; - if (board->IRQbits != 0) { /* board support IRQ */ - irq = it->options[1]; - if (irq) { /* we want to use IRQ */ - if (((1 << irq) & board->IRQbits) == 0) { - printk - (", IRQ %u is out of allowed range, " - "DISABLING IT", irq); - irq = 0; /* Bad IRQ */ - } else { - if (request_irq(irq, interrupt_pcl816, 0, - dev->board_name, dev)) { - printk - (", unable to allocate IRQ %u, " - "DISABLING IT", irq); - irq = 0; /* Can't use IRQ */ - } else { - printk(KERN_INFO ", irq=%u", irq); - } - } - } + if ((1 << it->options[1]) & board->IRQbits) { + ret = request_irq(it->options[1], interrupt_pcl816, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; } - dev->irq = irq; - if (irq) /* 1=we have allocated irq */ - devpriv->irq_free = 1; - else - devpriv->irq_free = 0; - devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */ devpriv->int816_mode = 0; /* mode of irq */ /* grab our DMA */ dma = 0; devpriv->dma = dma; - if (!devpriv->irq_free) + if (!dev->irq) goto no_dma; /* if we haven't IRQ, we can't use DMA */ if (board->DMAbits != 0) { /* board support DMA */ @@ -972,23 +892,24 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it) goto no_dma; /* DMA disabled */ if (((1 << dma) & board->DMAbits) == 0) { - printk(", DMA is out of allowed range, FAIL!\n"); + dev_err(dev->class_dev, + "DMA is out of allowed range, FAIL!\n"); return -EINVAL; /* Bad DMA */ } ret = request_dma(dma, dev->board_name); if (ret) { - printk(KERN_ERR - ", unable to allocate DMA %u, FAIL!\n", dma); + dev_err(dev->class_dev, + "unable to allocate DMA %u, FAIL!\n", dma); return -EBUSY; 
/* DMA isn't free */ } devpriv->dma = dma; - printk(KERN_INFO ", dma=%u", dma); pages = 2; /* we need 16KB */ devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages); if (!devpriv->dmabuf[0]) { - printk(", unable to allocate DMA buffer, FAIL!\n"); + dev_err(dev->class_dev, + "unable to allocate DMA buffer, FAIL!\n"); /* * maybe experiment with try_to_free_pages() * will help .... @@ -998,13 +919,11 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it) devpriv->dmapages[0] = pages; devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]); devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE; - /* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */ devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages); if (!devpriv->dmabuf[1]) { - printk(KERN_ERR - ", unable to allocate DMA buffer, " - "FAIL!\n"); + dev_err(dev->class_dev, + "unable to allocate DMA buffer, FAIL!\n"); return -EBUSY; } devpriv->dmapages[1] = pages; @@ -1029,20 +948,20 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it) s = &dev->subdevices[0]; if (board->n_aichan > 0) { s->type = COMEDI_SUBD_AI; - devpriv->sub_ai = s; - dev->read_subdev = s; - s->subdev_flags = SDF_READABLE | SDF_CMD_READ; + s->subdev_flags = SDF_CMD_READ | SDF_DIFF; s->n_chan = board->n_aichan; - s->subdev_flags |= SDF_DIFF; - /* printk (", %dchans DIFF DAC - %d", s->n_chan, i); */ s->maxdata = board->ai_maxdata; - s->len_chanlist = board->ai_chanlist; s->range_table = board->ai_range_type; - s->cancel = pcl816_ai_cancel; - s->do_cmdtest = pcl816_ai_cmdtest; - s->do_cmd = pcl816_ai_cmd; - s->poll = pcl816_ai_poll; s->insn_read = pcl816_ai_insn_read; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = board->ai_chanlist; + s->do_cmdtest = pcl816_ai_cmdtest; + s->do_cmd = pcl816_ai_cmd; + s->poll = pcl816_ai_poll; + s->cancel = pcl816_ai_cancel; + } } else { s->type = COMEDI_SUBD_UNUSED; } @@ -1075,8 +994,6 @@ case COMEDI_SUBD_DO: pcl816_reset(dev); - printk("\n"); - return 0; } @@ -1085,7 +1002,7 @@ static void pcl816_detach(struct comedi_device *dev) struct pcl816_private *devpriv = dev->private; if (dev->private) { - pcl816_ai_cancel(dev, devpriv->sub_ai); + pcl816_ai_cancel(dev, dev->read_subdev); pcl816_reset(dev); if (devpriv->dma) free_dma(devpriv->dma); diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c index 9e4d7e8605097b..fa1758ad49d540 100644 --- a/drivers/staging/comedi/drivers/pcl818.c +++ b/drivers/staging/comedi/drivers/pcl818.c @@ -188,56 +188,78 @@ A word or two about DMA. 
Driver support DMA operations at two ways: #define MAGIC_DMA_WORD 0x5a5a -static const struct comedi_lrange range_pcl818h_ai = { 9, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - UNI_RANGE(10), - UNI_RANGE(5), - UNI_RANGE(2.5), - UNI_RANGE(1.25), - BIP_RANGE(10), - } +static const struct comedi_lrange range_pcl818h_ai = { + 9, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625), + UNI_RANGE(10), + UNI_RANGE(5), + UNI_RANGE(2.5), + UNI_RANGE(1.25), + BIP_RANGE(10) + } }; -static const struct comedi_lrange range_pcl818hg_ai = { 10, { - BIP_RANGE(5), - BIP_RANGE(0.5), - BIP_RANGE(0.05), - BIP_RANGE(0.005), - UNI_RANGE(10), - UNI_RANGE(1), - UNI_RANGE(0.1), - UNI_RANGE(0.01), - BIP_RANGE(10), - BIP_RANGE(1), - BIP_RANGE(0.1), - BIP_RANGE(0.01), - } +static const struct comedi_lrange range_pcl818hg_ai = { + 10, { + BIP_RANGE(5), + BIP_RANGE(0.5), + BIP_RANGE(0.05), + BIP_RANGE(0.005), + UNI_RANGE(10), + UNI_RANGE(1), + UNI_RANGE(0.1), + UNI_RANGE(0.01), + BIP_RANGE(10), + BIP_RANGE(1), + BIP_RANGE(0.1), + BIP_RANGE(0.01) + } }; -static const struct comedi_lrange range_pcl818l_l_ai = { 4, { - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - BIP_RANGE(0.625), - } +static const struct comedi_lrange range_pcl818l_l_ai = { + 4, { + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25), + BIP_RANGE(0.625) + } }; -static const struct comedi_lrange range_pcl818l_h_ai = { 4, { - BIP_RANGE(10), - BIP_RANGE(5), - BIP_RANGE(2.5), - BIP_RANGE(1.25), - } +static const struct comedi_lrange range_pcl818l_h_ai = { + 4, { + BIP_RANGE(10), + BIP_RANGE(5), + BIP_RANGE(2.5), + BIP_RANGE(1.25) + } +}; + +static const struct comedi_lrange range718_bipolar1 = { + 1, { + BIP_RANGE(1) + } }; -static const struct comedi_lrange range718_bipolar1 = { 1, {BIP_RANGE(1),} }; static const struct comedi_lrange range718_bipolar0_5 = { - 1, {BIP_RANGE(0.5),} }; -static const struct comedi_lrange range718_unipolar2 = { 1, {UNI_RANGE(2),} }; -static const struct comedi_lrange range718_unipolar1 = { 1, {BIP_RANGE(1),} }; + 1, { + BIP_RANGE(0.5) + } +}; + +static const struct comedi_lrange range718_unipolar2 = { + 1, { + UNI_RANGE(2) + } +}; + +static const struct comedi_lrange range718_unipolar1 = { + 1, { + BIP_RANGE(1) + } +}; struct pcl818_board { @@ -274,7 +296,6 @@ struct pcl818_private { unsigned char neverending_ai; /* if=1, then we do neverending record (you must use cancel()) */ unsigned int ns_min; /* manimal allowed delay between samples (in us) for actual card */ int i8253_osc_base; /* 1/frequency of on board oscilator in ns */ - int irq_free; /* 1=have allocated IRQ */ int irq_blocked; /* 1=IRQ now uses any subdev */ int irq_was_now_closed; /* when IRQ finish, there's stored int818_mode for last interrupt */ int ai_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */ @@ -291,7 +312,6 @@ struct pcl818_private { unsigned int ai_data_len; /* len of data buffer */ unsigned int ai_timer1; /* timers */ unsigned int ai_timer2; - struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */ unsigned char usefifo; /* 1=use fifo */ unsigned int ao_readback[2]; }; @@ -441,7 +461,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d) { struct comedi_device *dev = d; struct pcl818_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; unsigned char low; int timeout = 50; /* wait max 50us */ @@ -463,10 +483,10 @@ static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, 
void *d) outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ if ((low & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */ - printk - ("comedi: A/D mode1/3 IRQ - channel dropout %x!=%x !\n", - (low & 0xf), - devpriv->act_chanlist[devpriv->act_chanlist_pos]); + dev_dbg(dev->class_dev, + "A/D mode1/3 IRQ - channel dropout %x!=%x !\n", + (low & 0xf), + devpriv->act_chanlist[devpriv->act_chanlist_pos]); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); @@ -478,7 +498,6 @@ static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d) s->async->cur_chan++; if (s->async->cur_chan >= devpriv->ai_n_chan) { - /* printk("E"); */ s->async->cur_chan = 0; devpriv->ai_act_scan--; } @@ -501,7 +520,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d) { struct comedi_device *dev = d; struct pcl818_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; int i, len, bufptr; unsigned long flags; unsigned short *ptr; @@ -523,7 +542,6 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d) release_dma_lock(flags); enable_dma(devpriv->dma); } - printk("comedi: A/D mode1/3 IRQ \n"); devpriv->dma_runs_to_end--; outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ @@ -534,11 +552,11 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d) for (i = 0; i < len; i++) { if ((ptr[bufptr] & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */ - printk - ("comedi: A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n", - (ptr[bufptr] & 0xf), - devpriv->act_chanlist[devpriv->act_chanlist_pos], - devpriv->act_chanlist_pos); + dev_dbg(dev->class_dev, + "A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n", + (ptr[bufptr] & 0xf), + devpriv->act_chanlist[devpriv->act_chanlist_pos], + devpriv->act_chanlist_pos); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); @@ -562,7 +580,6 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d) pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); - /* printk("done int ai13 dma\n"); */ return IRQ_HANDLED; } } @@ -580,7 +597,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d) { struct comedi_device *dev = d; struct pcl818_private *devpriv = dev->private; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; int i, len; unsigned char lo; @@ -612,10 +629,10 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d) for (i = 0; i < len; i++) { lo = inb(dev->iobase + PCL818_FI_DATALO); if ((lo & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! 
*/ - printk - ("comedi: A/D mode1/3 FIFO - channel dropout %d!=%d !\n", - (lo & 0xf), - devpriv->act_chanlist[devpriv->act_chanlist_pos]); + dev_dbg(dev->class_dev, + "A/D mode1/3 FIFO - channel dropout %d!=%d !\n", + (lo & 0xf), + devpriv->act_chanlist[devpriv->act_chanlist_pos]); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); @@ -661,7 +678,6 @@ static irqreturn_t interrupt_pcl818(int irq, void *d) comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } - /* printk("I\n"); */ if (devpriv->irq_blocked && devpriv->irq_was_now_closed) { if ((devpriv->neverending_ai || (!devpriv->neverending_ai && @@ -673,10 +689,9 @@ static irqreturn_t interrupt_pcl818(int irq, void *d) being reprogrammed while a DMA transfer is in progress. */ - struct comedi_subdevice *s = &dev->subdevices[0]; devpriv->ai_act_scan = 0; devpriv->neverending_ai = 0; - pcl818_ai_cancel(dev, s); + pcl818_ai_cancel(dev, dev->read_subdev); } outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ @@ -705,8 +720,7 @@ static irqreturn_t interrupt_pcl818(int irq, void *d) outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ - if ((!dev->irq) || (!devpriv->irq_free) || (!devpriv->irq_blocked) - || (!devpriv->ai_mode)) { + if (!devpriv->irq_blocked || !devpriv->ai_mode) { comedi_error(dev, "bad IRQ!"); return IRQ_NONE; } @@ -726,7 +740,6 @@ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev, unsigned int flags; unsigned int bytes; - printk("mode13dma_int, mode: %d\n", mode); disable_dma(devpriv->dma); /* disable dma */ bytes = devpriv->hwdmasize[0]; if (!devpriv->neverending_ai) { @@ -753,7 +766,7 @@ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev, } else { devpriv->ai_mode = INT_TYPE_AI3_DMA; outb(0x86 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ+DMA */ - }; + } } /* @@ -768,12 +781,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev, int divisor1 = 0, divisor2 = 0; unsigned int seglen; - dev_dbg(dev->class_dev, "pcl818_ai_cmd_mode()\n"); - if (!dev->irq) { - comedi_error(dev, "IRQ not defined!"); - return -EINVAL; - } - if (devpriv->irq_blocked) return -EBUSY; @@ -824,7 +831,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev, case 0: if (!devpriv->usefifo) { /* IRQ */ - /* printk("IRQ\n"); */ if (mode == 1) { devpriv->ai_mode = INT_TYPE_AI1_INT; /* Pacer+IRQ */ @@ -853,7 +859,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev, start_pacer(dev, mode, divisor1, divisor2); - dev_dbg(dev->class_dev, "pcl818_ai_cmd_mode() end\n"); return 0; } @@ -899,10 +904,6 @@ static int check_channel_list(struct comedi_device *dev, chansegment[0] = chanlist[0]; /* build part of chanlist */ for (i = 1, seglen = 1; i < n_chan; i++, seglen++) { - - /* printk("%d. %d * %d\n",i, - * CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i]));*/ - /* we detect loop, this must by finish */ if (chanlist[0] == chanlist[i]) @@ -910,10 +911,10 @@ static int check_channel_list(struct comedi_device *dev, nowmustbechan = (CR_CHAN(chansegment[i - 1]) + 1) % s->n_chan; if (nowmustbechan != CR_CHAN(chanlist[i])) { /* channel list isn't continuous :-( */ - printk - ("comedi%d: pcl818: channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n", - dev->minor, i, CR_CHAN(chanlist[i]), - nowmustbechan, CR_CHAN(chanlist[0])); + dev_dbg(dev->class_dev, + "channel list must be continuous! 
chanlist[%i]=%d but must be %d or %d!\n", + i, CR_CHAN(chanlist[i]), nowmustbechan, + CR_CHAN(chanlist[0])); return 0; } /* well, this is next correct channel in list */ @@ -922,23 +923,21 @@ static int check_channel_list(struct comedi_device *dev, /* check whole chanlist */ for (i = 0, segpos = 0; i < n_chan; i++) { - /* printk("%d %d=%d %d\n",CR_CHAN(chansegment[i%seglen]),CR_RANGE(chansegment[i%seglen]),CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i])); */ if (chanlist[i] != chansegment[i % seglen]) { - printk - ("comedi%d: pcl818: bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n", - dev->minor, i, CR_CHAN(chansegment[i]), - CR_RANGE(chansegment[i]), - CR_AREF(chansegment[i]), - CR_CHAN(chanlist[i % seglen]), - CR_RANGE(chanlist[i % seglen]), - CR_AREF(chansegment[i % seglen])); + dev_dbg(dev->class_dev, + "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n", + i, CR_CHAN(chansegment[i]), + CR_RANGE(chansegment[i]), + CR_AREF(chansegment[i]), + CR_CHAN(chanlist[i % seglen]), + CR_RANGE(chanlist[i % seglen]), + CR_AREF(chansegment[i % seglen])); return 0; /* chan/gain list is strange */ } } } else { seglen = 1; } - printk("check_channel_list: seglen %d\n", seglen); return seglen; } @@ -1067,7 +1066,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) struct comedi_cmd *cmd = &s->async->cmd; int retval; - dev_dbg(dev->class_dev, "pcl818_ai_cmd()\n"); devpriv->ai_n_chan = cmd->chanlist_len; devpriv->ai_chanlist = cmd->chanlist; devpriv->ai_flags = cmd->flags; @@ -1084,7 +1082,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */ devpriv->ai_timer1 = cmd->convert_arg; retval = pcl818_ai_cmd_mode(1, dev, s); - dev_dbg(dev->class_dev, "pcl818_ai_cmd() end\n"); return retval; } if (cmd->convert_src == TRIG_EXT) { /* mode 3 */ @@ -1105,7 +1102,6 @@ static int pcl818_ai_cancel(struct comedi_device *dev, struct pcl818_private *devpriv = dev->private; if (devpriv->irq_blocked > 0) { - dev_dbg(dev->class_dev, "pcl818_ai_cancel()\n"); devpriv->irq_was_now_closed = 1; switch (devpriv->ai_mode) { @@ -1149,7 +1145,6 @@ static int pcl818_ai_cancel(struct comedi_device *dev, } end: - dev_dbg(dev->class_dev, "pcl818_ai_cancel() end\n"); return 0; } @@ -1216,7 +1211,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) const struct pcl818_board *board = comedi_board(dev); struct pcl818_private *devpriv; int ret; - unsigned int irq; int dma; unsigned long pages; struct comedi_subdevice *s; @@ -1240,50 +1234,28 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) return -EIO; } - /* grab our IRQ */ - irq = 0; - if (board->IRQbits != 0) { /* board support IRQ */ - irq = it->options[1]; - if (irq) { /* we want to use IRQ */ - if (((1 << irq) & board->IRQbits) == 0) { - printk - (", IRQ %u is out of allowed range, DISABLING IT", - irq); - irq = 0; /* Bad IRQ */ - } else { - if (request_irq(irq, interrupt_pcl818, 0, - dev->board_name, dev)) { - printk - (", unable to allocate IRQ %u, DISABLING IT", - irq); - irq = 0; /* Can't use IRQ */ - } else { - printk(KERN_DEBUG "irq=%u", irq); - } - } - } + if ((1 << it->options[1]) & board->IRQbits) { + ret = request_irq(it->options[1], interrupt_pcl818, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; } - dev->irq = irq; - if (irq) - devpriv->irq_free = 1; /* 1=we have allocated irq */ - else - devpriv->irq_free = 0; - devpriv->irq_blocked = 0; 
/* number of subdevice which use IRQ */ devpriv->ai_mode = 0; /* mode of irq */ /* grab our DMA */ dma = 0; devpriv->dma = dma; - if (!devpriv->irq_free) + if (!dev->irq) goto no_dma; /* if we haven't IRQ, we can't use DMA */ if (board->DMAbits != 0) { /* board support DMA */ dma = it->options[2]; if (dma < 1) goto no_dma; /* DMA disabled */ if (((1 << dma) & board->DMAbits) == 0) { - printk(KERN_ERR "DMA is out of allowed range, FAIL!\n"); + dev_err(dev->class_dev, + "DMA is out of allowed range, FAIL!\n"); return -EINVAL; /* Bad DMA */ } ret = request_dma(dma, dev->board_name); @@ -1298,7 +1270,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) devpriv->dmapages[0] = pages; devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]); devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE; - /* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */ devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages); if (!devpriv->dmabuf[1]) return -EBUSY; @@ -1318,27 +1289,24 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->type = COMEDI_SUBD_UNUSED; } else { s->type = COMEDI_SUBD_AI; - devpriv->sub_ai = s; s->subdev_flags = SDF_READABLE; if (check_single_ended(dev->iobase)) { s->n_chan = board->n_aichan_se; s->subdev_flags |= SDF_COMMON | SDF_GROUND; - printk(", %dchans S.E. DAC", s->n_chan); } else { s->n_chan = board->n_aichan_diff; s->subdev_flags |= SDF_DIFF; - printk(", %dchans DIFF DAC", s->n_chan); } s->maxdata = board->ai_maxdata; - s->len_chanlist = s->n_chan; s->range_table = board->ai_range_type; - s->cancel = pcl818_ai_cancel; s->insn_read = pcl818_ai_insn_read; - if (irq) { + if (dev->irq) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = s->n_chan; s->do_cmdtest = ai_cmdtest; s->do_cmd = ai_cmd; + s->cancel = pcl818_ai_cancel; } if (board->is_818) { if ((it->options[4] == 1) || (it->options[4] == 10)) @@ -1387,7 +1355,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->subdev_flags = SDF_WRITABLE | SDF_GROUND; s->n_chan = board->n_aochan; s->maxdata = board->ao_maxdata; - s->len_chanlist = board->n_aochan; s->range_table = board->ao_range_type; s->insn_read = pcl818_ao_insn_read; s->insn_write = pcl818_ao_insn_write; @@ -1412,7 +1379,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->subdev_flags = SDF_READABLE; s->n_chan = board->n_dichan; s->maxdata = 1; - s->len_chanlist = board->n_dichan; s->range_table = &range_digital; s->insn_bits = pcl818_di_insn_bits; } @@ -1425,7 +1391,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) s->subdev_flags = SDF_WRITABLE; s->n_chan = board->n_dochan; s->maxdata = 1; - s->len_chanlist = board->n_dochan; s->range_table = &range_digital; s->insn_bits = pcl818_do_insn_bits; } @@ -1446,8 +1411,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) pcl818_reset(dev); - printk("\n"); - return 0; } @@ -1456,7 +1419,7 @@ static void pcl818_detach(struct comedi_device *dev) struct pcl818_private *devpriv = dev->private; if (devpriv) { - pcl818_ai_cancel(dev, devpriv->sub_ai); + pcl818_ai_cancel(dev, dev->read_subdev); pcl818_reset(dev); if (devpriv->dma) free_dma(devpriv->dma); diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c index cc1dc7f66e5b58..f4a49bd649f0e9 100644 --- a/drivers/staging/comedi/drivers/pcm3724.c +++ 
b/drivers/staging/comedi/drivers/pcm3724.c @@ -70,14 +70,11 @@ static int subdev_8255_cb(int dir, int port, int data, unsigned long arg) { unsigned long iobase = arg; unsigned char inbres; - /* printk("8255cb %d %d %d %lx\n", dir,port,data,arg); */ if (dir) { - /* printk("8255 cb outb(%x, %lx)\n", data, iobase+port); */ outb(data, iobase + port); return 0; } else { inbres = inb(iobase + port); - /* printk("8255 cb inb(%lx) = %x\n", iobase+port, inbres); */ return inbres; } } @@ -137,8 +134,6 @@ static void do_3724_config(struct comedi_device *dev, port_8255_cfg = dev->iobase + SIZE_8255 + _8255_CR; outb(buffer_config, dev->iobase + 8); /* update buffer register */ - /* printk("pcm3724 buffer_config (%lx) %d, %x\n", - dev->iobase + _8255_CR, chanspec, buffer_config); */ outb(config, port_8255_cfg); } @@ -177,7 +172,6 @@ static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s, if (priv->dio_2 & 0xff) gatecfg |= GATE_A1; - /* printk("gate control %x\n", gatecfg); */ outb(gatecfg, dev->iobase + 9); } diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c index 14cee3ac92c52f..c388f7f3222762 100644 --- a/drivers/staging/comedi/drivers/pcmmio.c +++ b/drivers/staging/comedi/drivers/pcmmio.c @@ -1,76 +1,76 @@ /* - comedi/drivers/pcmmio.c - Driver for Winsystems PC-104 based multifunction IO board. - - COMEDI - Linux Control and Measurement Device Interface - Copyright (C) 2007 Calin A. Culianu - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. -*/ + * pcmmio.c + * Driver for Winsystems PC-104 based multifunction IO board. + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2007 Calin A. Culianu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + /* -Driver: pcmmio -Description: A driver for the PCM-MIO multifunction board -Devices: [Winsystems] PCM-MIO (pcmmio) -Author: Calin Culianu -Updated: Wed, May 16 2007 16:21:10 -0500 -Status: works - -A driver for the relatively new PCM-MIO multifunction board from -Winsystems. This board is a PC-104 based I/O board. It contains -four subdevices: - subdevice 0 - 16 channels of 16-bit AI - subdevice 1 - 8 channels of 16-bit AO - subdevice 2 - first 24 channels of the 48 channel of DIO - (with edge-triggered interrupt support) - subdevice 3 - last 24 channels of the 48 channel DIO - (no interrupt support for this bank of channels) - - Some notes: - - Synchronous reads and writes are the only things implemented for AI and AO, - even though the hardware itself can do streaming acquisition, etc. Anyone - want to add asynchronous I/O for AI/AO as a feature? Be my guest... 
- - Asynchronous I/O for the DIO subdevices *is* implemented, however! They are - basically edge-triggered interrupts for any configuration of the first - 24 DIO-lines. - - Also note that this interrupt support is untested. - - A few words about edge-detection IRQ support (commands on DIO): - - * To use edge-detection IRQ support for the DIO subdevice, pass the IRQ - of the board to the comedi_config command. The board IRQ is not jumpered - but rather configured through software, so any IRQ from 1-15 is OK. - - * Due to the genericity of the comedi API, you need to create a special - comedi_command in order to use edge-triggered interrupts for DIO. - - * Use comedi_commands with TRIG_NOW. Your callback will be called each - time an edge is detected on the specified DIO line(s), and the data - values will be two sample_t's, which should be concatenated to form - one 32-bit unsigned int. This value is the mask of channels that had - edges detected from your channel list. Note that the bits positions - in the mask correspond to positions in your chanlist when you - specified the command and *not* channel id's! - - * To set the polarity of the edge-detection interrupts pass a nonzero value - for either CR_RANGE or CR_AREF for edge-up polarity, or a zero - value for both CR_RANGE and CR_AREF if you want edge-down polarity. - -Configuration Options: - [0] - I/O port base address - [1] - IRQ (optional -- for edge-detect interrupt support only, - leave out if you don't need this feature) -*/ + * Driver: pcmmio + * Description: A driver for the PCM-MIO multifunction board + * Devices: (Winsystems) PCM-MIO [pcmmio] + * Author: Calin Culianu + * Updated: Wed, May 16 2007 16:21:10 -0500 + * Status: works + * + * A driver for the PCM-MIO multifunction board from Winsystems. This + * is a PC-104 based I/O board. It contains four subdevices: + * + * subdevice 0 - 16 channels of 16-bit AI + * subdevice 1 - 8 channels of 16-bit AO + * subdevice 2 - first 24 channels of the 48 channel of DIO + * (with edge-triggered interrupt support) + * subdevice 3 - last 24 channels of the 48 channel DIO + * (no interrupt support for this bank of channels) + * + * Some notes: + * + * Synchronous reads and writes are the only things implemented for analog + * input and output. The hardware itself can do streaming acquisition, etc. + * + * Asynchronous I/O for the DIO subdevices *is* implemented, however! They + * are basically edge-triggered interrupts for any configuration of the + * channels in subdevice 2. + * + * Also note that this interrupt support is untested. + * + * A few words about edge-detection IRQ support (commands on DIO): + * + * To use edge-detection IRQ support for the DIO subdevice, pass the IRQ + * of the board to the comedi_config command. The board IRQ is not jumpered + * but rather configured through software, so any IRQ from 1-15 is OK. + * + * Due to the genericity of the comedi API, you need to create a special + * comedi_command in order to use edge-triggered interrupts for DIO. + * + * Use comedi_commands with TRIG_NOW. Your callback will be called each + * time an edge is detected on the specified DIO line(s), and the data + * values will be two sample_t's, which should be concatenated to form + * one 32-bit unsigned int. This value is the mask of channels that had + * edges detected from your channel list. Note that the bits positions + * in the mask correspond to positions in your chanlist when you + * specified the command and *not* channel id's! 
+ * + * To set the polarity of the edge-detection interrupts pass a nonzero value + * for either CR_RANGE or CR_AREF for edge-up polarity, or a zero + * value for both CR_RANGE and CR_AREF if you want edge-down polarity. + * + * Configuration Options: + * [0] - I/O port base address + * [1] - IRQ (optional -- for edge-detect interrupt support only, + * leave out if you don't need this feature) + */ #include #include @@ -80,232 +80,211 @@ Configuration Options: #include "comedi_fc.h" -/* This stuff is all from pcmuio.c -- it refers to the DIO subdevices only */ -#define CHANS_PER_PORT 8 -#define PORTS_PER_ASIC 6 -#define INTR_PORTS_PER_ASIC 3 -#define MAX_CHANS_PER_SUBDEV 24 /* number of channels per comedi subdevice */ -#define PORTS_PER_SUBDEV (MAX_CHANS_PER_SUBDEV/CHANS_PER_PORT) -#define CHANS_PER_ASIC (CHANS_PER_PORT*PORTS_PER_ASIC) -#define INTR_CHANS_PER_ASIC 24 -#define INTR_PORTS_PER_SUBDEV (INTR_CHANS_PER_ASIC/CHANS_PER_PORT) -#define MAX_DIO_CHANS (PORTS_PER_ASIC*1*CHANS_PER_PORT) -#define MAX_ASICS (MAX_DIO_CHANS/CHANS_PER_ASIC) -#define CALC_N_DIO_SUBDEVS(nchans) ((nchans)/MAX_CHANS_PER_SUBDEV + (!!((nchans)%MAX_CHANS_PER_SUBDEV)) /*+ (nchans > INTR_CHANS_PER_ASIC ? 2 : 1)*/) -/* IO Memory sizes */ -#define ASIC_IOSIZE (0x0B) -#define PCMMIO48_IOSIZE ASIC_IOSIZE - -/* Some offsets - these are all in the 16byte IO memory offset from - the base address. Note that there is a paging scheme to swap out - offsets 0x8-0xA using the PAGELOCK register. See the table below. - - Register(s) Pages R/W? Description - -------------------------------------------------------------- - REG_PORTx All R/W Read/Write/Configure IO - REG_INT_PENDING All ReadOnly Quickly see which INT_IDx has int. - REG_PAGELOCK All WriteOnly Select a page - REG_POLx Pg. 1 only WriteOnly Select edge-detection polarity - REG_ENABx Pg. 2 only WriteOnly Enable/Disable edge-detect. int. - REG_INT_IDx Pg. 3 only R/W See which ports/bits have ints. +/* + * Register I/O map */ -#define REG_PORT0 0x0 -#define REG_PORT1 0x1 -#define REG_PORT2 0x2 -#define REG_PORT3 0x3 -#define REG_PORT4 0x4 -#define REG_PORT5 0x5 -#define REG_INT_PENDING 0x6 -#define REG_PAGELOCK 0x7 /* - * page selector register, upper 2 bits select - * a page and bits 0-5 are used to 'lock down' - * a particular port above to make it readonly. 
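[ Editor's illustration -- not part of the patch. The usage notes above say the DIO edge-detect feature is driven by an ordinary comedi command. A rough user-space sketch with the standard comedilib API follows; /dev/comedi0, subdevice 2 and the TRIG_* source choices are assumptions (the sources actually accepted are whatever pcmmio_cmdtest() allows), and the nonzero range argument to CR_PACK() selects edge-up polarity as described above. ]

	#include <string.h>
	#include <comedilib.h>

	static int watch_dio_edges(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");
		unsigned int chanlist[2];
		comedi_cmd cmd;
		int i;

		if (!dev)
			return -1;

		/* watch DIO channels 0 and 1, rising edges */
		for (i = 0; i < 2; i++)
			chanlist[i] = CR_PACK(i, 1, 0);

		memset(&cmd, 0, sizeof(cmd));
		cmd.subdev         = 2;		/* interrupt-capable DIO bank */
		cmd.start_src      = TRIG_NOW;
		cmd.scan_begin_src = TRIG_EXT;	/* one scan per detected edge */
		cmd.convert_src    = TRIG_NOW;
		cmd.scan_end_src   = TRIG_COUNT;
		cmd.scan_end_arg   = 2;
		cmd.stop_src       = TRIG_NONE;	/* run until cancelled */
		cmd.chanlist       = chanlist;
		cmd.chanlist_len   = 2;

		return comedi_command(dev, &cmd);
	}

[ Each scan read back then consists of two 16-bit samples that concatenate into the 32-bit mask of chanlist positions that saw an edge, as the notes above describe. ]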
- */ -#define REG_POL0 0x8 -#define REG_POL1 0x9 -#define REG_POL2 0xA -#define REG_ENAB0 0x8 -#define REG_ENAB1 0x9 -#define REG_ENAB2 0xA -#define REG_INT_ID0 0x8 -#define REG_INT_ID1 0x9 -#define REG_INT_ID2 0xA - -#define NUM_PAGED_REGS 3 -#define NUM_PAGES 4 -#define FIRST_PAGED_REG 0x8 -#define REG_PAGE_BITOFFSET 6 -#define REG_LOCK_BITOFFSET 0 -#define REG_PAGE_MASK (~((0x1<private) +static void pcmmio_dio_write(struct comedi_device *dev, unsigned int val, + int page, int port) +{ + struct pcmmio_private *devpriv = dev->private; + unsigned long iobase = dev->iobase; + unsigned long flags; + + spin_lock_irqsave(&devpriv->pagelock, flags); + if (page == 0) { + /* Port registers are valid for any page */ + outb(val & 0xff, iobase + PCMMIO_PORT_REG(port + 0)); + outb((val >> 8) & 0xff, iobase + PCMMIO_PORT_REG(port + 1)); + outb((val >> 16) & 0xff, iobase + PCMMIO_PORT_REG(port + 2)); + } else { + outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG); + outb(val & 0xff, iobase + PCMMIO_PAGE_REG(0)); + outb((val >> 8) & 0xff, iobase + PCMMIO_PAGE_REG(1)); + outb((val >> 16) & 0xff, iobase + PCMMIO_PAGE_REG(2)); + } + spin_unlock_irqrestore(&devpriv->pagelock, flags); +} + +static unsigned int pcmmio_dio_read(struct comedi_device *dev, + int page, int port) +{ + struct pcmmio_private *devpriv = dev->private; + unsigned long iobase = dev->iobase; + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(&devpriv->pagelock, flags); + if (page == 0) { + /* Port registers are valid for any page */ + val = inb(iobase + PCMMIO_PORT_REG(port + 0)); + val |= (inb(iobase + PCMMIO_PORT_REG(port + 1)) << 8); + val |= (inb(iobase + PCMMIO_PORT_REG(port + 2)) << 16); + } else { + outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG); + val = inb(iobase + PCMMIO_PAGE_REG(0)); + val |= (inb(iobase + PCMMIO_PAGE_REG(1)) << 8); + val |= (inb(iobase + PCMMIO_PAGE_REG(2)) << 16); + } + spin_unlock_irqrestore(&devpriv->pagelock, flags); + + return val; +} -/* DIO devices are slightly special. Although it is possible to - * implement the insn_read/insn_write interface, it is much more - * useful to applications if you implement the insn_bits interface. - * This allows packed reading/writing of the DIO channels. The - * comedi core can convert between insn_bits and insn_read/write */ +/* + * Each channel can be individually programmed for input or output. + * Writing a '0' to a channel causes the corresponding output pin + * to go to a high-z state (pulled high by an external 10K resistor). + * This allows it to be used as an input. When used in the input mode, + * a read reflects the inverted state of the I/O pin, such that a + * high on the pin will read as a '0' in the register. Writing a '1' + * to a bit position causes the pin to sink current (up to 12mA), + * effectively pulling it low. + */ static int pcmmio_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) + struct comedi_insn *insn, + unsigned int *data) { - int byte_no; - - /* NOTE: - reading a 0 means this channel was high - writine a 0 sets the channel high - reading a 1 means this channel was low - writing a 1 means set this channel low - - Therefore everything is always inverted. */ - - /* The insn data is a mask in data[0] and the new data - * in data[1], each channel cooresponding to a bit. 
*/ - -#ifdef DAMMIT_ITS_BROKEN - /* DEBUG */ - printk(KERN_DEBUG "write mask: %08x data: %08x\n", data[0], data[1]); -#endif - - s->state = 0; - - for (byte_no = 0; byte_no < s->n_chan / CHANS_PER_PORT; ++byte_no) { - /* address of 8-bit port */ - unsigned long ioaddr = subpriv->iobases[byte_no], - /* bit offset of port in 32-bit doubleword */ - offset = byte_no * 8; - /* this 8-bit port's data */ - unsigned char byte = 0, - /* The write mask for this port (if any) */ - write_mask_byte = (data[0] >> offset) & 0xff, - /* The data byte for this port */ - data_byte = (data[1] >> offset) & 0xff; - - byte = inb(ioaddr); /* read all 8-bits for this port */ - -#ifdef DAMMIT_ITS_BROKEN - /* DEBUG */ - printk - (KERN_DEBUG "byte %d wmb %02x db %02x offset %02d io %04x," - " data_in %02x ", byte_no, (unsigned)write_mask_byte, - (unsigned)data_byte, offset, ioaddr, (unsigned)byte); -#endif - - if (write_mask_byte) { - /* - * this byte has some write_bits - * -- so set the output lines - */ - /* clear bits for write mask */ - byte &= ~write_mask_byte; - /* set to inverted data_byte */ - byte |= ~data_byte & write_mask_byte; - /* Write out the new digital output state */ - outb(byte, ioaddr); - } -#ifdef DAMMIT_ITS_BROKEN - /* DEBUG */ - printk(KERN_DEBUG "data_out_byte %02x\n", (unsigned)byte); -#endif - /* save the digital input lines for this byte.. */ - s->state |= ((unsigned int)byte) << offset; + /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */ + int port = s->index == 2 ? 0 : 3; + unsigned int chanmask = (1 << s->n_chan) - 1; + unsigned int mask; + unsigned int val; + + mask = comedi_dio_update_state(s, data); + if (mask) { + /* + * Outputs are inverted, invert the state and + * update the channels. + * + * The s->io_bits mask makes sure the input channels + * are '0' so that the outputs pins stay in a high + * z-state. + */ + val = ~s->state & chanmask; + val &= s->io_bits; + pcmmio_dio_write(dev, val, 0, port); } - /* now return the DIO lines to data[1] - note they came inverted! */ - data[1] = ~s->state; + /* get inverted state of the channels from the port */ + val = pcmmio_dio_read(dev, 0, port); -#ifdef DAMMIT_ITS_BROKEN - /* DEBUG */ - printk(KERN_DEBUG "s->state %08x data_out %08x\n", s->state, data[1]); -#endif + /* return the true state of the channels */ + data[1] = ~val & chanmask; return insn->n; } @@ -315,376 +294,172 @@ static int pcmmio_dio_insn_config(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { - unsigned int chan = CR_CHAN(insn->chanspec); - int byte_no = chan / 8; - int bit_no = chan % 8; + /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */ + int port = s->index == 2 ? 
0 : 3; int ret; ret = comedi_dio_insn_config(dev, s, insn, data, 0); if (ret) return ret; - if (data[0] == INSN_CONFIG_DIO_INPUT) { - unsigned long ioaddr = subpriv->iobases[byte_no]; - unsigned char val; - - val = inb(ioaddr); - val &= ~(1 << bit_no); - outb(val, ioaddr); - } + if (data[0] == INSN_CONFIG_DIO_INPUT) + pcmmio_dio_write(dev, s->io_bits, 0, port); return insn->n; } -static void switch_page(struct comedi_device *dev, int asic, int page) +static void pcmmio_reset(struct comedi_device *dev) { - struct pcmmio_private *devpriv = dev->private; - - if (asic < 0 || asic >= 1) - return; /* paranoia */ - if (page < 0 || page >= NUM_PAGES) - return; /* more paranoia */ - - devpriv->asics[asic].pagelock &= ~REG_PAGE_MASK; - devpriv->asics[asic].pagelock |= page << REG_PAGE_BITOFFSET; - - /* now write out the shadow register */ - outb(devpriv->asics[asic].pagelock, - devpriv->asics[asic].iobase + REG_PAGELOCK); + /* Clear all the DIO port bits */ + pcmmio_dio_write(dev, 0, 0, 0); + pcmmio_dio_write(dev, 0, 0, 3); + + /* Clear all the paged registers */ + pcmmio_dio_write(dev, 0, PCMMIO_PAGE_POL, 0); + pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0); + pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0); } -static void init_asics(struct comedi_device *dev) -{ /* sets up an - ASIC chip to defaults */ +/* devpriv->spinlock is already locked */ +static void pcmmio_stop_intr(struct comedi_device *dev, + struct comedi_subdevice *s) +{ struct pcmmio_private *devpriv = dev->private; - int asic; - - for (asic = 0; asic < 1; ++asic) { - int port, page; - unsigned long baseaddr = devpriv->asics[asic].iobase; - - switch_page(dev, asic, 0); /* switch back to page 0 */ - - /* first, clear all the DIO port bits */ - for (port = 0; port < PORTS_PER_ASIC; ++port) - outb(0, baseaddr + REG_PORT0 + port); - - /* Next, clear all the paged registers for each page */ - for (page = 1; page < NUM_PAGES; ++page) { - int reg; - /* now clear all the paged registers */ - switch_page(dev, asic, page); - for (reg = FIRST_PAGED_REG; - reg < FIRST_PAGED_REG + NUM_PAGED_REGS; ++reg) - outb(0, baseaddr + reg); - } - /* DEBUG set rising edge interrupts on port0 of both asics */ - /*switch_page(dev, asic, PAGE_POL); - outb(0xff, baseaddr + REG_POL0); - switch_page(dev, asic, PAGE_ENAB); - outb(0xff, baseaddr + REG_ENAB0); */ - /* END DEBUG */ + devpriv->enabled_mask = 0; + devpriv->active = 0; + s->async->inttrig = NULL; - /* switch back to default page 0 */ - switch_page(dev, asic, 0); - } + /* disable all dio interrupts */ + pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0); } -#ifdef notused -static void lock_port(struct comedi_device *dev, int asic, int port) +static void pcmmio_handle_dio_intr(struct comedi_device *dev, + struct comedi_subdevice *s, + unsigned int triggered) { struct pcmmio_private *devpriv = dev->private; + unsigned int oldevents = s->async->events; + unsigned int len = s->async->cmd.chanlist_len; + unsigned int val = 0; + unsigned long flags; + int i; - if (asic < 0 || asic >= 1) - return; /* paranoia */ - if (port < 0 || port >= PORTS_PER_ASIC) - return; /* more paranoia */ + spin_lock_irqsave(&devpriv->spinlock, flags); - devpriv->asics[asic].pagelock |= 0x1 << port; - /* now write out the shadow register */ - outb(devpriv->asics[asic].pagelock, - devpriv->asics[asic].iobase + REG_PAGELOCK); - return; -} + if (!devpriv->active) + goto done; -static void unlock_port(struct comedi_device *dev, int asic, int port) -{ - struct pcmmio_private *devpriv = dev->private; + if (!(triggered & devpriv->enabled_mask)) + 
goto done; - if (asic < 0 || asic >= 1) - return; /* paranoia */ - if (port < 0 || port >= PORTS_PER_ASIC) - return; /* more paranoia */ - devpriv->asics[asic].pagelock &= ~(0x1 << port) | REG_LOCK_MASK; - /* now write out the shadow register */ - outb(devpriv->asics[asic].pagelock, - devpriv->asics[asic].iobase + REG_PAGELOCK); -} -#endif /* notused */ + for (i = 0; i < len; i++) { + unsigned int chan = CR_CHAN(s->async->cmd.chanlist[i]); -static void pcmmio_stop_intr(struct comedi_device *dev, - struct comedi_subdevice *s) -{ - struct pcmmio_private *devpriv = dev->private; - int nports, firstport, asic, port; + if (triggered & (1 << chan)) + val |= (1 << i); + } - asic = subpriv->dio.intr.asic; - if (asic < 0) - return; /* not an interrupt subdev */ + /* Write the scan to the buffer. */ + if (comedi_buf_put(s->async, val) && + comedi_buf_put(s->async, val >> 16)) { + s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS); + } else { + /* Overflow! Stop acquisition!! */ + /* TODO: STOP_ACQUISITION_CALL_HERE!! */ + pcmmio_stop_intr(dev, s); + } - subpriv->dio.intr.enabled_mask = 0; - subpriv->dio.intr.active = 0; - s->async->inttrig = NULL; - nports = subpriv->dio.intr.num_asic_chans / CHANS_PER_PORT; - firstport = subpriv->dio.intr.asic_chan / CHANS_PER_PORT; - switch_page(dev, asic, PAGE_ENAB); - for (port = firstport; port < firstport + nports; ++port) { - /* disable all intrs for this subdev.. */ - outb(0, devpriv->asics[asic].iobase + REG_ENAB0 + port); + /* Check for end of acquisition. */ + if (!devpriv->continuous) { + /* stop_src == TRIG_COUNT */ + if (devpriv->stop_count > 0) { + devpriv->stop_count--; + if (devpriv->stop_count == 0) { + s->async->events |= COMEDI_CB_EOA; + /* TODO: STOP_ACQUISITION_CALL_HERE!! */ + pcmmio_stop_intr(dev, s); + } + } } + +done: + spin_unlock_irqrestore(&devpriv->spinlock, flags); + + if (oldevents != s->async->events) + comedi_event(dev, s); } static irqreturn_t interrupt_pcmmio(int irq, void *d) { - int asic, got1 = 0; - struct comedi_device *dev = (struct comedi_device *)d; - struct pcmmio_private *devpriv = dev->private; - int i; + struct comedi_device *dev = d; + struct comedi_subdevice *s = dev->read_subdev; + unsigned int triggered; + unsigned char int_pend; - for (asic = 0; asic < MAX_ASICS; ++asic) { - if (irq == devpriv->asics[asic].irq) { - unsigned long flags; - unsigned triggered = 0; - unsigned long iobase = devpriv->asics[asic].iobase; - /* it is an interrupt for ASIC #asic */ - unsigned char int_pend; - - spin_lock_irqsave(&devpriv->asics[asic].spinlock, - flags); - - int_pend = inb(iobase + REG_INT_PENDING) & 0x07; - - if (int_pend) { - int port; - for (port = 0; port < INTR_PORTS_PER_ASIC; - ++port) { - if (int_pend & (0x1 << port)) { - unsigned char - io_lines_with_edges = 0; - switch_page(dev, asic, - PAGE_INT_ID); - io_lines_with_edges = - inb(iobase + - REG_INT_ID0 + port); - - if (io_lines_with_edges) - /* - * clear pending - * interrupt - */ - outb(0, iobase + - REG_INT_ID0 + - port); - - triggered |= - io_lines_with_edges << - port * 8; - } - } - - ++got1; - } + /* are there any interrupts pending */ + int_pend = inb(dev->iobase + PCMMIO_INT_PENDING_REG) & 0x07; + if (!int_pend) + return IRQ_NONE; - spin_unlock_irqrestore(&devpriv->asics[asic].spinlock, - flags); - - if (triggered) { - struct comedi_subdevice *s; - /* - * TODO here: dispatch io lines to subdevs - * with commands.. 
- */ - printk - (KERN_DEBUG "got edge detect interrupt %d asic %d which_chans: %06x\n", - irq, asic, triggered); - for (i = 2; i < dev->n_subdevices; i++) { - s = &dev->subdevices[i]; - /* - * this is an interrupt subdev, - * and it matches this asic! - */ - if (subpriv->dio.intr.asic == asic) { - unsigned long flags; - unsigned oldevents; - - spin_lock_irqsave(&subpriv->dio. - intr.spinlock, - flags); - - oldevents = s->async->events; - - if (subpriv->dio.intr.active) { - unsigned mytrig = - ((triggered >> - subpriv->dio.intr.asic_chan) - & - ((0x1 << subpriv-> - dio.intr. - num_asic_chans) - - 1)) << subpriv-> - dio.intr.first_chan; - if (mytrig & - subpriv->dio. - intr.enabled_mask) { - unsigned int val - = 0; - unsigned int n, - ch, len; - - len = - s-> - async->cmd.chanlist_len; - for (n = 0; - n < len; - n++) { - ch = CR_CHAN(s->async->cmd.chanlist[n]); - if (mytrig & (1U << ch)) - val |= (1U << n); - } - /* Write the scan to the buffer. */ - if (comedi_buf_put(s->async, val) - && - comedi_buf_put - (s->async, - val >> 16)) { - s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS); - } else { - /* Overflow! Stop acquisition!! */ - /* TODO: STOP_ACQUISITION_CALL_HERE!! */ - pcmmio_stop_intr - (dev, - s); - } - - /* Check for end of acquisition. */ - if (!subpriv->dio.intr.continuous) { - /* stop_src == TRIG_COUNT */ - if (subpriv->dio.intr.stop_count > 0) { - subpriv->dio.intr.stop_count--; - if (subpriv->dio.intr.stop_count == 0) { - s->async->events |= COMEDI_CB_EOA; - /* TODO: STOP_ACQUISITION_CALL_HERE!! */ - pcmmio_stop_intr - (dev, - s); - } - } - } - } - } - - spin_unlock_irqrestore - (&subpriv->dio.intr. - spinlock, flags); - - if (oldevents != - s->async->events) { - comedi_event(dev, s); - } - - } - - } - } + /* get, and clear, the pending interrupts */ + triggered = pcmmio_dio_read(dev, PCMMIO_PAGE_INT_ID, 0); + pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0); + + pcmmio_handle_dio_intr(dev, s, triggered); - } - } - if (!got1) - return IRQ_NONE; /* interrupt from other source */ return IRQ_HANDLED; } +/* devpriv->spinlock is already locked */ static int pcmmio_start_intr(struct comedi_device *dev, struct comedi_subdevice *s) { struct pcmmio_private *devpriv = dev->private; + struct comedi_cmd *cmd = &s->async->cmd; + unsigned int bits = 0; + unsigned int pol_bits = 0; + int i; - if (!subpriv->dio.intr.continuous && subpriv->dio.intr.stop_count == 0) { + if (!devpriv->continuous && devpriv->stop_count == 0) { /* An empty acquisition! */ s->async->events |= COMEDI_CB_EOA; - subpriv->dio.intr.active = 0; + devpriv->active = 0; return 1; - } else { - unsigned bits = 0, pol_bits = 0, n; - int nports, firstport, asic, port; - struct comedi_cmd *cmd = &s->async->cmd; - - asic = subpriv->dio.intr.asic; - if (asic < 0) - return 1; /* not an interrupt - subdev */ - subpriv->dio.intr.enabled_mask = 0; - subpriv->dio.intr.active = 1; - nports = subpriv->dio.intr.num_asic_chans / CHANS_PER_PORT; - firstport = subpriv->dio.intr.asic_chan / CHANS_PER_PORT; - if (cmd->chanlist) { - for (n = 0; n < cmd->chanlist_len; n++) { - bits |= (1U << CR_CHAN(cmd->chanlist[n])); - pol_bits |= (CR_AREF(cmd->chanlist[n]) - || CR_RANGE(cmd-> - chanlist[n]) ? 1U : 0U) - << CR_CHAN(cmd->chanlist[n]); - } - } - bits &= ((0x1 << subpriv->dio.intr.num_asic_chans) - - 1) << subpriv->dio.intr.first_chan; - subpriv->dio.intr.enabled_mask = bits; - - { - /* - * the below code configures the board - * to use a specific IRQ from 0-15. 
- */ - unsigned char b; - /* - * set resource enable register - * to enable IRQ operation - */ - outb(1 << 4, dev->iobase + 3); - /* set bits 0-3 of b to the irq number from 0-15 */ - b = dev->irq & ((1 << 4) - 1); - outb(b, dev->iobase + 2); - /* done, we told the board what irq to use */ - } + } - switch_page(dev, asic, PAGE_ENAB); - for (port = firstport; port < firstport + nports; ++port) { - unsigned enab = - bits >> (subpriv->dio.intr.first_chan + (port - - firstport) - * 8) & 0xff, pol = - pol_bits >> (subpriv->dio.intr.first_chan + - (port - firstport) * 8) & 0xff; - /* set enab intrs for this subdev.. */ - outb(enab, - devpriv->asics[asic].iobase + REG_ENAB0 + port); - switch_page(dev, asic, PAGE_POL); - outb(pol, - devpriv->asics[asic].iobase + REG_ENAB0 + port); + devpriv->enabled_mask = 0; + devpriv->active = 1; + if (cmd->chanlist) { + for (i = 0; i < cmd->chanlist_len; i++) { + unsigned int chanspec = cmd->chanlist[i]; + unsigned int chan = CR_CHAN(chanspec); + unsigned int range = CR_RANGE(chanspec); + unsigned int aref = CR_AREF(chanspec); + + bits |= (1 << chan); + pol_bits |= (((aref || range) ? 1 : 0) << chan); } } + bits &= ((1 << s->n_chan) - 1); + devpriv->enabled_mask = bits; + + /* set polarity and enable interrupts */ + pcmmio_dio_write(dev, pol_bits, PCMMIO_PAGE_POL, 0); + pcmmio_dio_write(dev, bits, PCMMIO_PAGE_ENAB, 0); + return 0; } static int pcmmio_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { + struct pcmmio_private *devpriv = dev->private; unsigned long flags; - spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags); - if (subpriv->dio.intr.active) + spin_lock_irqsave(&devpriv->spinlock, flags); + if (devpriv->active) pcmmio_stop_intr(dev, s); - spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags); + spin_unlock_irqrestore(&devpriv->spinlock, flags); return 0; } @@ -696,17 +471,18 @@ static int pcmmio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { + struct pcmmio_private *devpriv = dev->private; unsigned long flags; int event = 0; if (trignum != 0) return -EINVAL; - spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags); + spin_lock_irqsave(&devpriv->spinlock, flags); s->async->inttrig = NULL; - if (subpriv->dio.intr.active) + if (devpriv->active) event = pcmmio_start_intr(dev, s); - spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags); + spin_unlock_irqrestore(&devpriv->spinlock, flags); if (event) comedi_event(dev, s); @@ -719,23 +495,24 @@ pcmmio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s, */ static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { + struct pcmmio_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; unsigned long flags; int event = 0; - spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags); - subpriv->dio.intr.active = 1; + spin_lock_irqsave(&devpriv->spinlock, flags); + devpriv->active = 1; /* Set up end of acquisition. 
*/ switch (cmd->stop_src) { case TRIG_COUNT: - subpriv->dio.intr.continuous = 0; - subpriv->dio.intr.stop_count = cmd->stop_arg; + devpriv->continuous = 0; + devpriv->stop_count = cmd->stop_arg; break; default: /* TRIG_NONE */ - subpriv->dio.intr.continuous = 1; - subpriv->dio.intr.stop_count = 0; + devpriv->continuous = 1; + devpriv->stop_count = 0; break; } @@ -749,7 +526,7 @@ static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) event = pcmmio_start_intr(dev, s); break; } - spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags); + spin_unlock_irqrestore(&devpriv->spinlock, flags); if (event) comedi_event(dev, s); @@ -812,188 +589,171 @@ static int pcmmio_cmdtest(struct comedi_device *dev, return 0; } -static int adc_wait_ready(unsigned long iobase) +static int pcmmio_ai_wait_for_eoc(unsigned long iobase, unsigned int timeout) { - unsigned long retry = 100000; - while (retry--) - if (inb(iobase + 3) & 0x80) + unsigned char status; + + while (timeout--) { + status = inb(iobase + PCMMIO_AI_STATUS_REG); + if (status & PCMMIO_AI_STATUS_DATA_READY) return 0; - return 1; + } + return -ETIME; } -/* All this is for AI and AO */ -static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) +static int pcmmio_ai_insn_read(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, + unsigned int *data) { - int n; - unsigned long iobase = subpriv->iobase; + unsigned long iobase = dev->iobase; + unsigned int chan = CR_CHAN(insn->chanspec); + unsigned int range = CR_RANGE(insn->chanspec); + unsigned int aref = CR_AREF(insn->chanspec); + unsigned char cmd = 0; + unsigned int val; + int ret; + int i; /* - 1. write the CMD byte (to BASE+2) - 2. read junk lo byte (BASE+0) - 3. read junk hi byte (BASE+1) - 4. (mux settled so) write CMD byte again (BASE+2) - 5. read valid lo byte(BASE+0) - 6. read valid hi byte(BASE+1) - - Additionally note that the BASE += 4 if the channel >= 8 + * The PCM-MIO uses two Linear Tech LTC1859CG 8-channel A/D converters. + * The devices use a full duplex serial interface which transmits and + * receives data simultaneously. An 8-bit command is shifted into the + * ADC interface to configure it for the next conversion. At the same + * time, the data from the previous conversion is shifted out of the + * device. Consequently, the conversion result is delayed by one + * conversion from the command word. + * + * Setup the cmd for the conversions then do a dummy conversion to + * flush the junk data. Then do each conversion requested by the + * comedi_insn. Note that the last conversion will leave junk data + * in ADC which will get flushed on the next comedi_insn. */ - /* convert n samples */ - for (n = 0; n < insn->n; n++) { - unsigned chan = CR_CHAN(insn->chanspec), range = - CR_RANGE(insn->chanspec), aref = CR_AREF(insn->chanspec); - unsigned char command_byte = 0; - unsigned iooffset = 0; - unsigned short sample, adc_adjust = 0; - - if (chan > 7) - chan -= 8, iooffset = 4; /* - * use the second dword - * for channels > 7 - */ - - if (aref != AREF_DIFF) { - aref = AREF_GROUND; - command_byte |= 1 << 7; /* - * set bit 7 to indicate - * single-ended - */ - } - if (range < 2) - adc_adjust = 0x8000; /* - * bipolar ranges - * (-5,5 .. -10,10 need to be - * adjusted -- that is.. 
they - * need to wrap around by - * adding 0x8000 - */ - - if (chan % 2) { - command_byte |= 1 << 6; /* - * odd-numbered channels - * have bit 6 set - */ - } - - /* select the channel, bits 4-5 == chan/2 */ - command_byte |= ((chan / 2) & 0x3) << 4; + if (chan > 7) { + chan -= 8; + iobase += PCMMIO_AI_2ND_ADC_OFFSET; + } - /* set the range, bits 2-3 */ - command_byte |= (range & 0x3) << 2; + if (aref == AREF_GROUND) + cmd |= PCMMIO_AI_CMD_SE; + if (chan % 2) + cmd |= PCMMIO_AI_CMD_ODD_CHAN; + cmd |= PCMMIO_AI_CMD_CHAN_SEL(chan / 2); + cmd |= PCMMIO_AI_CMD_RANGE(range); - /* need to do this twice to make sure mux settled */ - /* chan/range/aref select */ - outb(command_byte, iobase + iooffset + 2); + outb(cmd, iobase + PCMMIO_AI_CMD_REG); + ret = pcmmio_ai_wait_for_eoc(iobase, 100000); + if (ret) + return ret; - /* wait for the adc to say it finised the conversion */ - adc_wait_ready(iobase + iooffset); + val = inb(iobase + PCMMIO_AI_LSB_REG); + val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8; - /* select the chan/range/aref AGAIN */ - outb(command_byte, iobase + iooffset + 2); + for (i = 0; i < insn->n; i++) { + outb(cmd, iobase + PCMMIO_AI_CMD_REG); + ret = pcmmio_ai_wait_for_eoc(iobase, 100000); + if (ret) + return ret; - adc_wait_ready(iobase + iooffset); + val = inb(iobase + PCMMIO_AI_LSB_REG); + val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8; - /* read data lo byte */ - sample = inb(iobase + iooffset + 0); + /* bipolar data is two's complement */ + if (comedi_range_is_bipolar(s, range)) + val = comedi_offset_munge(s, val); - /* read data hi byte */ - sample |= inb(iobase + iooffset + 1) << 8; - sample += adc_adjust; /* adjustment .. munge data */ - data[n] = sample; + data[i] = val; } - /* return the number of samples read/written */ - return n; + + return insn->n; } -static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) +static int pcmmio_ao_insn_read(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, + unsigned int *data) { - int n; - for (n = 0; n < insn->n; n++) { - unsigned chan = CR_CHAN(insn->chanspec); - if (chan < s->n_chan) - data[n] = subpriv->ao.shadow_samples[chan]; - } - return n; + struct pcmmio_private *devpriv = dev->private; + unsigned int chan = CR_CHAN(insn->chanspec); + int i; + + for (i = 0; i < insn->n; i++) + data[i] = devpriv->ao_readback[chan]; + + return insn->n; } -static int wait_dac_ready(unsigned long iobase) +static int pcmmio_ao_wait_for_eoc(unsigned long iobase, unsigned int timeout) { - unsigned long retry = 100000L; + unsigned char status; - /* This may seem like an absurd way to handle waiting and violates the - "no busy waiting" policy. The fact is that the hardware is - normally so fast that we usually only need one time through the loop - anyway. The longer timeout is for rare occasions and for detecting - non-existent hardware. 
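[ Editor's note -- not part of the patch. In the reworked pcmmio_ai_insn_read() above, comedi_range_is_bipolar()/comedi_offset_munge() replace the old hand-rolled `adc_adjust = 0x8000; sample += adc_adjust` fix-up. For this 16-bit converter both forms turn the ADC's two's-complement result into the offset-binary value comedi expects by flipping the top bit, i.e. roughly: ]

	/* with s->maxdata == 0xffff the munge amounts to: */
	val ^= 0x8000;	/* same result as the old 'sample += 0x8000' (mod 2^16) */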
*/ - - while (retry--) { - if (inb(iobase + 3) & 0x80) + while (timeout--) { + status = inb(iobase + PCMMIO_AO_STATUS_REG); + if (status & PCMMIO_AO_STATUS_DATA_READY) return 0; - } - return 1; + return -ETIME; } -static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) +static int pcmmio_ao_insn_write(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, + unsigned int *data) { - int n; - unsigned iobase = subpriv->iobase, iooffset = 0; - - for (n = 0; n < insn->n; n++) { - unsigned chan = CR_CHAN(insn->chanspec), range = - CR_RANGE(insn->chanspec); - if (chan < s->n_chan) { - unsigned char command_byte = 0, range_byte = - range & ((1 << 4) - 1); - if (chan >= 4) - chan -= 4, iooffset += 4; - /* set the range.. */ - outb(range_byte, iobase + iooffset + 0); - outb(0, iobase + iooffset + 1); - - /* tell it to begin */ - command_byte = (chan << 1) | 0x60; - outb(command_byte, iobase + iooffset + 2); - - wait_dac_ready(iobase + iooffset); - - /* low order byte */ - outb(data[n] & 0xff, iobase + iooffset + 0); - - /* high order byte */ - outb((data[n] >> 8) & 0xff, iobase + iooffset + 1); - - /* - * set bit 4 of command byte to indicate - * data is loaded and trigger conversion - */ - command_byte = 0x70 | (chan << 1); - /* trigger converion */ - outb(command_byte, iobase + iooffset + 2); - - wait_dac_ready(iobase + iooffset); - - /* save to shadow register for ao_rinsn */ - subpriv->ao.shadow_samples[chan] = data[n]; - } + struct pcmmio_private *devpriv = dev->private; + unsigned long iobase = dev->iobase; + unsigned int chan = CR_CHAN(insn->chanspec); + unsigned int range = CR_RANGE(insn->chanspec); + unsigned int val = devpriv->ao_readback[chan]; + unsigned char cmd = 0; + int ret; + int i; + + /* + * The PCM-MIO has two Linear Tech LTC2704 DAC devices. Each device + * is a 4-channel converter with software-selectable output range. 
+ */ + + if (chan > 3) { + cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan - 4); + iobase += PCMMIO_AO_2ND_DAC_OFFSET; + } else { + cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan); } - return n; + + /* set the range for the channel */ + outb(PCMMIO_AO_LSB_SPAN(range), iobase + PCMMIO_AO_LSB_REG); + outb(0, iobase + PCMMIO_AO_MSB_REG); + outb(cmd | PCMMIO_AO_CMD_WR_SPAN_UPDATE, iobase + PCMMIO_AO_CMD_REG); + ret = pcmmio_ao_wait_for_eoc(iobase, 100000); + if (ret) + return ret; + + for (i = 0; i < insn->n; i++) { + val = data[i]; + + /* write the data to the channel */ + outb(val & 0xff, iobase + PCMMIO_AO_LSB_REG); + outb((val >> 8) & 0xff, iobase + PCMMIO_AO_MSB_REG); + outb(cmd | PCMMIO_AO_CMD_WR_CODE_UPDATE, + iobase + PCMMIO_AO_CMD_REG); + ret = pcmmio_ao_wait_for_eoc(iobase, 100000); + if (ret) + return ret; + + devpriv->ao_readback[chan] = val; + } + + return insn->n; } static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pcmmio_private *devpriv; struct comedi_subdevice *s; - int sdev_no, chans_left, n_dio_subdevs, n_subdevs, port, asic, - thisasic_chanct = 0; - unsigned int irq[MAX_ASICS]; int ret; - irq[0] = it->options[1]; - ret = comedi_request_region(dev, it->options[0], 32); if (ret) return ret; @@ -1002,177 +762,99 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (!devpriv) return -ENOMEM; - for (asic = 0; asic < MAX_ASICS; ++asic) { - devpriv->asics[asic].num = asic; - devpriv->asics[asic].iobase = - dev->iobase + 16 + asic * ASIC_IOSIZE; - /* - * this gets actually set at the end of this function when we - * request_irqs - */ - devpriv->asics[asic].irq = 0; - spin_lock_init(&devpriv->asics[asic].spinlock); - } + spin_lock_init(&devpriv->pagelock); + spin_lock_init(&devpriv->spinlock); - chans_left = CHANS_PER_ASIC * 1; - n_dio_subdevs = CALC_N_DIO_SUBDEVS(chans_left); - n_subdevs = n_dio_subdevs + 2; - devpriv->sprivs = - kcalloc(n_subdevs, sizeof(struct pcmmio_subdev_private), - GFP_KERNEL); - if (!devpriv->sprivs) { - printk(KERN_ERR "comedi%d: cannot allocate subdevice private data structures\n", - dev->minor); - return -ENOMEM; + pcmmio_reset(dev); + + if (it->options[1]) { + ret = request_irq(it->options[1], interrupt_pcmmio, 0, + dev->board_name, dev); + if (ret == 0) { + dev->irq = it->options[1]; + + /* configure the interrupt routing on the board */ + outb(PCMMIO_AI_RES_ENA_DIO_RES_ACCESS, + dev->iobase + PCMMIO_AI_RES_ENA_REG); + outb(PCMMIO_RESOURCE_IRQ(dev->irq), + dev->iobase + PCMMIO_RESOURCE_REG); + } } - ret = comedi_alloc_subdevices(dev, n_subdevs); + ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; - /* First, AI */ + /* Analog Input subdevice */ s = &dev->subdevices[0]; - s->private = &devpriv->sprivs[0]; - s->maxdata = 0xffff; - s->range_table = &ranges_ai; - s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF; - s->type = COMEDI_SUBD_AI; - s->n_chan = 16; - s->len_chanlist = s->n_chan; - s->insn_read = ai_rinsn; - subpriv->iobase = dev->iobase + 0; - /* initialize the resource enable register by clearing it */ - outb(0, subpriv->iobase + 3); - outb(0, subpriv->iobase + 4 + 3); + s->type = COMEDI_SUBD_AI; + s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF; + s->n_chan = 16; + s->maxdata = 0xffff; + s->range_table = &pcmmio_ai_ranges; + s->insn_read = pcmmio_ai_insn_read; - /* Next, AO */ - s = &dev->subdevices[1]; - s->private = &devpriv->sprivs[1]; - s->maxdata = 0xffff; - s->range_table = &ranges_ao; - s->subdev_flags = SDF_READABLE; - s->type = COMEDI_SUBD_AO; - s->n_chan = 8; - 
s->len_chanlist = s->n_chan; - s->insn_read = ao_rinsn; - s->insn_write = ao_winsn; - subpriv->iobase = dev->iobase + 8; /* initialize the resource enable register by clearing it */ - outb(0, subpriv->iobase + 3); - outb(0, subpriv->iobase + 4 + 3); - - port = 0; - asic = 0; - for (sdev_no = 2; sdev_no < dev->n_subdevices; ++sdev_no) { - int byte_no; - - s = &dev->subdevices[sdev_no]; - s->private = &devpriv->sprivs[sdev_no]; - s->maxdata = 1; - s->range_table = &range_digital; - s->subdev_flags = SDF_READABLE | SDF_WRITABLE; - s->type = COMEDI_SUBD_DIO; - s->insn_bits = pcmmio_dio_insn_bits; - s->insn_config = pcmmio_dio_insn_config; - s->n_chan = min(chans_left, MAX_CHANS_PER_SUBDEV); - subpriv->dio.intr.asic = -1; - subpriv->dio.intr.first_chan = -1; - subpriv->dio.intr.asic_chan = -1; - subpriv->dio.intr.num_asic_chans = -1; - subpriv->dio.intr.active = 0; - s->len_chanlist = 1; - - /* save the ioport address for each 'port' of 8 channels in the - subdevice */ - for (byte_no = 0; byte_no < PORTS_PER_SUBDEV; ++byte_no, ++port) { - if (port >= PORTS_PER_ASIC) { - port = 0; - ++asic; - thisasic_chanct = 0; - } - subpriv->iobases[byte_no] = - devpriv->asics[asic].iobase + port; - - if (thisasic_chanct < - CHANS_PER_PORT * INTR_PORTS_PER_ASIC - && subpriv->dio.intr.asic < 0) { - /* - * this is an interrupt subdevice, - * so setup the struct - */ - subpriv->dio.intr.asic = asic; - subpriv->dio.intr.active = 0; - subpriv->dio.intr.stop_count = 0; - subpriv->dio.intr.first_chan = byte_no * 8; - subpriv->dio.intr.asic_chan = thisasic_chanct; - subpriv->dio.intr.num_asic_chans = - s->n_chan - subpriv->dio.intr.first_chan; - s->cancel = pcmmio_cancel; - s->do_cmd = pcmmio_cmd; - s->do_cmdtest = pcmmio_cmdtest; - s->len_chanlist = - subpriv->dio.intr.num_asic_chans; - } - thisasic_chanct += CHANS_PER_PORT; - } - spin_lock_init(&subpriv->dio.intr.spinlock); - - chans_left -= s->n_chan; + outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS, + dev->iobase + PCMMIO_AI_RES_ENA_REG); + outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS, + dev->iobase + PCMMIO_AI_RES_ENA_REG + PCMMIO_AI_2ND_ADC_OFFSET); - if (!chans_left) { - /* - * reset the asic to our first asic, - * to do intr subdevs - */ - asic = 0; - port = 0; - } + /* Analog Output subdevice */ + s = &dev->subdevices[1]; + s->type = COMEDI_SUBD_AO; + s->subdev_flags = SDF_READABLE; + s->n_chan = 8; + s->maxdata = 0xffff; + s->range_table = &pcmmio_ao_ranges; + s->insn_read = pcmmio_ao_insn_read; + s->insn_write = pcmmio_ao_insn_write; + /* initialize the resource enable register by clearing it */ + outb(0, dev->iobase + PCMMIO_AO_RESOURCE_ENA_REG); + outb(0, dev->iobase + PCMMIO_AO_2ND_DAC_OFFSET + + PCMMIO_AO_RESOURCE_ENA_REG); + + /* Digital I/O subdevice with interrupt support */ + s = &dev->subdevices[2]; + s->type = COMEDI_SUBD_DIO; + s->subdev_flags = SDF_READABLE | SDF_WRITABLE; + s->n_chan = 24; + s->maxdata = 1; + s->len_chanlist = 1; + s->range_table = &range_digital; + s->insn_bits = pcmmio_dio_insn_bits; + s->insn_config = pcmmio_dio_insn_config; + if (dev->irq) { + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = s->n_chan; + s->cancel = pcmmio_cancel; + s->do_cmd = pcmmio_cmd; + s->do_cmdtest = pcmmio_cmdtest; } - init_asics(dev); /* clear out all the registers, basically */ - - for (asic = 0; irq[0] && asic < MAX_ASICS; ++asic) { - if (irq[asic] - && request_irq(irq[asic], interrupt_pcmmio, - IRQF_SHARED, dev->board_name, dev)) { - int i; - /* unroll the allocated irqs.. 
*/ - for (i = asic - 1; i >= 0; --i) { - free_irq(irq[i], dev); - devpriv->asics[i].irq = irq[i] = 0; - } - irq[asic] = 0; - } - devpriv->asics[asic].irq = irq[asic]; - } + /* Digital I/O subdevice */ + s = &dev->subdevices[3]; + s->type = COMEDI_SUBD_DIO; + s->subdev_flags = SDF_READABLE | SDF_WRITABLE; + s->n_chan = 24; + s->maxdata = 1; + s->range_table = &range_digital; + s->insn_bits = pcmmio_dio_insn_bits; + s->insn_config = pcmmio_dio_insn_config; - return 1; -} - -static void pcmmio_detach(struct comedi_device *dev) -{ - struct pcmmio_private *devpriv = dev->private; - int i; - - if (devpriv) { - for (i = 0; i < MAX_ASICS; ++i) { - if (devpriv->asics[i].irq) - free_irq(devpriv->asics[i].irq, dev); - } - kfree(devpriv->sprivs); - } - comedi_legacy_detach(dev); + return 0; } static struct comedi_driver pcmmio_driver = { .driver_name = "pcmmio", .module = THIS_MODULE, .attach = pcmmio_attach, - .detach = pcmmio_detach, + .detach = comedi_legacy_detach, }; module_comedi_driver(pcmmio_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); -MODULE_DESCRIPTION("Comedi low-level driver"); +MODULE_DESCRIPTION("Comedi driver for Winsystems PCM-MIO PC/104 board"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c index 954fa96a50ac76..a8f390f7a87418 100644 --- a/drivers/staging/comedi/drivers/pcmuio.c +++ b/drivers/staging/comedi/drivers/pcmuio.c @@ -75,7 +75,6 @@ #include #include -#include #include "../comedidev.h" @@ -127,36 +126,53 @@ static const struct pcmuio_board pcmuio_boards[] = { }, }; -struct pcmuio_subdev_private { - /* The below is only used for intr subdevices */ - struct { - /* if non-negative, this subdev has an interrupt asic */ - int asic; - /* - * subdev-relative channel mask for channels - * we are interested in - */ - int enabled_mask; - int active; - int stop_count; - int continuous; - spinlock_t spinlock; - } intr; +struct pcmuio_asic { + spinlock_t pagelock; /* protects the page registers */ + spinlock_t spinlock; /* protects member variables */ + unsigned int enabled_mask; + unsigned int stop_count; + unsigned int active:1; + unsigned int continuous:1; }; struct pcmuio_private { - struct { - unsigned int irq; - spinlock_t spinlock; - } asics[PCMUIO_MAX_ASICS]; - struct pcmuio_subdev_private *sprivs; + struct pcmuio_asic asics[PCMUIO_MAX_ASICS]; + unsigned int irq2; }; +static inline unsigned long pcmuio_asic_iobase(struct comedi_device *dev, + int asic) +{ + return dev->iobase + (asic * PCMUIO_ASIC_IOSIZE); +} + +static inline int pcmuio_subdevice_to_asic(struct comedi_subdevice *s) +{ + /* + * subdevice 0 and 1 are handled by the first asic + * subdevice 2 and 3 are handled by the second asic + */ + return s->index / 2; +} + +static inline int pcmuio_subdevice_to_port(struct comedi_subdevice *s) +{ + /* + * subdevice 0 and 2 use port registers 0-2 + * subdevice 1 and 3 use port registers 3-5 + */ + return (s->index % 2) ? 
3 : 0; +} + static void pcmuio_write(struct comedi_device *dev, unsigned int val, int asic, int page, int port) { - unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE); + struct pcmuio_private *devpriv = dev->private; + struct pcmuio_asic *chip = &devpriv->asics[asic]; + unsigned long iobase = pcmuio_asic_iobase(dev, asic); + unsigned long flags; + spin_lock_irqsave(&chip->pagelock, flags); if (page == 0) { /* Port registers are valid for any page */ outb(val & 0xff, iobase + PCMUIO_PORT_REG(port + 0)); @@ -168,14 +184,19 @@ static void pcmuio_write(struct comedi_device *dev, unsigned int val, outb((val >> 8) & 0xff, iobase + PCMUIO_PAGE_REG(1)); outb((val >> 16) & 0xff, iobase + PCMUIO_PAGE_REG(2)); } + spin_unlock_irqrestore(&chip->pagelock, flags); } static unsigned int pcmuio_read(struct comedi_device *dev, int asic, int page, int port) { - unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE); + struct pcmuio_private *devpriv = dev->private; + struct pcmuio_asic *chip = &devpriv->asics[asic]; + unsigned long iobase = pcmuio_asic_iobase(dev, asic); + unsigned long flags; unsigned int val; + spin_lock_irqsave(&chip->pagelock, flags); if (page == 0) { /* Port registers are valid for any page */ val = inb(iobase + PCMUIO_PORT_REG(port + 0)); @@ -187,6 +208,7 @@ static unsigned int pcmuio_read(struct comedi_device *dev, val |= (inb(iobase + PCMUIO_PAGE_REG(1)) << 8); val |= (inb(iobase + PCMUIO_PAGE_REG(2)) << 16); } + spin_unlock_irqrestore(&chip->pagelock, flags); return val; } @@ -203,30 +225,35 @@ static unsigned int pcmuio_read(struct comedi_device *dev, */ static int pcmuio_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) + struct comedi_insn *insn, + unsigned int *data) { - unsigned int mask = data[0] & s->io_bits; /* outputs only */ - unsigned int bits = data[1]; - int asic = s->index / 2; - int port = (s->index % 2) ? 3 : 0; + int asic = pcmuio_subdevice_to_asic(s); + int port = pcmuio_subdevice_to_port(s); + unsigned int chanmask = (1 << s->n_chan) - 1; + unsigned int mask; unsigned int val; - /* get inverted state of the channels from the port */ - val = pcmuio_read(dev, asic, 0, port); - - /* get the true state of the channels */ - s->state = val ^ ((0x1 << s->n_chan) - 1); - + mask = comedi_dio_update_state(s, data); if (mask) { - s->state &= ~mask; - s->state |= (mask & bits); - - /* invert the state and update the channels */ - val = s->state ^ ((0x1 << s->n_chan) - 1); + /* + * Outputs are inverted, invert the state and + * update the channels. + * + * The s->io_bits mask makes sure the input channels + * are '0' so that the outputs pins stay in a high + * z-state. + */ + val = ~s->state & chanmask; + val &= s->io_bits; pcmuio_write(dev, val, asic, 0, port); } - data[1] = s->state; + /* get inverted state of the channels from the port */ + val = pcmuio_read(dev, asic, 0, port); + + /* return the true state of the channels */ + data[1] = ~val & chanmask; return insn->n; } @@ -236,8 +263,8 @@ static int pcmuio_dio_insn_config(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { - int asic = s->index / 2; - int port = (s->index % 2) ? 
3 : 0; + int asic = pcmuio_subdevice_to_asic(s); + int port = pcmuio_subdevice_to_port(s); int ret; ret = comedi_dio_insn_config(dev, s, insn, data, 0); @@ -267,18 +294,16 @@ static void pcmuio_reset(struct comedi_device *dev) } } +/* chip->spinlock is already locked */ static void pcmuio_stop_intr(struct comedi_device *dev, struct comedi_subdevice *s) { - struct pcmuio_subdev_private *subpriv = s->private; - int asic; - - asic = subpriv->intr.asic; - if (asic < 0) - return; /* not an interrupt subdev */ + struct pcmuio_private *devpriv = dev->private; + int asic = pcmuio_subdevice_to_asic(s); + struct pcmuio_asic *chip = &devpriv->asics[asic]; - subpriv->intr.enabled_mask = 0; - subpriv->intr.active = 0; + chip->enabled_mask = 0; + chip->active = 0; s->async->inttrig = NULL; /* disable all intrs for this subdev.. */ @@ -289,29 +314,27 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev, struct comedi_subdevice *s, unsigned triggered) { - struct pcmuio_subdev_private *subpriv = s->private; + struct pcmuio_private *devpriv = dev->private; + int asic = pcmuio_subdevice_to_asic(s); + struct pcmuio_asic *chip = &devpriv->asics[asic]; unsigned int len = s->async->cmd.chanlist_len; unsigned oldevents = s->async->events; unsigned int val = 0; unsigned long flags; - unsigned mytrig; unsigned int i; - spin_lock_irqsave(&subpriv->intr.spinlock, flags); + spin_lock_irqsave(&chip->spinlock, flags); - if (!subpriv->intr.active) + if (!chip->active) goto done; - mytrig = triggered; - mytrig &= ((0x1 << s->n_chan) - 1); - - if (!(mytrig & subpriv->intr.enabled_mask)) + if (!(triggered & chip->enabled_mask)) goto done; for (i = 0; i < len; i++) { unsigned int chan = CR_CHAN(s->async->cmd.chanlist[i]); - if (mytrig & (1U << chan)) - val |= (1U << i); + if (triggered & (1 << chan)) + val |= (1 << i); } /* Write the scan to the buffer. */ @@ -325,11 +348,11 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev, } /* Check for end of acquisition. */ - if (!subpriv->intr.continuous) { + if (!chip->continuous) { /* stop_src == TRIG_COUNT */ - if (subpriv->intr.stop_count > 0) { - subpriv->intr.stop_count--; - if (subpriv->intr.stop_count == 0) { + if (chip->stop_count > 0) { + chip->stop_count--; + if (chip->stop_count == 0) { s->async->events |= COMEDI_CB_EOA; /* TODO: STOP_ACQUISITION_CALL_HERE!! 
*/ pcmuio_stop_intr(dev, s); @@ -338,7 +361,7 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev, } done: - spin_unlock_irqrestore(&subpriv->intr.spinlock, flags); + spin_unlock_irqrestore(&chip->spinlock, flags); if (oldevents != s->async->events) comedi_event(dev, s); @@ -346,114 +369,93 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev, static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic) { - struct pcmuio_private *devpriv = dev->private; - struct pcmuio_subdev_private *subpriv; - unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE); - unsigned int triggered = 0; - int got1 = 0; - unsigned long flags; - unsigned char int_pend; - int i; + /* there are could be two asics so we can't use dev->read_subdev */ + struct comedi_subdevice *s = &dev->subdevices[asic * 2]; + unsigned long iobase = pcmuio_asic_iobase(dev, asic); + unsigned int val; - spin_lock_irqsave(&devpriv->asics[asic].spinlock, flags); + /* are there any interrupts pending */ + val = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07; + if (!val) + return 0; - int_pend = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07; - if (int_pend) { - triggered = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0); - pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0); + /* get, and clear, the pending interrupts */ + val = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0); + pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0); - ++got1; - } + /* handle the pending interrupts */ + pcmuio_handle_intr_subdev(dev, s, val); - spin_unlock_irqrestore(&devpriv->asics[asic].spinlock, flags); - - if (triggered) { - struct comedi_subdevice *s; - /* TODO here: dispatch io lines to subdevs with commands.. */ - for (i = 0; i < dev->n_subdevices; i++) { - s = &dev->subdevices[i]; - subpriv = s->private; - if (subpriv->intr.asic == asic) { - /* - * This is an interrupt subdev, and it - * matches this asic! - */ - pcmuio_handle_intr_subdev(dev, s, - triggered); - } - } - } - return got1; + return 1; } static irqreturn_t pcmuio_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct pcmuio_private *devpriv = dev->private; - int got1 = 0; - int asic; + int handled = 0; - for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic) { - if (irq == devpriv->asics[asic].irq) { - /* it is an interrupt for ASIC #asic */ - if (pcmuio_handle_asic_interrupt(dev, asic)) - got1++; - } - } - if (!got1) - return IRQ_NONE; /* interrupt from other source */ - return IRQ_HANDLED; + if (irq == dev->irq) + handled += pcmuio_handle_asic_interrupt(dev, 0); + if (irq == devpriv->irq2) + handled += pcmuio_handle_asic_interrupt(dev, 1); + + return handled ? IRQ_HANDLED : IRQ_NONE; } +/* chip->spinlock is already locked */ static int pcmuio_start_intr(struct comedi_device *dev, struct comedi_subdevice *s) { - struct pcmuio_subdev_private *subpriv = s->private; + struct pcmuio_private *devpriv = dev->private; + int asic = pcmuio_subdevice_to_asic(s); + struct pcmuio_asic *chip = &devpriv->asics[asic]; + struct comedi_cmd *cmd = &s->async->cmd; + unsigned int bits = 0; + unsigned int pol_bits = 0; + int i; - if (!subpriv->intr.continuous && subpriv->intr.stop_count == 0) { + if (!chip->continuous && chip->stop_count == 0) { /* An empty acquisition! 
*/ s->async->events |= COMEDI_CB_EOA; - subpriv->intr.active = 0; + chip->active = 0; return 1; - } else { - unsigned bits = 0, pol_bits = 0, n; - int asic; - struct comedi_cmd *cmd = &s->async->cmd; - - asic = subpriv->intr.asic; - if (asic < 0) - return 1; /* not an interrupt - subdev */ - subpriv->intr.enabled_mask = 0; - subpriv->intr.active = 1; - if (cmd->chanlist) { - for (n = 0; n < cmd->chanlist_len; n++) { - bits |= (1U << CR_CHAN(cmd->chanlist[n])); - pol_bits |= (CR_AREF(cmd->chanlist[n]) - || CR_RANGE(cmd-> - chanlist[n]) ? 1U : 0U) - << CR_CHAN(cmd->chanlist[n]); - } - } - bits &= ((0x1 << s->n_chan) - 1); - subpriv->intr.enabled_mask = bits; + } - /* set pol and enab intrs for this subdev.. */ - pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0); - pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0); + chip->enabled_mask = 0; + chip->active = 1; + if (cmd->chanlist) { + for (i = 0; i < cmd->chanlist_len; i++) { + unsigned int chanspec = cmd->chanlist[i]; + unsigned int chan = CR_CHAN(chanspec); + unsigned int range = CR_RANGE(chanspec); + unsigned int aref = CR_AREF(chanspec); + + bits |= (1 << chan); + pol_bits |= ((aref || range) ? 1 : 0) << chan; + } } + bits &= ((1 << s->n_chan) - 1); + chip->enabled_mask = bits; + + /* set pol and enab intrs for this subdev.. */ + pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0); + pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0); + return 0; } static int pcmuio_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { - struct pcmuio_subdev_private *subpriv = s->private; + struct pcmuio_private *devpriv = dev->private; + int asic = pcmuio_subdevice_to_asic(s); + struct pcmuio_asic *chip = &devpriv->asics[asic]; unsigned long flags; - spin_lock_irqsave(&subpriv->intr.spinlock, flags); - if (subpriv->intr.active) + spin_lock_irqsave(&chip->spinlock, flags); + if (chip->active) pcmuio_stop_intr(dev, s); - spin_unlock_irqrestore(&subpriv->intr.spinlock, flags); + spin_unlock_irqrestore(&chip->spinlock, flags); return 0; } @@ -465,19 +467,21 @@ static int pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { - struct pcmuio_subdev_private *subpriv = s->private; + struct pcmuio_private *devpriv = dev->private; + int asic = pcmuio_subdevice_to_asic(s); + struct pcmuio_asic *chip = &devpriv->asics[asic]; unsigned long flags; int event = 0; if (trignum != 0) return -EINVAL; - spin_lock_irqsave(&subpriv->intr.spinlock, flags); + spin_lock_irqsave(&chip->spinlock, flags); s->async->inttrig = NULL; - if (subpriv->intr.active) + if (chip->active) event = pcmuio_start_intr(dev, s); - spin_unlock_irqrestore(&subpriv->intr.spinlock, flags); + spin_unlock_irqrestore(&chip->spinlock, flags); if (event) comedi_event(dev, s); @@ -490,24 +494,26 @@ pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s, */ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { - struct pcmuio_subdev_private *subpriv = s->private; + struct pcmuio_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; + int asic = pcmuio_subdevice_to_asic(s); + struct pcmuio_asic *chip = &devpriv->asics[asic]; unsigned long flags; int event = 0; - spin_lock_irqsave(&subpriv->intr.spinlock, flags); - subpriv->intr.active = 1; + spin_lock_irqsave(&chip->spinlock, flags); + chip->active = 1; /* Set up end of acquisition. 
*/ switch (cmd->stop_src) { case TRIG_COUNT: - subpriv->intr.continuous = 0; - subpriv->intr.stop_count = cmd->stop_arg; + chip->continuous = 0; + chip->stop_count = cmd->stop_arg; break; default: /* TRIG_NONE */ - subpriv->intr.continuous = 1; - subpriv->intr.stop_count = 0; + chip->continuous = 1; + chip->stop_count = 0; break; } @@ -521,7 +527,7 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) event = pcmuio_start_intr(dev, s); break; } - spin_unlock_irqrestore(&subpriv->intr.spinlock, flags); + spin_unlock_irqrestore(&chip->spinlock, flags); if (event) comedi_event(dev, s); @@ -589,13 +595,8 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it) const struct pcmuio_board *board = comedi_board(dev); struct comedi_subdevice *s; struct pcmuio_private *devpriv; - struct pcmuio_subdev_private *subpriv; - int sdev_no, n_subdevs, asic; - unsigned int irq[PCMUIO_MAX_ASICS]; int ret; - - irq[0] = it->options[1]; - irq[1] = it->options[2]; + int i; ret = comedi_request_region(dev, it->options[0], board->num_asics * PCMUIO_ASIC_IOSIZE); @@ -606,62 +607,60 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (!devpriv) return -ENOMEM; - for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic) - spin_lock_init(&devpriv->asics[asic].spinlock); + for (i = 0; i < PCMUIO_MAX_ASICS; ++i) { + struct pcmuio_asic *chip = &devpriv->asics[i]; - n_subdevs = board->num_asics * 2; - devpriv->sprivs = kcalloc(n_subdevs, sizeof(*subpriv), GFP_KERNEL); - if (!devpriv->sprivs) - return -ENOMEM; + spin_lock_init(&chip->pagelock); + spin_lock_init(&chip->spinlock); + } - ret = comedi_alloc_subdevices(dev, n_subdevs); - if (ret) - return ret; + pcmuio_reset(dev); - for (sdev_no = 0; sdev_no < (int)dev->n_subdevices; ++sdev_no) { - s = &dev->subdevices[sdev_no]; - subpriv = &devpriv->sprivs[sdev_no]; - s->private = subpriv; - s->maxdata = 1; - s->range_table = &range_digital; - s->subdev_flags = SDF_READABLE | SDF_WRITABLE; - s->type = COMEDI_SUBD_DIO; - s->insn_bits = pcmuio_dio_insn_bits; - s->insn_config = pcmuio_dio_insn_config; - s->n_chan = 24; - - /* subdevices 0 and 2 suppport interrupts */ - if ((sdev_no % 2) == 0) { - /* setup the interrupt subdevice */ - subpriv->intr.asic = sdev_no / 2; - dev->read_subdev = s; - s->subdev_flags |= SDF_CMD_READ; - s->cancel = pcmuio_cancel; - s->do_cmd = pcmuio_cmd; - s->do_cmdtest = pcmuio_cmdtest; - s->len_chanlist = s->n_chan; - } else { - subpriv->intr.asic = -1; - s->len_chanlist = 1; + if (it->options[1]) { + /* request the irq for the 1st asic */ + ret = request_irq(it->options[1], pcmuio_interrupt, 0, + dev->board_name, dev); + if (ret == 0) + dev->irq = it->options[1]; + } + + if (board->num_asics == 2) { + if (it->options[2] == dev->irq) { + /* the same irq (or none) is used by both asics */ + devpriv->irq2 = it->options[2]; + } else if (it->options[2]) { + /* request the irq for the 2nd asic */ + ret = request_irq(it->options[2], pcmuio_interrupt, 0, + dev->board_name, dev); + if (ret == 0) + devpriv->irq2 = it->options[2]; } - spin_lock_init(&subpriv->intr.spinlock); } - pcmuio_reset(dev); + ret = comedi_alloc_subdevices(dev, board->num_asics * 2); + if (ret) + return ret; - for (asic = 0; irq[0] && asic < PCMUIO_MAX_ASICS; ++asic) { - if (irq[asic] - && request_irq(irq[asic], pcmuio_interrupt, - IRQF_SHARED, board->name, dev)) { - int i; - /* unroll the allocated irqs.. 
*/ - for (i = asic - 1; i >= 0; --i) { - free_irq(irq[i], dev); - devpriv->asics[i].irq = irq[i] = 0; - } - irq[asic] = 0; + for (i = 0; i < dev->n_subdevices; ++i) { + s = &dev->subdevices[i]; + s->type = COMEDI_SUBD_DIO; + s->subdev_flags = SDF_READABLE | SDF_WRITABLE; + s->n_chan = 24; + s->maxdata = 1; + s->range_table = &range_digital; + s->insn_bits = pcmuio_dio_insn_bits; + s->insn_config = pcmuio_dio_insn_config; + + /* subdevices 0 and 2 can support interrupts */ + if ((i == 0 && dev->irq) || (i == 2 && devpriv->irq2)) { + /* setup the interrupt subdevice */ + dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; + s->len_chanlist = s->n_chan; + s->cancel = pcmuio_cancel; + s->do_cmd = pcmuio_cmd; + s->do_cmdtest = pcmuio_cmdtest; } - devpriv->asics[asic].irq = irq[asic]; } return 0; @@ -670,14 +669,13 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it) static void pcmuio_detach(struct comedi_device *dev) { struct pcmuio_private *devpriv = dev->private; - int i; if (devpriv) { - for (i = 0; i < PCMUIO_MAX_ASICS; ++i) { - if (devpriv->asics[i].irq) - free_irq(devpriv->asics[i].irq, dev); - } - kfree(devpriv->sprivs); + pcmuio_reset(dev); + + /* free the 2nd irq if used; the core will free the 1st one */ + if (devpriv->irq2 && devpriv->irq2 != dev->irq) + free_irq(devpriv->irq2, dev); } comedi_legacy_detach(dev); } diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h index 0d254a1b78a77f..55e3c2e2bc5292 100644 --- a/drivers/staging/comedi/drivers/plx9080.h +++ b/drivers/staging/comedi/drivers/plx9080.h @@ -402,12 +402,9 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel) udelay(1); dma_status = readb(dma_cs_addr); } - if (i == timeout) { - printk - ("plx9080: cancel() timed out waiting for dma %i done clear\n", - channel); + if (i == timeout) return -ETIMEDOUT; - } + /* disable and abort channel */ writeb(PLX_DMA_ABORT_BIT, dma_cs_addr); /* wait for dma done bit */ @@ -416,12 +413,8 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel) udelay(1); dma_status = readb(dma_cs_addr); } - if (i == timeout) { - printk - ("plx9080: cancel() timed out waiting for dma %i done set\n", - channel); + if (i == timeout) return -ETIMEDOUT; - } return 0; } diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c index 44c8712ed9e05d..0f026afde9be27 100644 --- a/drivers/staging/comedi/drivers/rtd520.c +++ b/drivers/staging/comedi/drivers/rtd520.c @@ -495,8 +495,6 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev, case AREF_OTHER: /* ???
*/ break; } - /*printk ("chan=%d r=%d a=%d -> 0x%x\n", - chan, range, aref, r); */ return r; } @@ -606,7 +604,6 @@ static int rtd_ai_rinsn(struct comedi_device *dev, /* read data */ d = readw(devpriv->las1 + LAS1_ADC_FIFO); - /*printk ("rtd520: Got 0x%x after %d usec\n", d, ii+1); */ d = d >> 3; /* low 3 bits are marker lines */ if (test_bit(0, devpriv->chan_is_bipolar)) /* convert to comedi unsigned data */ @@ -692,7 +689,7 @@ static int ai_read_dregs(struct comedi_device *dev, struct comedi_subdevice *s) static irqreturn_t rtd_interrupt(int irq, void *d) { struct comedi_device *dev = d; - struct comedi_subdevice *s = &dev->subdevices[0]; + struct comedi_subdevice *s = dev->read_subdev; struct rtd_private *devpriv = dev->private; u32 overrun; u16 status; @@ -1427,7 +1424,7 @@ static int rtd520_pci_probe(struct pci_dev *dev, return comedi_pci_auto_config(dev, &rtd520_driver, id->driver_data); } -static DEFINE_PCI_DEVICE_TABLE(rtd520_pci_table) = { +static const struct pci_device_id rtd520_pci_table[] = { { PCI_VDEVICE(RTD, 0x7520), BOARD_DM7520 }, { PCI_VDEVICE(RTD, 0x4520), BOARD_PCI4520 }, { 0 } diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index b486099b543d56..19da1dbea49478 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -199,7 +199,7 @@ static bool s626_mc_test(struct comedi_device *dev, static const struct comedi_lrange s626_range_table = { 2, { BIP_RANGE(5), - BIP_RANGE(10), + BIP_RANGE(10) } }; @@ -1614,12 +1614,13 @@ static irqreturn_t s626_irq_handler(int irq, void *d) static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl) { struct s626_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; + struct comedi_cmd *cmd = &s->async->cmd; uint32_t *rps; uint32_t jmp_adrs; uint16_t i; uint16_t n; uint32_t local_ppl; - struct comedi_cmd *cmd = &dev->subdevices->async->cmd; /* Stop RPS program in case it is currently running */ s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1); @@ -2079,12 +2080,6 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) if (cmd == NULL) return -EINVAL; - if (dev->irq == 0) { - comedi_error(dev, - "s626_ai_cmd: cannot run command without an irq"); - return -EIO; - } - s626_ai_load_polllist(ppl, cmd); devpriv->ai_cmd_running = 1; devpriv->ai_convert_count = 0; @@ -2645,6 +2640,7 @@ static void s626_initialize(struct comedi_device *dev) * a defined state after a PCI reset. */ { + struct comedi_subdevice *s = dev->read_subdev; uint8_t poll_list; uint16_t adc_data; uint16_t start_val; @@ -2656,7 +2652,7 @@ static void s626_initialize(struct comedi_device *dev) s626_reset_adc(dev, &poll_list); /* Get initial ADC value */ - s626_ai_rinsn(dev, dev->subdevices, NULL, data); + s626_ai_rinsn(dev, s, NULL, data); start_val = data[0]; /* @@ -2670,7 +2666,7 @@ static void s626_initialize(struct comedi_device *dev) * being unusually quiet or at the rail. 
*/ for (index = 0; index < 500; index++) { - s626_ai_rinsn(dev, dev->subdevices, NULL, data); + s626_ai_rinsn(dev, s, NULL, data); adc_data = data[0]; if (adc_data != start_val) break; @@ -2833,7 +2829,7 @@ static int s626_auto_attach(struct comedi_device *dev, s = &dev->subdevices[0]; /* analog input subdevice */ s->type = COMEDI_SUBD_AI; - s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_CMD_READ; + s->subdev_flags = SDF_READABLE | SDF_DIFF; s->n_chan = S626_ADC_CHANNELS; s->maxdata = 0x3fff; s->range_table = &s626_range_table; @@ -2841,6 +2837,7 @@ static int s626_auto_attach(struct comedi_device *dev, s->insn_read = s626_ai_insn_read; if (dev->irq) { dev->read_subdev = s; + s->subdev_flags |= SDF_CMD_READ; s->do_cmd = s626_ai_cmd; s->do_cmdtest = s626_ai_cmdtest; s->cancel = s626_ai_cancel; @@ -2965,7 +2962,7 @@ static int s626_pci_probe(struct pci_dev *dev, * also subvendor:subdevice ids, because otherwise it will conflict with * Philips SAA7146 media/dvb based cards. */ -static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = { +static const struct pci_device_id s626_pci_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146, 0x6000, 0x0272) }, { 0 } diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c index daee2f42bde063..77e2059ff62e0b 100644 --- a/drivers/staging/comedi/drivers/skel.c +++ b/drivers/staging/comedi/drivers/skel.c @@ -698,7 +698,7 @@ static int skel_pci_probe(struct pci_dev *dev, * This is used by modprobe to translate PCI IDs to drivers. * Should only be used for PCI and ISA-PnP devices */ -static DEFINE_PCI_DEVICE_TABLE(skel_pci_table) = { +static const struct pci_device_id skel_pci_table[] = { { PCI_VDEVICE(SKEL, 0x0100), BOARD_SKEL100 }, { PCI_VDEVICE(SKEL, 0x0200), BOARD_SKEL200 }, { 0 } diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c index 93eec2fc254c18..adf7cb7086cc12 100644 --- a/drivers/staging/comedi/drivers/unioxx5.c +++ b/drivers/staging/comedi/drivers/unioxx5.c @@ -38,7 +38,6 @@ Devices: [Fastwel] UNIOxx-5 (unioxx5), */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include @@ -91,12 +90,14 @@ static int __unioxx5_define_chan_offset(int chan_num) } #if 0 /* not used? */ -static void __unioxx5_digital_config(struct unioxx5_subd_priv *usp, int mode) +static void __unioxx5_digital_config(struct comedi_subdevice *s, int mode) { + struct unioxx5_subd_priv *usp = s->private; + struct device *csdev = s->device->class_dev; int i, mask; mask = (mode == ALL_2_OUTPUT) ? 0xFF : 0x00; - printk("COMEDI: mode = %d\n", mask); + dev_dbg(csdev, "mode = %d\n", mask); outb(1, usp->usp_iobase + 0); @@ -135,15 +136,18 @@ static void __unioxx5_analog_config(struct unioxx5_subd_priv *usp, int channel) usp->usp_prev_cn_val[channel_offset - 1] = conf; } -static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp, +static int __unioxx5_digital_read(struct comedi_subdevice *s, unsigned int *data, int channel, int minor) { + struct unioxx5_subd_priv *usp = s->private; + struct device *csdev = s->device->class_dev; int channel_offset, mask = 1 << (channel & 0x07); channel_offset = __unioxx5_define_chan_offset(channel); if (channel_offset < 0) { - pr_err("comedi%d: undefined channel %d. channel range is 0 .. 23\n", - minor, channel); + dev_err(csdev, + "undefined channel %d. channel range is 0 .. 
23\n", + channel); return 0; } @@ -157,9 +161,11 @@ static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp, return 1; } -static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp, +static int __unioxx5_analog_read(struct comedi_subdevice *s, unsigned int *data, int channel, int minor) { + struct unioxx5_subd_priv *usp = s->private; + struct device *csdev = s->device->class_dev; int module_no, read_ch; char control; @@ -168,8 +174,9 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp, /* defining if given module can work on input */ if (usp->usp_module_type[module_no] & MODULE_OUTPUT_MASK) { - pr_err("comedi%d: module in position %d with id 0x%02x is for output only", - minor, module_no, usp->usp_module_type[module_no]); + dev_err(csdev, + "module in position %d with id 0x%02x is for output only", + module_no, usp->usp_module_type[module_no]); return 0; } @@ -185,7 +192,7 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp, /* if four bytes readding error occurs - return 0(false) */ if ((control & Rx4CA_ERR_MASK)) { - printk("COMEDI: 4 bytes error\n"); + dev_err(csdev, "4 bytes error\n"); return 0; } @@ -197,16 +204,19 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp, return 1; } -static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp, +static int __unioxx5_digital_write(struct comedi_subdevice *s, unsigned int *data, int channel, int minor) { + struct unioxx5_subd_priv *usp = s->private; + struct device *csdev = s->device->class_dev; int channel_offset, val; int mask = 1 << (channel & 0x07); channel_offset = __unioxx5_define_chan_offset(channel); if (channel_offset < 0) { - pr_err("comedi%d: undefined channel %d. channel range is 0 .. 23\n", - minor, channel); + dev_err(csdev, + "undefined channel %d. channel range is 0 .. 23\n", + channel); return 0; } @@ -225,9 +235,11 @@ static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp, return 1; } -static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp, +static int __unioxx5_analog_write(struct comedi_subdevice *s, unsigned int *data, int channel, int minor) { + struct unioxx5_subd_priv *usp = s->private; + struct device *csdev = s->device->class_dev; int module, i; module = channel / 2; /* definig module number(0 .. 
11) */ @@ -235,8 +247,9 @@ static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp, /* defining if given module can work on output */ if (!(usp->usp_module_type[module] & MODULE_OUTPUT_MASK)) { - pr_err("comedi%d: module in position %d with id 0x%0x is for input only!\n", - minor, module, usp->usp_module_type[module]); + dev_err(csdev, + "module in position %d with id 0x%0x is for input only!\n", + module, usp->usp_module_type[module]); return 0; } @@ -273,10 +286,10 @@ static int unioxx5_subdev_read(struct comedi_device *dev, type = usp->usp_module_type[channel / 2]; if (type == MODULE_DIGITAL) { - if (!__unioxx5_digital_read(usp, data, channel, dev->minor)) + if (!__unioxx5_digital_read(subdev, data, channel, dev->minor)) return -1; } else { - if (!__unioxx5_analog_read(usp, data, channel, dev->minor)) + if (!__unioxx5_analog_read(subdev, data, channel, dev->minor)) return -1; } @@ -295,10 +308,10 @@ static int unioxx5_subdev_write(struct comedi_device *dev, type = usp->usp_module_type[channel / 2]; if (type == MODULE_DIGITAL) { - if (!__unioxx5_digital_write(usp, data, channel, dev->minor)) + if (!__unioxx5_digital_write(subdev, data, channel, dev->minor)) return -1; } else { - if (!__unioxx5_analog_write(usp, data, channel, dev->minor)) + if (!__unioxx5_analog_write(subdev, data, channel, dev->minor)) return -1; } @@ -318,16 +331,15 @@ static int unioxx5_insn_config(struct comedi_device *dev, if (type != MODULE_DIGITAL) { dev_err(dev->class_dev, - "comedi%d: channel configuration accessible only for digital modules\n", - dev->minor); + "channel configuration accessible only for digital modules\n"); return -1; } channel_offset = __unioxx5_define_chan_offset(channel); if (channel_offset < 0) { dev_err(dev->class_dev, - "comedi%d: undefined channel %d. channel range is 0 .. 23\n", - dev->minor, channel); + "undefined channel %d. channel range is 0 .. 23\n", + channel); return -1; } @@ -342,8 +354,7 @@ static int unioxx5_insn_config(struct comedi_device *dev, flags |= mask; break; default: - dev_err(dev->class_dev, - "comedi%d: unknown flag\n", dev->minor); + dev_err(dev->class_dev, "unknown flag\n"); return -1; } @@ -435,9 +446,10 @@ static int unioxx5_attach(struct comedi_device *dev, dev->iobase = iobase; iobase += UNIOXX5_SUBDEV_BASE; + n_subd = 0; - /* defining number of subdevices and getting they types (it must be 'g01') */ - for (i = n_subd = 0, ba = iobase; i < 4; i++, ba += UNIOXX5_SUBDEV_ODDS) { + /* getting number of subdevices with types 'g01' */ + for (i = 0, ba = iobase; i < 4; i++, ba += UNIOXX5_SUBDEV_ODDS) { id = inb(ba + 0xE); num = inb(ba + 0xF); diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index da1d501d9e4e64..71db683098d673 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -80,7 +80,6 @@ sampling rate. If you sample two channels you get 4kHz and so on. 
#include #include -#include #include #include #include diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index 9707dd1239c4c0..5f85c55c110931 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -37,7 +37,6 @@ #include #include -#include #include #include #include @@ -138,7 +137,10 @@ * comedi constants */ static const struct comedi_lrange range_usbduxfast_ai_range = { - 2, {BIP_RANGE(0.75), BIP_RANGE(0.5)} + 2, { + BIP_RANGE(0.75), + BIP_RANGE(0.5) + } }; /* diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index a5363ded366832..3beeb12541522b 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c @@ -43,7 +43,6 @@ #include #include -#include #include #include #include @@ -1656,11 +1655,13 @@ static int usbduxsigma_auto_attach(struct comedi_device *dev, } offset = usbduxsigma_getstatusinfo(dev, 0); - if (offset < 0) + if (offset < 0) { dev_err(dev->class_dev, - "Communication to USBDUXSIGMA failed! Check firmware and cabling\n"); + "Communication to USBDUXSIGMA failed! Check firmware and cabling.\n"); + return offset; + } - dev_info(dev->class_dev, "attached, ADC_zero = %x\n", offset); + dev_info(dev->class_dev, "ADC_zero = %x\n", offset); return 0; } diff --git a/drivers/staging/comedi/kcomedilib/Makefile b/drivers/staging/comedi/kcomedilib/Makefile index 18ee99bdde0841..3aff8ed08e2dec 100644 --- a/drivers/staging/comedi/kcomedilib/Makefile +++ b/drivers/staging/comedi/kcomedilib/Makefile @@ -1,3 +1,5 @@ +ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG + obj-$(CONFIG_COMEDI_KCOMEDILIB) += kcomedilib.o kcomedilib-objs := kcomedilib_main.o diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c index cd60677a3ed2f2..7dc5a18e69d42b 100644 --- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c +++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c @@ -35,7 +35,7 @@ MODULE_LICENSE("GPL"); struct comedi_device *comedi_open(const char *filename) { - struct comedi_device *dev; + struct comedi_device *dev, *retval = NULL; unsigned int minor; if (strncmp(filename, "/dev/comedi", 11) != 0) @@ -46,24 +46,27 @@ struct comedi_device *comedi_open(const char *filename) if (minor >= COMEDI_NUM_BOARD_MINORS) return NULL; - dev = comedi_dev_from_minor(minor); - - if (!dev || !dev->attached) + dev = comedi_dev_get_from_minor(minor); + if (!dev) return NULL; - if (!try_module_get(dev->driver->module)) - return NULL; + down_read(&dev->attach_lock); + if (dev->attached) + retval = dev; + else + retval = NULL; + up_read(&dev->attach_lock); + + if (retval == NULL) + comedi_dev_put(dev); - return dev; + return retval; } EXPORT_SYMBOL_GPL(comedi_open); -int comedi_close(struct comedi_device *d) +int comedi_close(struct comedi_device *dev) { - struct comedi_device *dev = (struct comedi_device *)d; - - module_put(dev->driver->module); - + comedi_dev_put(dev); return 0; } EXPORT_SYMBOL_GPL(comedi_close); @@ -73,7 +76,14 @@ static int comedi_do_insn(struct comedi_device *dev, unsigned int *data) { struct comedi_subdevice *s; - int ret = 0; + int ret; + + mutex_lock(&dev->mutex); + + if (!dev->attached) { + ret = -EINVAL; + goto error; + } /* a subdevice instruction */ if (insn->subdev >= dev->n_subdevices) { @@ -120,6 +130,7 @@ static int comedi_do_insn(struct comedi_device *dev, s->busy = NULL; error: + mutex_unlock(&dev->mutex); return 
ret; } @@ -169,9 +180,6 @@ int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev, unsigned int shift; int ret; - if (subdev >= dev->n_subdevices) - return -EINVAL; - base_channel = CR_CHAN(base_channel); n_chan = comedi_get_n_channels(dev, subdev); if (base_channel >= n_chan) @@ -211,23 +219,33 @@ int comedi_find_subdevice_by_type(struct comedi_device *dev, int type, unsigned int subd) { struct comedi_subdevice *s; - - if (subd > dev->n_subdevices) - return -ENODEV; - - for (; subd < dev->n_subdevices; subd++) { - s = &dev->subdevices[subd]; - if (s->type == type) - return subd; - } - return -1; + int ret = -ENODEV; + + down_read(&dev->attach_lock); + if (dev->attached) + for (; subd < dev->n_subdevices; subd++) { + s = &dev->subdevices[subd]; + if (s->type == type) { + ret = subd; + break; + } + } + up_read(&dev->attach_lock); + return ret; } EXPORT_SYMBOL_GPL(comedi_find_subdevice_by_type); int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice) { - struct comedi_subdevice *s = &dev->subdevices[subdevice]; + int n; + + down_read(&dev->attach_lock); + if (!dev->attached || subdevice >= dev->n_subdevices) + n = 0; + else + n = dev->subdevices[subdevice].n_chan; + up_read(&dev->attach_lock); - return s->n_chan; + return n; } EXPORT_SYMBOL_GPL(comedi_get_n_channels); diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c index ade00035d3bb58..da6bc5855ebc5e 100644 --- a/drivers/staging/comedi/proc.c +++ b/drivers/staging/comedi/proc.c @@ -41,16 +41,20 @@ static int comedi_read(struct seq_file *m, void *v) "driver_name, board_name, n_subdevices"); for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) { - struct comedi_device *dev = comedi_dev_from_minor(i); + struct comedi_device *dev = comedi_dev_get_from_minor(i); + if (!dev) continue; + down_read(&dev->attach_lock); if (dev->attached) { devices_q = 1; seq_printf(m, "%2d: %-20s %-20s %4d\n", i, dev->driver->driver_name, dev->board_name, dev->n_subdevices); } + up_read(&dev->attach_lock); + comedi_dev_put(dev); } if (!devices_q) seq_puts(m, "no devices\n"); diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c index 8fde55495d34bf..46b3da686384ea 100644 --- a/drivers/staging/comedi/range.c +++ b/drivers/staging/comedi/range.c @@ -83,8 +83,10 @@ int do_rangeinfo_ioctl(struct comedi_device *dev, } if (RANGE_LENGTH(it.range_type) != lr->length) { - DPRINTK("wrong length %d should be %d (0x%08x)\n", - RANGE_LENGTH(it.range_type), lr->length, it.range_type); + dev_dbg(dev->class_dev, + "wrong length %d should be %d (0x%08x)\n", + RANGE_LENGTH(it.range_type), + lr->length, it.range_type); return -EINVAL; } @@ -123,7 +125,8 @@ static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec) default: break; } - DPRINTK("subdevice does not support aref %i", aref); + dev_dbg(s->device->class_dev, "subdevice does not support aref %i", + aref); return 1; } diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h index 981708f3ee3914..92b0cff248cbae 100644 --- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h +++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h @@ -229,7 +229,7 @@ enum BC_DRV_CMD { DRV_CMD_REG_RD, /* Read Device Register */ DRV_CMD_REG_WR, /* Write Device Register */ DRV_CMD_FPGA_RD, /* Read FPGA Register */ - DRV_CMD_FPGA_WR, /* Wrtie FPGA Reister */ + DRV_CMD_FPGA_WR, /* Write FPGA Register */ DRV_CMD_MEM_RD, /* Read Device Memory */ DRV_CMD_MEM_WR, /* Write Device Memory */ DRV_CMD_RD_PCI_CFG, /* Read PCI Config 
Space */ diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c index 07a2f24d0d47de..642f438793c3ff 100644 --- a/drivers/staging/crystalhd/crystalhd_cmds.c +++ b/drivers/staging/crystalhd/crystalhd_cmds.c @@ -798,7 +798,7 @@ static const struct crystalhd_cmd_tbl g_crystalhd_cproc_tbl[] = { * * Current gstreamer frame work does not provide any power management * related notification to user mode decoder plug-in. As a work-around - * we pass on the power mangement notification to our plug-in by completing + * we pass on the power management notification to our plug-in by completing * all outstanding requests with BC_STS_IO_USER_ABORT return code. */ enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, @@ -1059,7 +1059,7 @@ bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx) { if (!ctx) { BCMLOG_ERR("Invalid arg..\n"); - return 0; + return false; } return crystalhd_hw_interrupt(ctx->adp, &ctx->hw_ctx); diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h index 377cd9d68b08e2..b5bf59dbcde9bf 100644 --- a/drivers/staging/crystalhd/crystalhd_cmds.h +++ b/drivers/staging/crystalhd/crystalhd_cmds.h @@ -29,7 +29,7 @@ /* * NOTE:: This is the main interface file between the Linux layer - * and the harware layer. This file will use the definitions + * and the hardware layer. This file will use the definitions * from _dts_glob and dts_defs etc.. which are defined for * windows. */ diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h index 4b363a5069d76f..05615e2a231aad 100644 --- a/drivers/staging/crystalhd/crystalhd_fw_if.h +++ b/drivers/staging/crystalhd/crystalhd_fw_if.h @@ -115,7 +115,7 @@ struct fgt_sei { unsigned char model_id; /* Model id. */ /* +unused SE based on Thomson spec */ - unsigned char color_desc_flag; /* Separate color descrition flag. */ + unsigned char color_desc_flag; /* Separate color description flag. */ unsigned char bit_depth_luma; /* Bit depth luma minus 8. */ unsigned char bit_depth_chroma; /* Bit depth chroma minus 8. */ unsigned char full_range_flag; /* Full range flag. */ diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c index 043bd49843ffd4..8d0680d9368474 100644 --- a/drivers/staging/crystalhd/crystalhd_hw.c +++ b/drivers/staging/crystalhd/crystalhd_hw.c @@ -398,7 +398,7 @@ static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw, * Call back from TX - IOQ deletion. * * This routine will release the TX DMA rings allocated - * druing setup_dma rings interface. + * during setup_dma rings interface. * * Memory is allocated per DMA ring basis. This is just * a place holder to be able to create the dio queues. diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h index 37809442c5539b..d5cb68dfe695f5 100644 --- a/drivers/staging/crystalhd/crystalhd_hw.h +++ b/drivers/staging/crystalhd/crystalhd_hw.h @@ -46,7 +46,7 @@ #define Cpu2HstMbx1 0x00100F04 #define MbxStat1 0x00100F08 #define Stream2Host_Intr_Sts 0x00100F24 -#define C011_RET_SUCCESS 0x0 /* Reutrn status of firmware command. */ +#define C011_RET_SUCCESS 0x0 /* Return status of firmware command. 
*/ /* TS input status register */ #define TS_StreamAFIFOStatus 0x0010044C @@ -141,7 +141,7 @@ union link_misc_perst_deco_ctrl { uint32_t reserved0:3; /* Reserved.No Effect*/ uint32_t stop_bcm_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/ - uint32_t reserved1:27; /* Reseved. No Effect*/ + uint32_t reserved1:27; /* Reserved. No Effect*/ }; uint32_t whole_reg; @@ -176,7 +176,7 @@ union link_misc_perst_decoder_ctrl { uint32_t res0:3; /* Reserved.No Effect*/ uint32_t stop_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/ - uint32_t res1:27; /* Reseved. No Effect */ + uint32_t res1:27; /* Reserved. No Effect */ }; uint32_t whole_reg; diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c index 190b9b92436884..99eefd0291c315 100644 --- a/drivers/staging/crystalhd/crystalhd_lnx.c +++ b/drivers/staging/crystalhd/crystalhd_lnx.c @@ -703,7 +703,7 @@ static int chd_dec_pci_resume(struct pci_dev *pdev) } #endif -static DEFINE_PCI_DEVICE_TABLE(chd_dec_pci_id_table) = { +static const struct pci_device_id chd_dec_pci_id_table[] = { { PCI_VDEVICE(BROADCOM, 0x1612), 8 }, { 0, }, }; diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h index bac572a8bc2e16..816e1cd5db6204 100644 --- a/drivers/staging/crystalhd/crystalhd_lnx.h +++ b/drivers/staging/crystalhd/crystalhd_lnx.h @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include @@ -53,7 +52,7 @@ /* OS specific PCI information structure and adapter information. */ struct crystalhd_adp { - /* Hardware borad/PCI specifics */ + /* Hardware board/PCI specifics */ char name[32]; struct pci_dev *pdev; diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c index 51f698052aff35..c3d024406337c3 100644 --- a/drivers/staging/crystalhd/crystalhd_misc.c +++ b/drivers/staging/crystalhd/crystalhd_misc.c @@ -389,7 +389,7 @@ void *bc_kern_dma_alloc(struct crystalhd_adp *adp, uint32_t sz, void *temp = NULL; if (!adp || !sz || !phy_addr) { - BCMLOG_ERR("Invalide Arg..\n"); + BCMLOG_ERR("Invalid Arg..\n"); return temp; } @@ -415,7 +415,7 @@ void bc_kern_dma_free(struct crystalhd_adp *adp, uint32_t sz, void *ka, dma_addr_t phy_addr) { if (!adp || !ka || !sz || !phy_addr) { - BCMLOG_ERR("Invalide Arg..\n"); + BCMLOG_ERR("Invalid Arg..\n"); return; } diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h index aa736c8855decb..77ab72a2a06126 100644 --- a/drivers/staging/crystalhd/crystalhd_misc.h +++ b/drivers/staging/crystalhd/crystalhd_misc.h @@ -206,7 +206,7 @@ extern void crystalhd_show_buffer(uint32_t off, uint8_t *buff, enum _chd_log_levels { BCMLOG_ERROR = 0x80000000, /* Don't disable this option */ BCMLOG_DATA = 0x40000000, /* Data, enable by default */ - BCMLOG_SPINLOCK = 0x20000000, /* Spcial case for Spin locks*/ + BCMLOG_SPINLOCK = 0x20000000, /* Special case for Spin locks*/ /* Following are allowed only in debug mode */ BCMLOG_INFO = 0x00000001, /* Generic informational */ diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c index 46a0d92173e097..c4c8c0f9c95999 100644 --- a/drivers/staging/cxt1e1/comet.c +++ b/drivers/staging/cxt1e1/comet.c @@ -28,9 +28,9 @@ extern int cxt1e1_log_level; #define COMET_NUM_UNITS 5 /* Number of points per entry in table */ /* forward references */ -static void SetPwrLevel(comet_t *comet); -static void WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table); -static 
void WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]); +static void SetPwrLevel(struct s_comet_reg *comet); +static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table); +static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]); void *TWV_table[12] = { @@ -58,7 +58,7 @@ lbo_tbl_lkup(int t1, int lbo) { return lbo - 1; } -void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster, +void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int clockmaster, u_int8_t moreParams) { u_int8_t isT1mode; @@ -159,8 +159,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster, /* 60: t1 ALMI cfg */ /* Configure Line Coding */ - switch (port_mode) - { + switch (port_mode) { /* 1 - T1 B8ZS */ case CFG_FRAME_SF: pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0); @@ -286,8 +285,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster, /* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */ /* note "rate bits can only be set once after reset" */ - if (clockmaster) - { + if (clockmaster) { /* CMODE == clockMode, 0=clock master (so all 3 others should be slave) */ /* rate = 1.544 Mb/s */ if (isT1mode) @@ -302,16 +300,17 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster, /* Master Mode i.e.FPMODE=0 (@0x20) */ pci_write_32((u_int32_t *) &comet->brif_fpcfg, 0x00); - if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL) - { + if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL) { if (cxt1e1_log_level >= LOG_SBEBUG12) - pr_info(">> %s: clockmaster internal clock\n", __func__); + pr_info(">> %s: clockmaster internal clock\n", + __func__); /* internal oscillator */ pci_write_32((u_int32_t *) &comet->tx_time, 0x0d); } else { /* external clock source */ if (cxt1e1_log_level >= LOG_SBEBUG12) - pr_info(">> %s: clockmaster external clock\n", __func__); + pr_info(">> %s: clockmaster external clock\n", + __func__); /* loop timing(external) */ pci_write_32((u_int32_t *) &comet->tx_time, 0x09); } @@ -399,7 +398,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster, ** Returns: Nothing */ static void -WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int8_t data) +WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample, u_int32_t unit, u_int8_t data) { u_int8_t WaveformAddr; @@ -417,19 +416,20 @@ WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int ** Returns: Nothing */ static void -WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, +WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]) { u_int32_t sample, unit; - for (sample = 0; sample < COMET_NUM_SAMPLES; sample++) - { + for (sample = 0; sample < COMET_NUM_SAMPLES; sample++) { for (unit = 0; unit < COMET_NUM_UNITS; unit++) - WrtXmtWaveform(ci, comet, sample, unit, table[sample][unit]); + WrtXmtWaveform(ci, comet, sample, unit, + table[sample][unit]); } /* Enable transmitter and set output amplitude */ - pci_write_32((u_int32_t *) &comet->xlpg_cfg, table[COMET_NUM_SAMPLES][0]); + pci_write_32((u_int32_t *) &comet->xlpg_cfg, + table[COMET_NUM_SAMPLES][0]); } @@ -444,7 +444,7 @@ WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, */ static void -WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table) +WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, 
u_int32_t *table) { u_int32_t ramaddr; volatile u_int32_t value; @@ -457,7 +457,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table) /* for write order preservation when Optimizing driver */ pci_flush_write(ci); /* write the addr, initiate a read */ - pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr); + pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, + (u_int8_t) ramaddr); /* for write order preservation when Optimizing driver */ pci_flush_write(ci); /* @@ -470,9 +471,12 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table) } value = *table++; - pci_write_32((u_int32_t *) &comet->rlps_idata3, (u_int8_t) (value >> 24)); - pci_write_32((u_int32_t *) &comet->rlps_idata2, (u_int8_t) (value >> 16)); - pci_write_32((u_int32_t *) &comet->rlps_idata1, (u_int8_t) (value >> 8)); + pci_write_32((u_int32_t *) &comet->rlps_idata3, + (u_int8_t) (value >> 24)); + pci_write_32((u_int32_t *) &comet->rlps_idata2, + (u_int8_t) (value >> 16)); + pci_write_32((u_int32_t *) &comet->rlps_idata1, + (u_int8_t) (value >> 8)); pci_write_32((u_int32_t *) &comet->rlps_idata0, (u_int8_t) value); /* for write order preservation when Optimizing driver */ pci_flush_write(ci); @@ -484,7 +488,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table) /* for write order preservation when optimizing driver */ pci_flush_write(ci); /* write the addr, initiate a read */ - pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr); + pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, + (u_int8_t) ramaddr); /* for write order preservation when optimizing driver */ pci_flush_write(ci); @@ -508,7 +513,7 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table) */ static void -SetPwrLevel(comet_t *comet) +SetPwrLevel(struct s_comet_reg *comet) { volatile u_int32_t temp; @@ -550,12 +555,11 @@ SetPwrLevel(comet_t *comet) */ #if 0 static void -SetCometOps(comet_t *comet) +SetCometOps(struct s_comet_reg *comet) { volatile u_int8_t rd_value; - if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2)) - { + if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2)) { /* read the BRIF Configuration */ rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg); rd_value &= ~0x20; diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h index 03b9bb77a80970..d5d286e47a4b44 100644 --- a/drivers/staging/cxt1e1/comet.h +++ b/drivers/staging/cxt1e1/comet.h @@ -25,304 +25,313 @@ #define VINT32 volatile u_int32_t -struct s_comet_reg -{ - VINT32 gbl_cfg; /* 00 Global Cfg */ - VINT32 clkmon; /* 01 Clk Monitor */ - VINT32 rx_opt; /* 02 RX Options */ - VINT32 rx_line_cfg; /* 03 RX Line Interface Cfg */ - VINT32 tx_line_cfg; /* 04 TX Line Interface Cfg */ - VINT32 tx_frpass; /* 05 TX Framing & Bypass Options */ - VINT32 tx_time; /* 06 TX Timing Options */ - VINT32 intr_1; /* 07 Intr Source #1 */ - VINT32 intr_2; /* 08 Intr Source #2 */ - VINT32 intr_3; /* 09 Intr Source #3 */ - VINT32 mdiag; /* 0A Master Diagnostics */ - VINT32 mtest; /* 0B Master Test */ - VINT32 adiag; /* 0C Analog Diagnostics */ - VINT32 rev_id; /* 0D Rev/Chip Id/Global PMON Update */ +struct s_comet_reg { + VINT32 gbl_cfg; /* 00 Global Cfg */ + VINT32 clkmon; /* 01 Clk Monitor */ + VINT32 rx_opt; /* 02 RX Options */ + VINT32 rx_line_cfg; /* 03 RX Line Interface Cfg */ + VINT32 tx_line_cfg; /* 04 TX Line Interface Cfg */ + VINT32 tx_frpass; /* 05 TX Framing & Bypass Options */ + VINT32 tx_time; /* 06 TX Timing Options */ + VINT32 intr_1; /* 07 Intr Source #1 */ + VINT32 intr_2; /* 08 
Intr Source #2 */ + VINT32 intr_3; /* 09 Intr Source #3 */ + VINT32 mdiag; /* 0A Master Diagnostics */ + VINT32 mtest; /* 0B Master Test */ + VINT32 adiag; /* 0C Analog Diagnostics */ + VINT32 rev_id; /* 0D Rev/Chip Id/Global PMON Update */ #define pmon rev_id - VINT32 reset; /* 0E Reset */ - VINT32 prgd_phctl; /* 0F PRGD Positioning/Ctl & HDLC Ctl */ - VINT32 cdrc_cfg; /* 10 CDRC Cfg */ - VINT32 cdrc_ien; /* 11 CDRC Intr Enable */ - VINT32 cdrc_ists; /* 12 CDRC Intr Sts */ - VINT32 cdrc_alos; /* 13 CDRC Alternate Loss of Signal */ - - VINT32 rjat_ists; /* 14 RJAT Intr Sts */ - VINT32 rjat_n1clk; /* 15 RJAT Reference Clk Divisor (N1) Ctl */ - VINT32 rjat_n2clk; /* 16 RJAT Output Clk Divisor (N2) Ctl */ - VINT32 rjat_cfg; /* 17 RJAT Cfg */ - - VINT32 tjat_ists; /* 18 TJAT Intr Sts */ - VINT32 tjat_n1clk; /* 19 TJAT Reference Clk Divisor (N1) Ctl */ - VINT32 tjat_n2clk; /* 1A TJAT Output Clk Divisor (N2) Ctl */ - VINT32 tjat_cfg; /* 1B TJAT Cfg */ - - VINT32 rx_elst_cfg; /* 1C RX-ELST Cfg */ - VINT32 rx_elst_ists; /* 1D RX-ELST Intr Sts */ - VINT32 rx_elst_idle; /* 1E RX-ELST Idle Code */ - VINT32 _rx_elst_res1f; /* 1F RX-ELST Reserved */ - - VINT32 tx_elst_cfg; /* 20 TX-ELST Cfg */ - VINT32 tx_elst_ists; /* 21 TX-ELST Intr Sts */ - VINT32 _tx_elst_res22; /* 22 TX-ELST Reserved */ - VINT32 _tx_elst_res23; /* 23 TX-ELST Reserved */ - VINT32 __res24; /* 24 Reserved */ - VINT32 __res25; /* 25 Reserved */ - VINT32 __res26; /* 26 Reserved */ - VINT32 __res27; /* 27 Reserved */ - - VINT32 rxce1_ctl; /* 28 RXCE RX Data Link 1 Ctl */ - VINT32 rxce1_bits; /* 29 RXCE RX Data Link 1 Bit Select */ - VINT32 rxce2_ctl; /* 2A RXCE RX Data Link 2 Ctl */ - VINT32 rxce2_bits; /* 2B RXCE RX Data Link 2 Bit Select */ - VINT32 rxce3_ctl; /* 2C RXCE RX Data Link 3 Ctl */ - VINT32 rxce3_bits; /* 2D RXCE RX Data Link 3 Bit Select */ - VINT32 _rxce_res2E; /* 2E RXCE Reserved */ - VINT32 _rxce_res2F; /* 2F RXCE Reserved */ - - VINT32 brif_cfg; /* 30 BRIF RX Backplane Cfg */ - VINT32 brif_fpcfg; /* 31 BRIF RX Backplane Frame Pulse Cfg */ - VINT32 brif_pfcfg; /* 32 BRIF RX Backplane Parity/F-Bit Cfg */ - VINT32 brif_tsoff; /* 33 BRIF RX Backplane Time Slot Offset */ - VINT32 brif_boff; /* 34 BRIF RX Backplane Bit Offset */ - VINT32 _brif_res35; /* 35 BRIF RX Backplane Reserved */ - VINT32 _brif_res36; /* 36 BRIF RX Backplane Reserved */ - VINT32 _brif_res37; /* 37 BRIF RX Backplane Reserved */ - - VINT32 txci1_ctl; /* 38 TXCI TX Data Link 1 Ctl */ - VINT32 txci1_bits; /* 39 TXCI TX Data Link 2 Bit Select */ - VINT32 txci2_ctl; /* 3A TXCI TX Data Link 1 Ctl */ - VINT32 txci2_bits; /* 3B TXCI TX Data Link 2 Bit Select */ - VINT32 txci3_ctl; /* 3C TXCI TX Data Link 1 Ctl */ - VINT32 txci3_bits; /* 3D TXCI TX Data Link 2 Bit Select */ - VINT32 _txci_res3E; /* 3E TXCI Reserved */ - VINT32 _txci_res3F; /* 3F TXCI Reserved */ - - VINT32 btif_cfg; /* 40 BTIF TX Backplane Cfg */ - VINT32 btif_fpcfg; /* 41 BTIF TX Backplane Frame Pulse Cfg */ - VINT32 btif_pcfgsts; /* 42 BTIF TX Backplane Parity Cfg & Sts */ - VINT32 btif_tsoff; /* 43 BTIF TX Backplane Time Slot Offset */ - VINT32 btif_boff; /* 44 BTIF TX Backplane Bit Offset */ - VINT32 _btif_res45; /* 45 BTIF TX Backplane Reserved */ - VINT32 _btif_res46; /* 46 BTIF TX Backplane Reserved */ - VINT32 _btif_res47; /* 47 BTIF TX Backplane Reserved */ - VINT32 t1_frmr_cfg; /* 48 T1 FRMR Cfg */ - VINT32 t1_frmr_ien; /* 49 T1 FRMR Intr Enable */ - VINT32 t1_frmr_ists; /* 4A T1 FRMR Intr Sts */ - VINT32 __res_4B; /* 4B Reserved */ - VINT32 ibcd_cfg; /* 4C IBCD Cfg */ - VINT32 
ibcd_ies; /* 4D IBCD Intr Enable/Sts */ - VINT32 ibcd_act; /* 4E IBCD Activate Code */ - VINT32 ibcd_deact; /* 4F IBCD Deactivate Code */ - - VINT32 sigx_cfg; /* 50 SIGX Cfg/Change of Signaling State */ - VINT32 sigx_acc_cos; /* 51 SIGX uP Access Sts/Change of Signaling State */ - VINT32 sigx_iac_cos; /* 52 SIGX Channel Indirect - * Addr/Ctl/Change of Signaling State */ - VINT32 sigx_idb_cos; /* 53 SIGX Channel Indirect Data - * Buffer/Change of Signaling State */ - - VINT32 t1_xbas_cfg; /* 54 T1 XBAS Cfg */ - VINT32 t1_xbas_altx; /* 55 T1 XBAS Alarm TX */ - VINT32 t1_xibc_ctl; /* 56 T1 XIBC Ctl */ - VINT32 t1_xibc_lbcode; /* 57 T1 XIBC Loopback Code */ - - VINT32 pmon_ies; /* 58 PMON Intr Enable/Sts */ - VINT32 pmon_fberr; /* 59 PMON Framing Bit Err Cnt */ - VINT32 pmon_feb_lsb; /* 5A PMON OFF/COFA/Far End Block Err Cnt (LSB) */ - VINT32 pmon_feb_msb; /* 5B PMON OFF/COFA/Far End Block Err Cnt (MSB) */ - VINT32 pmon_bed_lsb; /* 5C PMON Bit/Err/CRCE Cnt (LSB) */ - VINT32 pmon_bed_msb; /* 5D PMON Bit/Err/CRCE Cnt (MSB) */ - VINT32 pmon_lvc_lsb; /* 5E PMON LVC Cnt (LSB) */ - VINT32 pmon_lvc_msb; /* 5F PMON LVC Cnt (MSB) */ - - VINT32 t1_almi_cfg; /* 60 T1 ALMI Cfg */ - VINT32 t1_almi_ien; /* 61 T1 ALMI Intr Enable */ - VINT32 t1_almi_ists; /* 62 T1 ALMI Intr Sts */ - VINT32 t1_almi_detsts; /* 63 T1 ALMI Alarm Detection Sts */ - - VINT32 _t1_pdvd_res64; /* 64 T1 PDVD Reserved */ - VINT32 t1_pdvd_ies; /* 65 T1 PDVD Intr Enable/Sts */ - VINT32 _t1_xboc_res66; /* 66 T1 XBOC Reserved */ - VINT32 t1_xboc_code; /* 67 T1 XBOC Code */ - VINT32 _t1_xpde_res68; /* 68 T1 XPDE Reserved */ - VINT32 t1_xpde_ies; /* 69 T1 XPDE Intr Enable/Sts */ - - VINT32 t1_rboc_ena; /* 6A T1 RBOC Enable */ - VINT32 t1_rboc_sts; /* 6B T1 RBOC Code Sts */ - - VINT32 t1_tpsc_cfg; /* 6C TPSC Cfg */ - VINT32 t1_tpsc_sts; /* 6D TPSC uP Access Sts */ - VINT32 t1_tpsc_ciaddr; /* 6E TPSC Channel Indirect - * Addr/Ctl */ - VINT32 t1_tpsc_cidata; /* 6F TPSC Channel Indirect Data - * Buffer */ - VINT32 t1_rpsc_cfg; /* 70 RPSC Cfg */ - VINT32 t1_rpsc_sts; /* 71 RPSC uP Access Sts */ - VINT32 t1_rpsc_ciaddr; /* 72 RPSC Channel Indirect - * Addr/Ctl */ - VINT32 t1_rpsc_cidata; /* 73 RPSC Channel Indirect Data - * Buffer */ - VINT32 __res74; /* 74 Reserved */ - VINT32 __res75; /* 75 Reserved */ - VINT32 __res76; /* 76 Reserved */ - VINT32 __res77; /* 77 Reserved */ - - VINT32 t1_aprm_cfg; /* 78 T1 APRM Cfg/Ctl */ - VINT32 t1_aprm_load; /* 79 T1 APRM Manual Load */ - VINT32 t1_aprm_ists; /* 7A T1 APRM Intr Sts */ - VINT32 t1_aprm_1sec_2; /* 7B T1 APRM One Second Content Octet 2 */ - VINT32 t1_aprm_1sec_3; /* 7C T1 APRM One Second Content Octet 3 */ - VINT32 t1_aprm_1sec_4; /* 7D T1 APRM One Second Content Octet 4 */ - VINT32 t1_aprm_1sec_5; /* 7E T1 APRM One Second Content MSB (Octect 5) */ - VINT32 t1_aprm_1sec_6; /* 7F T1 APRM One Second Content MSB (Octect 6) */ - - VINT32 e1_tran_cfg; /* 80 E1 TRAN Cfg */ - VINT32 e1_tran_txalarm; /* 81 E1 TRAN TX Alarm/Diagnostic Ctl */ - VINT32 e1_tran_intctl; /* 82 E1 TRAN International Ctl */ - VINT32 e1_tran_extrab; /* 83 E1 TRAN Extra Bits Ctl */ - VINT32 e1_tran_ien; /* 84 E1 TRAN Intr Enable */ - VINT32 e1_tran_ists; /* 85 E1 TRAN Intr Sts */ - VINT32 e1_tran_nats; /* 86 E1 TRAN National Bit Codeword - * Select */ - VINT32 e1_tran_nat; /* 87 E1 TRAN National Bit Codeword */ - VINT32 __res88; /* 88 Reserved */ - VINT32 __res89; /* 89 Reserved */ - VINT32 __res8A; /* 8A Reserved */ - VINT32 __res8B; /* 8B Reserved */ - - VINT32 _t1_frmr_res8C; /* 8C T1 FRMR Reserved */ - VINT32 _t1_frmr_res8D; 
/* 8D T1 FRMR Reserved */ - VINT32 __res8E; /* 8E Reserved */ - VINT32 __res8F; /* 8F Reserved */ - - VINT32 e1_frmr_aopts; /* 90 E1 FRMR Frame Alignment Options */ - VINT32 e1_frmr_mopts; /* 91 E1 FRMR Maintenance Mode Options */ - VINT32 e1_frmr_ien; /* 92 E1 FRMR Framing Sts Intr Enable */ - VINT32 e1_frmr_mien; /* 93 E1 FRMR Maintenance/Alarm Sts Intr Enable */ - VINT32 e1_frmr_ists; /* 94 E1 FRMR Framing Sts Intr Indication */ - VINT32 e1_frmr_mists; /* 95 E1 FRMR Maintenance/Alarm Sts Indication Enable */ - VINT32 e1_frmr_sts; /* 96 E1 FRMR Framing Sts */ - VINT32 e1_frmr_masts; /* 97 E1 FRMR Maintenance/Alarm Sts */ - VINT32 e1_frmr_nat_bits; /* 98 E1 FRMR International/National Bits */ - VINT32 e1_frmr_crc_lsb; /* 99 E1 FRMR CRC Err Cnt - LSB */ - VINT32 e1_frmr_crc_msb; /* 9A E1 FRMR CRC Err Cnt - MSB */ - VINT32 e1_frmr_nat_ien; /* 9B E1 FRMR National Bit Codeword Intr Enables */ - VINT32 e1_frmr_nat_ists; /* 9C E1 FRMR National Bit Codeword Intr/Sts */ - VINT32 e1_frmr_nat; /* 9D E1 FRMR National Bit Codewords */ - VINT32 e1_frmr_fp_ien; /* 9E E1 FRMR Frame Pulse/Alarm Intr Enables */ - VINT32 e1_frmr_fp_ists; /* 9F E1 FRMR Frame Pulse/Alarm Intr/Sts */ - - VINT32 __resA0; /* A0 Reserved */ - VINT32 __resA1; /* A1 Reserved */ - VINT32 __resA2; /* A2 Reserved */ - VINT32 __resA3; /* A3 Reserved */ - VINT32 __resA4; /* A4 Reserved */ - VINT32 __resA5; /* A5 Reserved */ - VINT32 __resA6; /* A6 Reserved */ - VINT32 __resA7; /* A7 Reserved */ - - VINT32 tdpr1_cfg; /* A8 TDPR #1 Cfg */ - VINT32 tdpr1_utl; /* A9 TDPR #1 Upper TX Threshold */ - VINT32 tdpr1_ltl; /* AA TDPR #1 Lower TX Threshold */ - VINT32 tdpr1_ien; /* AB TDPR #1 Intr Enable */ - VINT32 tdpr1_ists; /* AC TDPR #1 Intr Sts/UDR Clear */ - VINT32 tdpr1_data; /* AD TDPR #1 TX Data */ - VINT32 __resAE; /* AE Reserved */ - VINT32 __resAF; /* AF Reserved */ - VINT32 tdpr2_cfg; /* B0 TDPR #2 Cfg */ - VINT32 tdpr2_utl; /* B1 TDPR #2 Upper TX Threshold */ - VINT32 tdpr2_ltl; /* B2 TDPR #2 Lower TX Threshold */ - VINT32 tdpr2_ien; /* B3 TDPR #2 Intr Enable */ - VINT32 tdpr2_ists; /* B4 TDPR #2 Intr Sts/UDR Clear */ - VINT32 tdpr2_data; /* B5 TDPR #2 TX Data */ - VINT32 __resB6; /* B6 Reserved */ - VINT32 __resB7; /* B7 Reserved1 */ - VINT32 tdpr3_cfg; /* B8 TDPR #3 Cfg */ - VINT32 tdpr3_utl; /* B9 TDPR #3 Upper TX Threshold */ - VINT32 tdpr3_ltl; /* BA TDPR #3 Lower TX Threshold */ - VINT32 tdpr3_ien; /* BB TDPR #3 Intr Enable */ - VINT32 tdpr3_ists; /* BC TDPR #3 Intr Sts/UDR Clear */ - VINT32 tdpr3_data; /* BD TDPR #3 TX Data */ - VINT32 __resBE; /* BE Reserved */ - VINT32 __resBF; /* BF Reserved */ - - VINT32 rdlc1_cfg; /* C0 RDLC #1 Cfg */ - VINT32 rdlc1_intctl; /* C1 RDLC #1 Intr Ctl */ - VINT32 rdlc1_sts; /* C2 RDLC #1 Sts */ - VINT32 rdlc1_data; /* C3 RDLC #1 Data */ - VINT32 rdlc1_paddr; /* C4 RDLC #1 Primary Addr Match */ - VINT32 rdlc1_saddr; /* C5 RDLC #1 Secondary Addr Match */ - VINT32 __resC6; /* C6 Reserved */ - VINT32 __resC7; /* C7 Reserved */ - VINT32 rdlc2_cfg; /* C8 RDLC #2 Cfg */ - VINT32 rdlc2_intctl; /* C9 RDLC #2 Intr Ctl */ - VINT32 rdlc2_sts; /* CA RDLC #2 Sts */ - VINT32 rdlc2_data; /* CB RDLC #2 Data */ - VINT32 rdlc2_paddr; /* CC RDLC #2 Primary Addr Match */ - VINT32 rdlc2_saddr; /* CD RDLC #2 Secondary Addr Match */ - VINT32 __resCE; /* CE Reserved */ - VINT32 __resCF; /* CF Reserved */ - VINT32 rdlc3_cfg; /* D0 RDLC #3 Cfg */ - VINT32 rdlc3_intctl; /* D1 RDLC #3 Intr Ctl */ - VINT32 rdlc3_sts; /* D2 RDLC #3 Sts */ - VINT32 rdlc3_data; /* D3 RDLC #3 Data */ - VINT32 rdlc3_paddr; /* D4 RDLC #3 Primary 
Addr Match */ - VINT32 rdlc3_saddr; /* D5 RDLC #3 Secondary Addr Match */ - - VINT32 csu_cfg; /* D6 CSU Cfg */ - VINT32 _csu_resD7; /* D7 CSU Reserved */ - - VINT32 rlps_idata3; /* D8 RLPS Indirect Data, 24-31 */ - VINT32 rlps_idata2; /* D9 RLPS Indirect Data, 16-23 */ - VINT32 rlps_idata1; /* DA RLPS Indirect Data, 8-15 */ - VINT32 rlps_idata0; /* DB RLPS Indirect Data, 0-7 */ - VINT32 rlps_eqvr; /* DC RLPS Equalizer Voltage Reference - * (E1 missing) */ - VINT32 _rlps_resDD; /* DD RLPS Reserved */ - VINT32 _rlps_resDE; /* DE RLPS Reserved */ - VINT32 _rlps_resDF; /* DF RLPS Reserved */ - - VINT32 prgd_ctl; /* E0 PRGD Ctl */ - VINT32 prgd_ies; /* E1 PRGD Intr Enable/Sts */ - VINT32 prgd_shift_len; /* E2 PRGD Shift Length */ - VINT32 prgd_tap; /* E3 PRGD Tap */ - VINT32 prgd_errin; /* E4 PRGD Err Insertion */ - VINT32 _prgd_resE5; /* E5 PRGD Reserved */ - VINT32 _prgd_resE6; /* E6 PRGD Reserved */ - VINT32 _prgd_resE7; /* E7 PRGD Reserved */ - VINT32 prgd_patin1; /* E8 PRGD Pattern Insertion #1 */ - VINT32 prgd_patin2; /* E9 PRGD Pattern Insertion #2 */ - VINT32 prgd_patin3; /* EA PRGD Pattern Insertion #3 */ - VINT32 prgd_patin4; /* EB PRGD Pattern Insertion #4 */ - VINT32 prgd_patdet1; /* EC PRGD Pattern Detector #1 */ - VINT32 prgd_patdet2; /* ED PRGD Pattern Detector #2 */ - VINT32 prgd_patdet3; /* EE PRGD Pattern Detector #3 */ - VINT32 prgd_patdet4; /* EF PRGD Pattern Detector #4 */ - - VINT32 xlpg_cfg; /* F0 XLPG Line Driver Cfg */ - VINT32 xlpg_ctlsts; /* F1 XLPG Ctl/Sts */ - VINT32 xlpg_pwave_addr; /* F2 XLPG Pulse Waveform Storage Write Addr */ - VINT32 xlpg_pwave_data; /* F3 XLPG Pulse Waveform Storage Data */ - VINT32 xlpg_atest_pctl; /* F4 XLPG Analog Test Positive Ctl */ - VINT32 xlpg_atest_nctl; /* F5 XLPG Analog Test Negative Ctl */ - VINT32 xlpg_fdata_sel; /* F6 XLPG Fuse Data Select */ - VINT32 _xlpg_resF7; /* F7 XLPG Reserved */ - - VINT32 rlps_cfgsts; /* F8 RLPS Cfg & Sts */ - VINT32 rlps_alos_thresh; /* F9 RLPS ALOS Detection/Clearance Threshold */ - VINT32 rlps_alos_dper; /* FA RLPS ALOS Detection Period */ - VINT32 rlps_alos_cper; /* FB RLPS ALOS Clearance Period */ - VINT32 rlps_eq_iaddr; /* FC RLPS Equalization Indirect Addr */ - VINT32 rlps_eq_rwsel; /* FD RLPS Equalization Read/WriteB Select */ - VINT32 rlps_eq_ctlsts; /* FE RLPS Equalizer Loop Sts & Ctl */ - VINT32 rlps_eq_cfg; /* FF RLPS Equalizer Cfg */ + VINT32 reset; /* 0E Reset */ + VINT32 prgd_phctl; /* 0F PRGD Positioning/Ctl & HDLC Ctl */ + VINT32 cdrc_cfg; /* 10 CDRC Cfg */ + VINT32 cdrc_ien; /* 11 CDRC Intr Enable */ + VINT32 cdrc_ists; /* 12 CDRC Intr Sts */ + VINT32 cdrc_alos; /* 13 CDRC Alternate Loss of Signal */ + + VINT32 rjat_ists; /* 14 RJAT Intr Sts */ + VINT32 rjat_n1clk; /* 15 RJAT Reference Clk Divisor (N1) Ctl */ + VINT32 rjat_n2clk; /* 16 RJAT Output Clk Divisor (N2) Ctl */ + VINT32 rjat_cfg; /* 17 RJAT Cfg */ + + VINT32 tjat_ists; /* 18 TJAT Intr Sts */ + VINT32 tjat_n1clk; /* 19 TJAT Reference Clk Divisor (N1) Ctl */ + VINT32 tjat_n2clk; /* 1A TJAT Output Clk Divisor (N2) Ctl */ + VINT32 tjat_cfg; /* 1B TJAT Cfg */ + + VINT32 rx_elst_cfg; /* 1C RX-ELST Cfg */ + VINT32 rx_elst_ists; /* 1D RX-ELST Intr Sts */ + VINT32 rx_elst_idle; /* 1E RX-ELST Idle Code */ + VINT32 _rx_elst_res1f; /* 1F RX-ELST Reserved */ + + VINT32 tx_elst_cfg; /* 20 TX-ELST Cfg */ + VINT32 tx_elst_ists; /* 21 TX-ELST Intr Sts */ + VINT32 _tx_elst_res22; /* 22 TX-ELST Reserved */ + VINT32 _tx_elst_res23; /* 23 TX-ELST Reserved */ + VINT32 __res24; /* 24 Reserved */ + VINT32 __res25; /* 25 Reserved */ + VINT32 __res26; 
/* 26 Reserved */ + VINT32 __res27; /* 27 Reserved */ + + VINT32 rxce1_ctl; /* 28 RXCE RX Data Link 1 Ctl */ + VINT32 rxce1_bits; /* 29 RXCE RX Data Link 1 Bit Select */ + VINT32 rxce2_ctl; /* 2A RXCE RX Data Link 2 Ctl */ + VINT32 rxce2_bits; /* 2B RXCE RX Data Link 2 Bit Select */ + VINT32 rxce3_ctl; /* 2C RXCE RX Data Link 3 Ctl */ + VINT32 rxce3_bits; /* 2D RXCE RX Data Link 3 Bit Select */ + VINT32 _rxce_res2E; /* 2E RXCE Reserved */ + VINT32 _rxce_res2F; /* 2F RXCE Reserved */ + + VINT32 brif_cfg; /* 30 BRIF RX Backplane Cfg */ + VINT32 brif_fpcfg; /* 31 BRIF RX Backplane Frame Pulse Cfg */ + VINT32 brif_pfcfg; /* 32 BRIF RX Backplane Parity/F-Bit Cfg */ + VINT32 brif_tsoff; /* 33 BRIF RX Backplane Time Slot Offset */ + VINT32 brif_boff; /* 34 BRIF RX Backplane Bit Offset */ + VINT32 _brif_res35; /* 35 BRIF RX Backplane Reserved */ + VINT32 _brif_res36; /* 36 BRIF RX Backplane Reserved */ + VINT32 _brif_res37; /* 37 BRIF RX Backplane Reserved */ + + VINT32 txci1_ctl; /* 38 TXCI TX Data Link 1 Ctl */ + VINT32 txci1_bits; /* 39 TXCI TX Data Link 2 Bit Select */ + VINT32 txci2_ctl; /* 3A TXCI TX Data Link 1 Ctl */ + VINT32 txci2_bits; /* 3B TXCI TX Data Link 2 Bit Select */ + VINT32 txci3_ctl; /* 3C TXCI TX Data Link 1 Ctl */ + VINT32 txci3_bits; /* 3D TXCI TX Data Link 2 Bit Select */ + VINT32 _txci_res3E; /* 3E TXCI Reserved */ + VINT32 _txci_res3F; /* 3F TXCI Reserved */ + + VINT32 btif_cfg; /* 40 BTIF TX Backplane Cfg */ + VINT32 btif_fpcfg; /* 41 BTIF TX Backplane Frame Pulse Cfg */ + VINT32 btif_pcfgsts; /* 42 BTIF TX Backplane Parity Cfg & Sts */ + VINT32 btif_tsoff; /* 43 BTIF TX Backplane Time Slot Offset */ + VINT32 btif_boff; /* 44 BTIF TX Backplane Bit Offset */ + VINT32 _btif_res45; /* 45 BTIF TX Backplane Reserved */ + VINT32 _btif_res46; /* 46 BTIF TX Backplane Reserved */ + VINT32 _btif_res47; /* 47 BTIF TX Backplane Reserved */ + VINT32 t1_frmr_cfg; /* 48 T1 FRMR Cfg */ + VINT32 t1_frmr_ien; /* 49 T1 FRMR Intr Enable */ + VINT32 t1_frmr_ists; /* 4A T1 FRMR Intr Sts */ + VINT32 __res_4B; /* 4B Reserved */ + VINT32 ibcd_cfg; /* 4C IBCD Cfg */ + VINT32 ibcd_ies; /* 4D IBCD Intr Enable/Sts */ + VINT32 ibcd_act; /* 4E IBCD Activate Code */ + VINT32 ibcd_deact; /* 4F IBCD Deactivate Code */ + + VINT32 sigx_cfg; /* 50 SIGX Cfg/Change of Signaling State */ + VINT32 sigx_acc_cos; /* 51 SIGX + * uP Access Sts/Change of Signaling State */ + VINT32 sigx_iac_cos; /* 52 SIGX Channel Indirect + * Addr/Ctl/Change of Signaling State */ + VINT32 sigx_idb_cos; /* 53 SIGX Channel Indirect Data + * Buffer/Change of Signaling State */ + + VINT32 t1_xbas_cfg; /* 54 T1 XBAS Cfg */ + VINT32 t1_xbas_altx; /* 55 T1 XBAS Alarm TX */ + VINT32 t1_xibc_ctl; /* 56 T1 XIBC Ctl */ + VINT32 t1_xibc_lbcode; /* 57 T1 XIBC Loopback Code */ + + VINT32 pmon_ies; /* 58 PMON Intr Enable/Sts */ + VINT32 pmon_fberr; /* 59 PMON Framing Bit Err Cnt */ + VINT32 pmon_feb_lsb; /* 5A PMON + * OFF/COFA/Far End Block Err Cnt (LSB) */ + VINT32 pmon_feb_msb; /* 5B PMON + * OFF/COFA/Far End Block Err Cnt (MSB) */ + VINT32 pmon_bed_lsb; /* 5C PMON Bit/Err/CRCE Cnt (LSB) */ + VINT32 pmon_bed_msb; /* 5D PMON Bit/Err/CRCE Cnt (MSB) */ + VINT32 pmon_lvc_lsb; /* 5E PMON LVC Cnt (LSB) */ + VINT32 pmon_lvc_msb; /* 5F PMON LVC Cnt (MSB) */ + + VINT32 t1_almi_cfg; /* 60 T1 ALMI Cfg */ + VINT32 t1_almi_ien; /* 61 T1 ALMI Intr Enable */ + VINT32 t1_almi_ists; /* 62 T1 ALMI Intr Sts */ + VINT32 t1_almi_detsts; /* 63 T1 ALMI Alarm Detection Sts */ + + VINT32 _t1_pdvd_res64; /* 64 T1 PDVD Reserved */ + VINT32 t1_pdvd_ies; /* 65 T1 PDVD 
Intr Enable/Sts */ + VINT32 _t1_xboc_res66; /* 66 T1 XBOC Reserved */ + VINT32 t1_xboc_code; /* 67 T1 XBOC Code */ + VINT32 _t1_xpde_res68; /* 68 T1 XPDE Reserved */ + VINT32 t1_xpde_ies; /* 69 T1 XPDE Intr Enable/Sts */ + + VINT32 t1_rboc_ena; /* 6A T1 RBOC Enable */ + VINT32 t1_rboc_sts; /* 6B T1 RBOC Code Sts */ + + VINT32 t1_tpsc_cfg; /* 6C TPSC Cfg */ + VINT32 t1_tpsc_sts; /* 6D TPSC uP Access Sts */ + VINT32 t1_tpsc_ciaddr; /* 6E TPSC Channel Indirect + * Addr/Ctl */ + VINT32 t1_tpsc_cidata; /* 6F TPSC Channel Indirect Data + * Buffer */ + VINT32 t1_rpsc_cfg; /* 70 RPSC Cfg */ + VINT32 t1_rpsc_sts; /* 71 RPSC uP Access Sts */ + VINT32 t1_rpsc_ciaddr; /* 72 RPSC Channel Indirect + * Addr/Ctl */ + VINT32 t1_rpsc_cidata; /* 73 RPSC Channel Indirect Data + * Buffer */ + VINT32 __res74; /* 74 Reserved */ + VINT32 __res75; /* 75 Reserved */ + VINT32 __res76; /* 76 Reserved */ + VINT32 __res77; /* 77 Reserved */ + + VINT32 t1_aprm_cfg; /* 78 T1 APRM Cfg/Ctl */ + VINT32 t1_aprm_load; /* 79 T1 APRM Manual Load */ + VINT32 t1_aprm_ists; /* 7A T1 APRM Intr Sts */ + VINT32 t1_aprm_1sec_2; /* 7B T1 APRM One Second Content Octet 2 */ + VINT32 t1_aprm_1sec_3; /* 7C T1 APRM One Second Content Octet 3 */ + VINT32 t1_aprm_1sec_4; /* 7D T1 APRM One Second Content Octet 4 */ + VINT32 t1_aprm_1sec_5; /* 7E T1 APRM + * One Second Content MSB (Octect 5) */ + VINT32 t1_aprm_1sec_6; /* 7F T1 APRM + * One Second Content MSB (Octect 6) */ + + VINT32 e1_tran_cfg; /* 80 E1 TRAN Cfg */ + VINT32 e1_tran_txalarm; /* 81 E1 TRAN TX Alarm/Diagnostic Ctl */ + VINT32 e1_tran_intctl; /* 82 E1 TRAN International Ctl */ + VINT32 e1_tran_extrab; /* 83 E1 TRAN Extra Bits Ctl */ + VINT32 e1_tran_ien; /* 84 E1 TRAN Intr Enable */ + VINT32 e1_tran_ists; /* 85 E1 TRAN Intr Sts */ + VINT32 e1_tran_nats; /* 86 E1 TRAN National Bit Codeword + * Select */ + VINT32 e1_tran_nat; /* 87 E1 TRAN National Bit Codeword */ + VINT32 __res88; /* 88 Reserved */ + VINT32 __res89; /* 89 Reserved */ + VINT32 __res8A; /* 8A Reserved */ + VINT32 __res8B; /* 8B Reserved */ + + VINT32 _t1_frmr_res8C; /* 8C T1 FRMR Reserved */ + VINT32 _t1_frmr_res8D; /* 8D T1 FRMR Reserved */ + VINT32 __res8E; /* 8E Reserved */ + VINT32 __res8F; /* 8F Reserved */ + + VINT32 e1_frmr_aopts; /* 90 E1 FRMR Frame Alignment Options */ + VINT32 e1_frmr_mopts; /* 91 E1 FRMR Maintenance Mode Options */ + VINT32 e1_frmr_ien; /* 92 E1 FRMR Framing Sts Intr Enable */ + VINT32 e1_frmr_mien; /* 93 E1 FRMR + * Maintenance/Alarm Sts Intr Enable */ + VINT32 e1_frmr_ists; /* 94 E1 FRMR Framing Sts Intr Indication */ + VINT32 e1_frmr_mists; /* 95 E1 FRMR + * Maintenance/Alarm Sts Indication Enable */ + VINT32 e1_frmr_sts; /* 96 E1 FRMR Framing Sts */ + VINT32 e1_frmr_masts; /* 97 E1 FRMR Maintenance/Alarm Sts */ + VINT32 e1_frmr_nat_bits; /* 98 E1 FRMR International/National Bits */ + VINT32 e1_frmr_crc_lsb; /* 99 E1 FRMR CRC Err Cnt - LSB */ + VINT32 e1_frmr_crc_msb; /* 9A E1 FRMR CRC Err Cnt - MSB */ + VINT32 e1_frmr_nat_ien; /* 9B E1 FRMR + * National Bit Codeword Intr Enables */ + VINT32 e1_frmr_nat_ists; /* 9C E1 FRMR + * National Bit Codeword Intr/Sts */ + VINT32 e1_frmr_nat; /* 9D E1 FRMR National Bit Codewords */ + VINT32 e1_frmr_fp_ien; /* 9E E1 FRMR + * Frame Pulse/Alarm Intr Enables */ + VINT32 e1_frmr_fp_ists; /* 9F E1 FRMR Frame Pulse/Alarm Intr/Sts */ + + VINT32 __resA0; /* A0 Reserved */ + VINT32 __resA1; /* A1 Reserved */ + VINT32 __resA2; /* A2 Reserved */ + VINT32 __resA3; /* A3 Reserved */ + VINT32 __resA4; /* A4 Reserved */ + VINT32 __resA5; /* A5 Reserved */ + 
VINT32 __resA6; /* A6 Reserved */ + VINT32 __resA7; /* A7 Reserved */ + + VINT32 tdpr1_cfg; /* A8 TDPR #1 Cfg */ + VINT32 tdpr1_utl; /* A9 TDPR #1 Upper TX Threshold */ + VINT32 tdpr1_ltl; /* AA TDPR #1 Lower TX Threshold */ + VINT32 tdpr1_ien; /* AB TDPR #1 Intr Enable */ + VINT32 tdpr1_ists; /* AC TDPR #1 Intr Sts/UDR Clear */ + VINT32 tdpr1_data; /* AD TDPR #1 TX Data */ + VINT32 __resAE; /* AE Reserved */ + VINT32 __resAF; /* AF Reserved */ + VINT32 tdpr2_cfg; /* B0 TDPR #2 Cfg */ + VINT32 tdpr2_utl; /* B1 TDPR #2 Upper TX Threshold */ + VINT32 tdpr2_ltl; /* B2 TDPR #2 Lower TX Threshold */ + VINT32 tdpr2_ien; /* B3 TDPR #2 Intr Enable */ + VINT32 tdpr2_ists; /* B4 TDPR #2 Intr Sts/UDR Clear */ + VINT32 tdpr2_data; /* B5 TDPR #2 TX Data */ + VINT32 __resB6; /* B6 Reserved */ + VINT32 __resB7; /* B7 Reserved1 */ + VINT32 tdpr3_cfg; /* B8 TDPR #3 Cfg */ + VINT32 tdpr3_utl; /* B9 TDPR #3 Upper TX Threshold */ + VINT32 tdpr3_ltl; /* BA TDPR #3 Lower TX Threshold */ + VINT32 tdpr3_ien; /* BB TDPR #3 Intr Enable */ + VINT32 tdpr3_ists; /* BC TDPR #3 Intr Sts/UDR Clear */ + VINT32 tdpr3_data; /* BD TDPR #3 TX Data */ + VINT32 __resBE; /* BE Reserved */ + VINT32 __resBF; /* BF Reserved */ + + VINT32 rdlc1_cfg; /* C0 RDLC #1 Cfg */ + VINT32 rdlc1_intctl; /* C1 RDLC #1 Intr Ctl */ + VINT32 rdlc1_sts; /* C2 RDLC #1 Sts */ + VINT32 rdlc1_data; /* C3 RDLC #1 Data */ + VINT32 rdlc1_paddr; /* C4 RDLC #1 Primary Addr Match */ + VINT32 rdlc1_saddr; /* C5 RDLC #1 Secondary Addr Match */ + VINT32 __resC6; /* C6 Reserved */ + VINT32 __resC7; /* C7 Reserved */ + VINT32 rdlc2_cfg; /* C8 RDLC #2 Cfg */ + VINT32 rdlc2_intctl; /* C9 RDLC #2 Intr Ctl */ + VINT32 rdlc2_sts; /* CA RDLC #2 Sts */ + VINT32 rdlc2_data; /* CB RDLC #2 Data */ + VINT32 rdlc2_paddr; /* CC RDLC #2 Primary Addr Match */ + VINT32 rdlc2_saddr; /* CD RDLC #2 Secondary Addr Match */ + VINT32 __resCE; /* CE Reserved */ + VINT32 __resCF; /* CF Reserved */ + VINT32 rdlc3_cfg; /* D0 RDLC #3 Cfg */ + VINT32 rdlc3_intctl; /* D1 RDLC #3 Intr Ctl */ + VINT32 rdlc3_sts; /* D2 RDLC #3 Sts */ + VINT32 rdlc3_data; /* D3 RDLC #3 Data */ + VINT32 rdlc3_paddr; /* D4 RDLC #3 Primary Addr Match */ + VINT32 rdlc3_saddr; /* D5 RDLC #3 Secondary Addr Match */ + + VINT32 csu_cfg; /* D6 CSU Cfg */ + VINT32 _csu_resD7; /* D7 CSU Reserved */ + + VINT32 rlps_idata3; /* D8 RLPS Indirect Data, 24-31 */ + VINT32 rlps_idata2; /* D9 RLPS Indirect Data, 16-23 */ + VINT32 rlps_idata1; /* DA RLPS Indirect Data, 8-15 */ + VINT32 rlps_idata0; /* DB RLPS Indirect Data, 0-7 */ + VINT32 rlps_eqvr; /* DC RLPS Equalizer Voltage Reference + * (E1 missing) */ + VINT32 _rlps_resDD; /* DD RLPS Reserved */ + VINT32 _rlps_resDE; /* DE RLPS Reserved */ + VINT32 _rlps_resDF; /* DF RLPS Reserved */ + + VINT32 prgd_ctl; /* E0 PRGD Ctl */ + VINT32 prgd_ies; /* E1 PRGD Intr Enable/Sts */ + VINT32 prgd_shift_len; /* E2 PRGD Shift Length */ + VINT32 prgd_tap; /* E3 PRGD Tap */ + VINT32 prgd_errin; /* E4 PRGD Err Insertion */ + VINT32 _prgd_resE5; /* E5 PRGD Reserved */ + VINT32 _prgd_resE6; /* E6 PRGD Reserved */ + VINT32 _prgd_resE7; /* E7 PRGD Reserved */ + VINT32 prgd_patin1; /* E8 PRGD Pattern Insertion #1 */ + VINT32 prgd_patin2; /* E9 PRGD Pattern Insertion #2 */ + VINT32 prgd_patin3; /* EA PRGD Pattern Insertion #3 */ + VINT32 prgd_patin4; /* EB PRGD Pattern Insertion #4 */ + VINT32 prgd_patdet1; /* EC PRGD Pattern Detector #1 */ + VINT32 prgd_patdet2; /* ED PRGD Pattern Detector #2 */ + VINT32 prgd_patdet3; /* EE PRGD Pattern Detector #3 */ + VINT32 prgd_patdet4; /* EF PRGD Pattern 
Detector #4 */ + + VINT32 xlpg_cfg; /* F0 XLPG Line Driver Cfg */ + VINT32 xlpg_ctlsts; /* F1 XLPG Ctl/Sts */ + VINT32 xlpg_pwave_addr; /* F2 XLPG + * Pulse Waveform Storage Write Addr */ + VINT32 xlpg_pwave_data; /* F3 XLPG Pulse Waveform Storage Data */ + VINT32 xlpg_atest_pctl; /* F4 XLPG Analog Test Positive Ctl */ + VINT32 xlpg_atest_nctl; /* F5 XLPG Analog Test Negative Ctl */ + VINT32 xlpg_fdata_sel; /* F6 XLPG Fuse Data Select */ + VINT32 _xlpg_resF7; /* F7 XLPG Reserved */ + + VINT32 rlps_cfgsts; /* F8 RLPS Cfg & Sts */ + VINT32 rlps_alos_thresh; /* F9 RLPS + * ALOS Detection/Clearance Threshold */ + VINT32 rlps_alos_dper; /* FA RLPS ALOS Detection Period */ + VINT32 rlps_alos_cper; /* FB RLPS ALOS Clearance Period */ + VINT32 rlps_eq_iaddr; /* FC RLPS Equalization Indirect Addr */ + VINT32 rlps_eq_rwsel; /* FD RLPS Equalization Read/WriteB Select */ + VINT32 rlps_eq_ctlsts; /* FE RLPS Equalizer Loop Sts & Ctl */ + VINT32 rlps_eq_cfg; /* FF RLPS Equalizer Cfg */ }; -typedef struct s_comet_reg comet_t; - /* 00AH: MDIAG Register bit definitions */ #define COMET_MDIAG_ID5 0x40 #define COMET_MDIAG_LBMASK 0x3F @@ -338,7 +347,7 @@ typedef struct s_comet_reg comet_t; #ifdef __KERNEL__ extern void -init_comet(void *, comet_t *, u_int32_t, int, u_int8_t); +init_comet(void *, struct s_comet_reg *, u_int32_t, int, u_int8_t); #endif #endif /* _INC_COMET_H_ */ diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c index d021b312ffa288..95218e2839662f 100644 --- a/drivers/staging/cxt1e1/functions.c +++ b/drivers/staging/cxt1e1/functions.c @@ -274,7 +274,7 @@ VMETRO_TRACE (void *x) void VMETRO_TRIGGER (ci_t *ci, int x) { - comet_t *comet; + struct s_comet_reg *comet; volatile u_int32_t data; comet = ci->port[0].cometbase; /* default to COMET # 0 */ diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c index 0ba8c3ae673bd1..7a3a30cd0f7f55 100644 --- a/drivers/staging/cxt1e1/musycc.c +++ b/drivers/staging/cxt1e1/musycc.c @@ -1,5 +1,5 @@ -unsigned int max_intcnt = 0; -unsigned int max_bh = 0; +static unsigned int max_intcnt = 0; +static unsigned int max_bh = 0; /*----------------------------------------------------------------------------- * musycc.c - diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c index 4028ea11c4423b..a9d95753be2047 100644 --- a/drivers/staging/cxt1e1/pmcc4_drv.c +++ b/drivers/staging/cxt1e1/pmcc4_drv.c @@ -194,7 +194,7 @@ checkPorts (ci_t *ci) * alarms conflicts with NCOMM's interrupt servicing implementation. 
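The comet.h and functions.c hunks above drop the comet_t typedef and spell out struct s_comet_reg at every use site, and musycc.c makes its file-local counters static; both are routine staging cleanups. A minimal sketch of the typedef removal, using a hypothetical two-register block rather than the real COMET layout:

    #include <linux/types.h>

    /* before: typedef struct s_demo_reg { u32 ctl; u32 sts; } demo_t; */
    struct s_demo_reg {
            u32 ctl;        /* 00 control */
            u32 sts;        /* 01 status  */
    };

    /* use sites now name the struct tag directly */
    static void demo_init(struct s_demo_reg *base)
    {
            base->ctl = 0;  /* placeholder access through the tag */
    }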
*/ - comet_t *comet; + struct s_comet_reg *comet; volatile u_int32_t value; u_int32_t copyVal, LEDval; @@ -507,7 +507,7 @@ c4_cleanup (void) int c4_get_portcfg (ci_t *ci) { - comet_t *comet; + struct s_comet_reg *comet; int portnum, mask; u_int32_t wdata, rdata; @@ -561,7 +561,7 @@ c4_init (ci_t *ci, u_char *func0, u_char *func1) for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++) { pi = &ci->port[portnum]; - pi->cometbase = (comet_t *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum))); + pi->cometbase = (struct s_comet_reg *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum))); pi->reg = (struct musycc_globalr *) ((u_char *) ci->reg + (portnum * 0x800)); pi->portnum = portnum; pi->p.portnum = portnum; @@ -693,7 +693,7 @@ c4_init2 (ci_t *ci) int c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd) { - comet_t *comet; + struct s_comet_reg *comet; volatile u_int32_t loopValue; comet = ci->port[portnum].cometbase; @@ -752,7 +752,7 @@ c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd) status_t c4_frame_rw (ci_t *ci, struct sbecom_port_param *pp) { - comet_t *comet; + struct s_comet_reg *comet; volatile u_int32_t data; if (pp->portnum >= ci->max_port)/* sanity check */ diff --git a/drivers/staging/cxt1e1/pmcc4_private.h b/drivers/staging/cxt1e1/pmcc4_private.h index b2b6e3702630a7..7edbd4e492e39a 100644 --- a/drivers/staging/cxt1e1/pmcc4_private.h +++ b/drivers/staging/cxt1e1/pmcc4_private.h @@ -133,7 +133,7 @@ struct c4_port_info void *regram_saved; /* Original malloc value may have non-2KB * boundary. Need to save for use when * freeing. */ - comet_t *cometbase; + struct s_comet_reg *cometbase; struct sbe_card_info *up; /* diff --git a/drivers/staging/cxt1e1/sbeid.c b/drivers/staging/cxt1e1/sbeid.c index 6ec51bccceb118..97c5c6e7e2996b 100644 --- a/drivers/staging/cxt1e1/sbeid.c +++ b/drivers/staging/cxt1e1/sbeid.c @@ -20,190 +20,185 @@ #include "sbe_bid.h" char * -sbeid_get_bdname (ci_t *ci) +sbeid_get_bdname(ci_t *ci) { - char *np = NULL; + char *np = NULL; - switch (ci->brd_id) - { - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1): - np = "wanPTMC-256T3 "; - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1): - np = "wanPTMC-256T3 "; - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1): - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L): - np = "wanPMC-C4T1E1"; - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1): - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L): - np = "wanPMC-C2T1E1"; - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1): - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L): - np = "wanPMC-C1T1E1"; - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1): - np = "wanPCI-C4T1E1"; - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1): - np = "wanPCI-C2T1E1"; - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1): - np = "wanPCI-C1T1E1"; - break; - default: - /*** np = ""; ***/ - np = "wanPCI-CxT1E1"; - break; - } + switch (ci->brd_id) { + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1): + np = "wanPTMC-256T3 "; + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1): + np = "wanPTMC-256T3 "; + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1): + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L): + np = "wanPMC-C4T1E1"; + break; + case 
SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1): + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L): + np = "wanPMC-C2T1E1"; + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1): + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L): + np = "wanPMC-C1T1E1"; + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1): + np = "wanPCI-C4T1E1"; + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1): + np = "wanPCI-C2T1E1"; + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1): + np = "wanPCI-C1T1E1"; + break; + default: + /*** np = ""; ***/ + np = "wanPCI-CxT1E1"; + break; + } - return np; + return np; } /* given the presetting of brd_id, set the corresponding hdw_id */ void -sbeid_set_hdwbid (ci_t *ci) +sbeid_set_hdwbid(ci_t *ci) { - /* - * set SBE's unique hardware identification (for legacy boards might not - * have this register implemented) - */ + /* + * set SBE's unique hardware identification (for legacy boards might not + * have this register implemented) + */ - switch (ci->brd_id) - { - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1): - ci->hdw_bid = SBE_BID_256T3_E1; /* 0x46 - SBE wanPTMC-256T3 (E1 - * Version) */ - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1): - ci->hdw_bid = SBE_BID_256T3_T1; /* 0x42 - SBE wanPTMC-256T3 (T1 - * Version) */ - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1): - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L): - /* - * This Board ID is a generic identification. Use the found number - * of ports to further define this hardware. - */ - switch (ci->max_port) - { - default: /* shouldn't need a default, but have one - * anyway */ - case 4: - ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0xC4 - SBE wanPMC-C4T1E1 */ - break; - case 2: - ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1); - break; - case 1: - ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1); - break; - } - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1): - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L): - ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */ - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1): - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L): - ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */ - break; + switch (ci->brd_id) { + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1): + ci->hdw_bid = SBE_BID_256T3_E1; /* 0x46 - SBE wanPTMC-256T3 (E1 + * Version) */ + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1): + ci->hdw_bid = SBE_BID_256T3_T1; /* 0x42 - SBE wanPTMC-256T3 (T1 + * Version) */ + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1): + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L): + /* + * This Board ID is a generic identification. Use the found number + * of ports to further define this hardware. 
+ */ + switch (ci->max_port) { + default: /* shouldn't need a default, but have one + * anyway */ + case 4: + ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0xC4 - SBE wanPMC-C4T1E1 */ + break; + case 2: + ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1); + break; + case 1: + ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1); + break; + } + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1): + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L): + ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */ + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1): + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L): + ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */ + break; #ifdef SBE_PMCC4_ENABLE - /* - * This case is entered as a result of the inability to obtain the - * from the board's EEPROM. Assume a PCI board and set - * according to the number ofr found ports. - */ - case 0: - /* start by assuming 4-port for ZERO casing */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1); - /* drop thru to set hdw_bid and alternate PCI CxT1E1 settings */ + /* + * This case is entered as a result of the inability to obtain the + * from the board's EEPROM. Assume a PCI board and set + * according to the number ofr found ports. + */ + case 0: + /* start by assuming 4-port for ZERO casing */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1); + /* drop thru to set hdw_bid and alternate PCI CxT1E1 settings */ #endif - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1): - /* - * This Board ID is a generic identification. Use the number of - * found ports to further define this hardware. - */ - switch (ci->max_port) - { - default: /* shouldn't need a default, but have one - * anyway */ - case 4: - ci->hdw_bid = SBE_BID_PCI_C4T1E1; /* 0x04 - SBE wanPCI-C4T1E1 */ - break; - case 2: - ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1); - break; - case 1: - ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1); - break; - } - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1): - ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */ - break; - case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1): - ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */ - break; - default: - /*** bid = ""; ***/ - ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0x41 - SBE wanPTMC-C4T1E1 */ - break; - } + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1): + /* + * This Board ID is a generic identification. Use the number of + * found ports to further define this hardware. 
+ */ + switch (ci->max_port) { + default: /* shouldn't need a default, but have one + * anyway */ + case 4: + ci->hdw_bid = SBE_BID_PCI_C4T1E1; /* 0x04 - SBE wanPCI-C4T1E1 */ + break; + case 2: + ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1); + break; + case 1: + ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1); + break; + } + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1): + ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */ + break; + case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1): + ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */ + break; + default: + /*** bid = ""; ***/ + ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0x41 - SBE wanPTMC-C4T1E1 */ + break; + } } /* given the presetting of hdw_bid, set the corresponding brd_id */ void -sbeid_set_bdtype (ci_t *ci) +sbeid_set_bdtype(ci_t *ci) { - /* set SBE's unique PCI VENDOR/DEVID */ - switch (ci->hdw_bid) - { - case SBE_BID_C1T3: /* SBE wanPMC-C1T3 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3); - break; - case SBE_BID_C24TE1: /* SBE wanPTMC-C24TE1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1); - break; - case SBE_BID_256T3_E1: /* SBE wanPTMC-256T3 E1 Version */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1); - break; - case SBE_BID_256T3_T1: /* SBE wanPTMC-256T3 T1 Version */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1); - break; - case SBE_BID_PMC_C4T1E1: /* 0xC4 - SBE wanPMC-C4T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1); - break; - case SBE_BID_PMC_C2T1E1: /* 0xC2 - SBE wanPMC-C2T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1); - break; - case SBE_BID_PMC_C1T1E1: /* 0xC1 - SBE wanPMC-C1T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1); - break; - case SBE_BID_PCI_C4T1E1: /* 0x04 - SBE wanPCI-C4T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1); - break; - case SBE_BID_PCI_C2T1E1: /* 0x02 - SBE wanPCI-C2T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1); - break; - case SBE_BID_PCI_C1T1E1: /* 0x01 - SBE wanPCI-C1T1E1 */ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1); - break; + /* set SBE's unique PCI VENDOR/DEVID */ + switch (ci->hdw_bid) { + case SBE_BID_C1T3: /* SBE wanPMC-C1T3 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3); + break; + case SBE_BID_C24TE1: /* SBE wanPTMC-C24TE1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1); + break; + case SBE_BID_256T3_E1: /* SBE wanPTMC-256T3 E1 Version */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1); + break; + case SBE_BID_256T3_T1: /* SBE wanPTMC-256T3 T1 Version */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1); + break; + case SBE_BID_PMC_C4T1E1: /* 0xC4 - SBE wanPMC-C4T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1); + break; + case SBE_BID_PMC_C2T1E1: /* 0xC2 - SBE wanPMC-C2T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1); + break; + case SBE_BID_PMC_C1T1E1: /* 0xC1 - SBE wanPMC-C1T1E1 */ + ci->brd_id = 
SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1); + break; + case SBE_BID_PCI_C4T1E1: /* 0x04 - SBE wanPCI-C4T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1); + break; + case SBE_BID_PCI_C2T1E1: /* 0x02 - SBE wanPCI-C2T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1); + break; + case SBE_BID_PCI_C1T1E1: /* 0x01 - SBE wanPCI-C1T1E1 */ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1); + break; - default: - /*** hdw_bid = ""; ***/ - ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1); - break; - } + default: + /*** hdw_bid = ""; ***/ + ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1); + break; + } } diff --git a/drivers/staging/dgap/dgap_conf.h b/drivers/staging/dgap/dgap_conf.h index 88097013ed0485..484ed726a4d6dd 100644 --- a/drivers/staging/dgap/dgap_conf.h +++ b/drivers/staging/dgap/dgap_conf.h @@ -138,7 +138,7 @@ #define CU 91 #define PRINT 92 #define XPRINT 93 -#define CMAJOR 94 +#define CMAJOR 94 #define ALTPIN 95 #define STARTO 96 #define USEINTR 97 @@ -262,9 +262,9 @@ struct cnode { } module; char *ttyname; - + char *cuname; - + char *printname; int majornumber; diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c index 4c1515ee56e58a..089d017fc2915a 100644 --- a/drivers/staging/dgap/dgap_driver.c +++ b/drivers/staging/dgap/dgap_driver.c @@ -506,7 +506,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id) /* get the board structure and prep it */ brd = dgap_Board[dgap_NumBoards] = - (struct board_t *) dgap_driver_kzmalloc(sizeof(struct board_t), GFP_KERNEL); + (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL); if (!brd) { APR(("memory allocation for board structure failed\n")); return(-ENOMEM); @@ -514,7 +514,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id) /* make a temporary message buffer for the boot messages */ brd->msgbuf = brd->msgbuf_head = - (char *) dgap_driver_kzmalloc(sizeof(char) * 8192, GFP_KERNEL); + (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL); if(!brd->msgbuf) { kfree(brd); APR(("memory allocation for board msgbuf failed\n")); @@ -924,20 +924,6 @@ static void dgap_init_globals(void) ************************************************************************/ -/* - * dgap_driver_kzmalloc() - * - * Malloc and clear memory, - */ -void *dgap_driver_kzmalloc(size_t size, int priority) -{ - void *p = kmalloc(size, priority); - if(p) - memset(p, 0, size); - return(p); -} - - /* * dgap_mbuf() * diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h index 7d631e80c00e88..2f7a55a7e40daa 100644 --- a/drivers/staging/dgap/dgap_driver.h +++ b/drivers/staging/dgap/dgap_driver.h @@ -578,7 +578,6 @@ struct channel_t { *************************************************************************/ extern int dgap_ms_sleep(ulong ms); -extern void *dgap_driver_kzmalloc(size_t size, int priority); extern char *dgap_ioctl_name(int cmd); extern void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len); extern void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len); diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c index 794cf9db8b8308..f75831a422e8a7 100644 --- a/drivers/staging/dgap/dgap_fep5.c +++ b/drivers/staging/dgap/dgap_fep5.c @@ -6,16 +6,6 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your 
option) * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the - * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - * PURPOSE. See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * * * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE! * @@ -39,6 +29,7 @@ #include /* For copy_from_user/copy_to_user */ #include #include /* For tty_schedule_flip */ +#include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) #include @@ -75,7 +66,7 @@ void dgap_do_config_load(uchar __user *uaddr, int len) char buf[U2BSIZE]; int n; - to_addr = dgap_config_buf = dgap_driver_kzmalloc(len + 1, GFP_ATOMIC); + to_addr = dgap_config_buf = kzalloc(len + 1, GFP_ATOMIC); if (!dgap_config_buf) { DPR_INIT(("dgap_do_config_load - unable to allocate memory for file\n")); dgap_driver_state = DRIVER_NEED_CONFIG_LOAD; @@ -99,7 +90,7 @@ void dgap_do_config_load(uchar __user *uaddr, int len) to_addr += n; from_addr += n; n = U2BSIZE; - } + } dgap_config_buf[orig_len] = '\0'; @@ -130,8 +121,8 @@ int dgap_after_config_loaded(void) /* * allocate flip buffer for board. */ - dgap_Board[i]->flipbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC); - dgap_Board[i]->flipflagbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC); + dgap_Board[i]->flipbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC); + dgap_Board[i]->flipflagbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC); } return rc; @@ -166,9 +157,9 @@ static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *fro /* increment counts */ len -= n; to_addr += n; - from_addr += n; + from_addr += n; n = U2BSIZE; - } + } return 0; } @@ -195,7 +186,7 @@ void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len) */ for (i = 0; i < 16; i++) writeb(0, addr + POSTAREA + i); - + /* * Download bios */ @@ -364,7 +355,7 @@ static void dgap_do_reset_board(struct board_t *brd) int i = 0; if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase || !brd->re_map_port) { - DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n", + DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n", brd, brd ? brd->re_map_membase : 0, brd ? brd->re_map_port : 0)); return; } @@ -470,7 +461,7 @@ static void dgap_get_vpd(struct board_t *brd) /* * To get to the OTPROM memory, we have to send the boards base - * address or'ed with 1 into the PCI Rom Address location. + * address or'ed with 1 into the PCI Rom Address location. */ magic = brd->membase | 0x01; pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic); @@ -492,7 +483,7 @@ static void dgap_get_vpd(struct board_t *brd) * for the VPD offset. */ while (base_offset <= EXPANSION_ROM_SIZE) { - + /* * Lots of magic numbers here. 
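The dgap hunks above retire the driver's private dgap_driver_kzmalloc() wrapper (a kmalloc() followed by memset()) in favor of the kernel's kzalloc(), which returns zeroed memory directly. A minimal sketch of the substitution, with a hypothetical helper name and length:

    #include <linux/slab.h>

    static void *demo_alloc(size_t len)
    {
            /* old: p = kmalloc(len, GFP_KERNEL); if (p) memset(p, 0, len); */
            return kzalloc(len, GFP_KERNEL);        /* NULL on failure */
    }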
* @@ -551,7 +542,7 @@ static void dgap_get_vpd(struct board_t *brd) */ void dgap_poll_tasklet(unsigned long data) { - struct board_t *bd = (struct board_t *) data; + struct board_t *bd = (struct board_t *) data; ulong lock_flags; ulong lock_flags2; char *vaddr; @@ -816,13 +807,13 @@ void dgap_poll_tasklet(unsigned long data) * *=======================================================================*/ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds) -{ +{ char *vaddr = NULL; struct cm_t *cm_addr = NULL; uint count; uint n; u16 head; - u16 tail; + u16 tail; if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; @@ -833,7 +824,7 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n if (ch->ch_bd->state == BOARD_FAILED) { DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__)); return; - } + } /* * Make sure the pointers are in range before @@ -847,13 +838,13 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n cm_addr = (struct cm_t *) (vaddr + CMDBUF); head = readw(&(cm_addr->cm_head)); - /* + /* * Forget it if pointers out of range. */ if (head >= (CMDMAX - CMDSTART) || (head & 03)) { DPR_CORE(("%s:%d pointers out of range, failing board!\n", __FILE__, __LINE__)); ch->ch_bd->state = BOARD_FAILED; - return; + return; } /* @@ -869,7 +860,7 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n writew(head, &(cm_addr->cm_head)); /* - * Wait if necessary before updating the head + * Wait if necessary before updating the head * pointer to limit the number of outstanding * commands to the FEP. If the time spent waiting * is outlandish, declare the FEP dead. @@ -890,14 +881,14 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n return; } udelay(10); - } + } } /*======================================================================= * * dgap_cmdw - Sends a 1 word command to the FEP. - * + * * ch - Pointer to channel structure. * cmd - Command to be sent. * word - Integer containing word to be sent. @@ -936,7 +927,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds) cm_addr = (struct cm_t *) (vaddr + CMDBUF); head = readw(&(cm_addr->cm_head)); - /* + /* * Forget it if pointers out of range. */ if (head >= (CMDMAX - CMDSTART) || (head & 03)) { @@ -958,7 +949,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds) /* * Wait if necessary before updating the head - * pointer to limit the number of outstanding + * pointer to limit the number of outstanding * commands to the FEP. If the time spent waiting * is outlandish, declare the FEP dead. */ @@ -978,7 +969,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds) return; } udelay(10); - } + } } @@ -986,7 +977,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds) /*======================================================================= * * dgap_cmdw_ext - Sends a extended word command to the FEP. - * + * * ch - Pointer to channel structure. * cmd - Command to be sent. * word - Integer containing word to be sent. @@ -1025,7 +1016,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds) cm_addr = (struct cm_t *) (vaddr + CMDBUF); head = readw(&(cm_addr->cm_head)); - /* + /* * Forget it if pointers out of range. 
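The dgap_cmdb()/dgap_cmdw() hunks above share one pattern: queue the command at the FEP's head pointer, then wait, with a bounded spin, for the FEP to catch up, and mark the board failed if it never does. A condensed sketch of that bounded wait; the helper name, the iteration bound, and the outstanding-command test are illustrative, not the driver's exact bookkeeping:

    static void demo_wait_for_fep(struct channel_t *ch, struct cm_t *cm_addr,
                                  u16 head, uint ncmds)
    {
            int tries = 2000;                       /* hypothetical bound */
            u16 tail = readw(&cm_addr->cm_tail);

            while (demo_outstanding(head, tail) > ncmds) {  /* hypothetical */
                    if (--tries == 0) {
                            ch->ch_bd->state = BOARD_FAILED; /* FEP is dead */
                            return;
                    }
                    udelay(10);
                    tail = readw(&cm_addr->cm_tail);
            }
    }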
*/ if (head >= (CMDMAX - CMDSTART) || (head & 03)) { @@ -1060,7 +1051,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds) /* * Wait if necessary before updating the head - * pointer to limit the number of outstanding + * pointer to limit the number of outstanding * commands to the FEP. If the time spent waiting * is outlandish, declare the FEP dead. */ @@ -1080,7 +1071,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds) return; } udelay(10); - } + } } @@ -1102,7 +1093,7 @@ void dgap_wmove(struct channel_t *ch, char *buf, uint cnt) if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; - + /* * Check parameters. */ @@ -1172,9 +1163,9 @@ uint dgap_get_custom_baud(struct channel_t *ch) /* * Go get from fep mem, what the fep - * believes the custom baud rate is. + * believes the custom baud rate is. */ - offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) + + offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) + (ch->ch_portnum * 0x28) + LINE_SPEED)); value = readw(vaddr + offset); @@ -1210,7 +1201,7 @@ void dgap_firmware_reset_port(struct channel_t *ch) /*======================================================================= - * + * * dgap_param - Set Digi parameters. * * struct tty_struct * - TTY for port. @@ -1244,7 +1235,7 @@ int dgap_param(struct tty_struct *tty) if (!bd || bd->magic != DGAP_BOARD_MAGIC) return -ENXIO; - bs = ch->ch_bs; + bs = ch->ch_bs; if (!bs) return -ENXIO; @@ -1284,13 +1275,13 @@ int dgap_param(struct tty_struct *tty) /* * Now go get from fep mem, what the fep - * believes the custom baud rate is. + * believes the custom baud rate is. */ ch->ch_baud_info = ch->ch_custom_speed = dgap_get_custom_baud(ch); DPR_PARAM(("param: Got %d speed\n", ch->ch_custom_speed)); - /* Handle transition from B0 */ + /* Handle transition from B0 */ if (ch->ch_flags & CH_BAUD0) { ch->ch_flags &= ~(CH_BAUD0); ch->ch_mval |= (D_RTS(ch)|D_DTR(ch)); @@ -1352,7 +1343,7 @@ int dgap_param(struct tty_struct *tty) baud = 0; } - if (baud == 0) + if (baud == 0) baud = 9600; ch->ch_baud_info = baud; @@ -1425,7 +1416,7 @@ int dgap_param(struct tty_struct *tty) dgap_cmdw(ch, SCFLAG, (u16) cflag, 0); } - /* Handle transition from B0 */ + /* Handle transition from B0 */ if (ch->ch_flags & CH_BAUD0) { ch->ch_flags &= ~(CH_BAUD0); ch->ch_mval |= (D_RTS(ch)|D_DTR(ch)); @@ -1475,7 +1466,7 @@ int dgap_param(struct tty_struct *tty) if (ch->ch_digi.digi_flags & RTSPACE) hflow |= D_RTS(ch); if (ch->ch_digi.digi_flags & DTRPACE) - hflow |= D_DTR(ch); + hflow |= D_DTR(ch); if (ch->ch_digi.digi_flags & CTSPACE) hflow |= D_CTS(ch); if (ch->ch_digi.digi_flags & DSRPACE) @@ -1488,7 +1479,7 @@ int dgap_param(struct tty_struct *tty) /* Okay to have channel and board locks held calling this */ dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0); - } + } /* @@ -1507,7 +1498,7 @@ int dgap_param(struct tty_struct *tty) } /* - * Set modem control lines. + * Set modem control lines. */ mval ^= ch->ch_mforce & (mval ^ ch->ch_mval); @@ -1524,12 +1515,12 @@ int dgap_param(struct tty_struct *tty) } /* - * Read modem signals, and then call carrier function. + * Read modem signals, and then call carrier function. */ ch->ch_mistat = readb(&(bs->m_stat)); dgap_carrier(ch); - /* + /* * Set the start and stop characters. */ if (ch->ch_startc != ch->ch_fepstartc || ch->ch_stopc != ch->ch_fepstopc) { @@ -1542,7 +1533,7 @@ int dgap_param(struct tty_struct *tty) /* * Set the Auxiliary start and stop characters. 
- */ + */ if (ch->ch_astartc != ch->ch_fepastartc || ch->ch_astopc != ch->ch_fepastopc) { ch->ch_fepastartc = ch->ch_astartc; ch->ch_fepastopc = ch->ch_astopc; @@ -1609,7 +1600,7 @@ void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char * } else { /* save value examination in next state */ ch->pscan_savechar = c; - ch->pscan_state = 2; + ch->pscan_state = 2; } break; @@ -1637,7 +1628,7 @@ void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char * count += 1; ch->pscan_state = 0; - } + } } *len = count; DPR_PSCAN(("dgap_parity_scan finish\n")); @@ -1721,9 +1712,8 @@ static int dgap_event(struct board_t *bd) /* * Make sure the interrupt is valid. */ - if ( port >= bd->nasync) { + if (port >= bd->nasync) goto next; - } if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) { goto next; @@ -1779,7 +1769,7 @@ static int dgap_event(struct board_t *bd) } /* - * Process Modem change signals. + * Process Modem change signals. */ if (reason & IFMODEM) { ch->ch_mistat = modem; @@ -1813,7 +1803,7 @@ static int dgap_event(struct board_t *bd) ch->ch_tun.un_flags &= ~UN_LOW; if (ch->ch_tun.un_flags & UN_ISOPEN) { - if ((ch->ch_tun.un_tty->flags & + if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) ch->ch_tun.un_tty->ldisc->ops->write_wakeup) @@ -1841,7 +1831,7 @@ static int dgap_event(struct board_t *bd) if (ch->ch_pun.un_flags & UN_LOW) { ch->ch_pun.un_flags &= ~UN_LOW; if (ch->ch_pun.un_flags & UN_ISOPEN) { - if ((ch->ch_pun.un_tty->flags & + if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) ch->ch_pun.un_tty->ldisc->ops->write_wakeup) @@ -1879,7 +1869,7 @@ static int dgap_event(struct board_t *bd) if (ch->ch_tun.un_flags & UN_EMPTY) { ch->ch_tun.un_flags &= ~UN_EMPTY; if (ch->ch_tun.un_flags & UN_ISOPEN) { - if ((ch->ch_tun.un_tty->flags & + if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) ch->ch_tun.un_tty->ldisc->ops->write_wakeup) @@ -1905,7 +1895,7 @@ static int dgap_event(struct board_t *bd) if (ch->ch_pun.un_flags & UN_EMPTY) { ch->ch_pun.un_flags &= ~UN_EMPTY; if (ch->ch_pun.un_flags & UN_ISOPEN) { - if ((ch->ch_pun.un_tty->flags & + if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) ch->ch_pun.un_tty->ldisc->ops->write_wakeup) @@ -1945,4 +1935,4 @@ static int dgap_event(struct board_t *bd) DGAP_UNLOCK(bd->bd_lock, lock_flags); return 0; -} +} diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c index ff9d19449b43c1..36fd93d3f5f632 100644 --- a/drivers/staging/dgap/dgap_parse.c +++ b/drivers/staging/dgap/dgap_parse.c @@ -42,6 +42,7 @@ #include "dgap_types.h" #include "dgap_fep5.h" #include "dgap_driver.h" +#include "dgap_parse.h" #include "dgap_conf.h" diff --git a/drivers/staging/dgap/dgap_trace.c b/drivers/staging/dgap/dgap_trace.c index 0f9a9569ea2772..a53db9e0a5776f 100644 --- a/drivers/staging/dgap/dgap_trace.c +++ b/drivers/staging/dgap/dgap_trace.c @@ -17,15 +17,15 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * - * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE! + * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE! * * This is shared code between Digi's CVS archive and the * Linux Kernel sources. * Changing the source just for reformatting needlessly breaks * our CVS diff history. 
* - * Send any bug fixes/changes to: Eng.Linux at digi dot com. - * Thank you. + * Send any bug fixes/changes to: Eng.Linux at digi dot com. + * Thank you. * */ @@ -37,6 +37,7 @@ #include #include "dgap_driver.h" +#include "dgap_trace.h" #define TRC_TO_CONSOLE 1 @@ -107,16 +108,16 @@ void dgap_tracef(const char *fmt, ...) dgap_trcbufi = 0; initd++; - printk("dgap: tracing enabled - " TRC_DTRC + printk("dgap: tracing enabled - " TRC_DTRC " 0x%lx 0x%x\n", - (unsigned long)dgap_trcbuf, + (unsigned long)dgap_trcbuf, dgap_trcbuf_size); } # if defined(TRC_ON_OVERFLOW_WRAP_AROUND) /* * This is the less CPU-intensive way to do things. We simply - * wrap around before we fall off the end of the buffer. A + * wrap around before we fall off the end of the buffer. A * tilde (~) demarcates the current end of the trace. * * This method should be used if you are concerned about race @@ -131,14 +132,14 @@ void dgap_tracef(const char *fmt, ...) dgap_trcbufi = 0; } - strcpy(&dgap_trcbuf[dgap_trcbufi], buf); + strcpy(&dgap_trcbuf[dgap_trcbufi], buf); dgap_trcbufi += lenbuf; dgap_trcbuf[dgap_trcbufi] = '~'; # elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER) /* * This is the more CPU-intensive way to do things. If we - * venture into the last 1/8 of the buffer, we shift the + * venture into the last 1/8 of the buffer, we shift the * last 7/8 of the buffer forward, wiping out the first 1/8. * Advantage: No wrap-around, only truncation from the * beginning. diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c index 2a7a37298da445..39fb4dfb8b7e1c 100644 --- a/drivers/staging/dgap/dgap_tty.c +++ b/drivers/staging/dgap/dgap_tty.c @@ -17,22 +17,22 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * - * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE! + * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE! * * This is shared code between Digi's CVS archive and the * Linux Kernel sources. * Changing the source just for reformatting needlessly breaks * our CVS diff history. * - * Send any bug fixes/changes to: Eng.Linux at digi dot com. - * Thank you. + * Send any bug fixes/changes to: Eng.Linux at digi dot com. + * Thank you. */ /************************************************************************ - * + * * This file implements the tty driver functionality for the * FEP5 based product lines. 
- * + * ************************************************************************ * * $Id: dgap_tty.c,v 1.3 2011/06/23 12:11:31 markh Exp $ @@ -155,7 +155,7 @@ static const struct tty_operations dgap_tty_ops = { .flush_chars = dgap_tty_flush_chars, .ioctl = dgap_tty_ioctl, .set_termios = dgap_tty_set_termios, - .stop = dgap_tty_stop, + .stop = dgap_tty_stop, .start = dgap_tty_start, .throttle = dgap_tty_throttle, .unthrottle = dgap_tty_unthrottle, @@ -173,11 +173,11 @@ static const struct tty_operations dgap_tty_ops = { /************************************************************************ - * + * * TTY Initialization/Cleanup Functions - * + * ************************************************************************/ - + /* * dgap_tty_preinit() * @@ -187,7 +187,7 @@ int dgap_tty_preinit(void) { unsigned long flags; - DGAP_LOCK(dgap_global_lock, flags); + DGAP_LOCK(dgap_global_lock, flags); /* * Allocate a buffer for doing the copy from user space to @@ -202,7 +202,7 @@ int dgap_tty_preinit(void) DPR_INIT(("unable to allocate tmp write buf")); return (-ENOMEM); } - + DGAP_UNLOCK(dgap_global_lock, flags); return(0); } @@ -226,14 +226,14 @@ int dgap_tty_register(struct board_t *brd) brd->SerialDriver->name_base = 0; brd->SerialDriver->major = 0; brd->SerialDriver->minor_start = 0; - brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL; - brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL; + brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL; + brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL; brd->SerialDriver->init_termios = DgapDefaultTermios; brd->SerialDriver->driver_name = DRVSTR; brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK); /* The kernel wants space to store pointers to tty_structs */ - brd->SerialDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL); + brd->SerialDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL); if (!brd->SerialDriver->ttys) return(-ENOMEM); @@ -259,14 +259,14 @@ int dgap_tty_register(struct board_t *brd) brd->PrintDriver->name_base = 0; brd->PrintDriver->major = 0; brd->PrintDriver->minor_start = 0; - brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL; + brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL; brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL; brd->PrintDriver->init_termios = DgapDefaultTermios; brd->PrintDriver->driver_name = DRVSTR; brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK); /* The kernel wants space to store pointers to tty_structs */ - brd->PrintDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL); + brd->PrintDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL); if (!brd->PrintDriver->ttys) return(-ENOMEM); @@ -380,7 +380,7 @@ int dgap_tty_init(struct board_t *brd) */ for (i = 0; i < brd->nasync; i++) { if (!brd->channels[i]) { - brd->channels[i] = dgap_driver_kzmalloc(sizeof(struct channel_t), GFP_ATOMIC); + brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_ATOMIC); if (!brd->channels[i]) { DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n", __FILE__, __LINE__)); @@ -450,7 +450,7 @@ int dgap_tty_init(struct board_t *brd) /* * Set queue water marks, interrupt mask, - * and general tty parameters. + * and general tty parameters. */ ch->ch_tlw = tlw = ch->ch_tsize >= 2000 ? 
((ch->ch_tsize * 5) / 8) : ch->ch_tsize / 2; @@ -479,7 +479,7 @@ int dgap_tty_init(struct board_t *brd) writew(0, &(ch->ch_bs->edelay)); else writew(100, &(ch->ch_bs->edelay)); - + writeb(1, &(ch->ch_bs->idata)); } @@ -506,7 +506,7 @@ void dgap_tty_post_uninit(void) * dgap_tty_uninit() * * Uninitialize the TTY portion of this driver. Free all memory and - * resources. + * resources. */ void dgap_tty_uninit(struct board_t *brd) { @@ -611,7 +611,7 @@ static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *b if (n == 0) { return; } - + /* * Copy as much data as will fit. */ @@ -661,9 +661,9 @@ static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *b /*======================================================================= * * dgap_input - Process received data. - * + * * ch - Pointer to channel structure. - * + * *=======================================================================*/ void dgap_input(struct channel_t *ch) @@ -704,8 +704,8 @@ void dgap_input(struct channel_t *ch) DGAP_LOCK(bd->bd_lock, lock_flags); DGAP_LOCK(ch->ch_lock, lock_flags2); - /* - * Figure the number of characters in the buffer. + /* + * Figure the number of characters in the buffer. * Exit immediately if none. */ @@ -775,13 +775,13 @@ void dgap_input(struct channel_t *ch) len = min(len, (N_TTY_BUF_SIZE - 1)); ld = tty_ldisc_ref(tp); - + #ifdef TTY_DONT_FLIP /* * If the DONT_FLIP flag is on, don't flush our buffer, and act - * like the ld doesn't have any space to put the data right now. + * like the ld doesn't have any space to put the data right now. */ - if (test_bit(TTY_DONT_FLIP, &tp->flags)) + if (test_bit(TTY_DONT_FLIP, &tp->flags)) len = 0; #endif @@ -879,9 +879,9 @@ void dgap_input(struct channel_t *ch) } -/************************************************************************ +/************************************************************************ * Determines when CARRIER changes state and takes appropriate - * action. + * action. ************************************************************************/ void dgap_carrier(struct channel_t *ch) { @@ -889,7 +889,7 @@ void dgap_carrier(struct channel_t *ch) int virt_carrier = 0; int phys_carrier = 0; - + DPR_CARR(("dgap_carrier called...\n")); if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) @@ -917,11 +917,11 @@ void dgap_carrier(struct channel_t *ch) if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) { virt_carrier = 1; - } + } if (ch->ch_c_cflag & CLOCAL) { virt_carrier = 1; - } + } DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier)); @@ -968,7 +968,7 @@ void dgap_carrier(struct channel_t *ch) * "make pretend that carrier is there". 
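dgap_carrier() above merges a physical DCD indication with a virtual one forced by DIGI_FORCEDCD or CLOCAL, and only hangs the port up when a previously seen carrier (CH_CD) disappears from both views. A compressed sketch of that decision; the D_CD() accessor for the cached modem-status byte is assumed here:

    virt_carrier = 0;
    phys_carrier = (ch->ch_mistat & D_CD(ch)) ? 1 : 0;     /* D_CD() assumed */

    if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
            virt_carrier = 1;
    if (ch->ch_c_cflag & CLOCAL)
            virt_carrier = 1;

    /* carrier was up (CH_CD) and both views now say it is gone: hang up */
    if (!virt_carrier && (ch->ch_flags & CH_CD) && !phys_carrier) {
            if (ch->ch_tun.un_open_count > 0)
                    tty_hangup(ch->ch_tun.un_tty);
            if (ch->ch_pun.un_open_count > 0)
                    tty_hangup(ch->ch_pun.un_tty);
    }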
*/ if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) && - (phys_carrier == 0)) + (phys_carrier == 0)) { /* @@ -991,7 +991,7 @@ void dgap_carrier(struct channel_t *ch) tty_hangup(ch->ch_tun.un_tty); } - if (ch->ch_pun.un_open_count > 0) { + if (ch->ch_pun.un_open_count > 0) { DPR_CARR(("Sending pr hangup\n")); tty_hangup(ch->ch_pun.un_tty); } @@ -1002,7 +1002,7 @@ void dgap_carrier(struct channel_t *ch) */ if (virt_carrier == 1) ch->ch_flags |= CH_FCAR; - else + else ch->ch_flags &= ~CH_FCAR; if (phys_carrier == 1) @@ -1013,9 +1013,9 @@ void dgap_carrier(struct channel_t *ch) /************************************************************************ - * + * * TTY Entry points and helper functions - * + * ************************************************************************/ /* @@ -1165,7 +1165,7 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file) */ dgap_param(tty); - /* + /* * follow protocol for opening port */ @@ -1195,13 +1195,13 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file) } -/* +/* * dgap_block_til_ready() * * Wait for DCD, if needed. */ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch) -{ +{ int retval = 0; struct un_t *un = NULL; ulong lock_flags; @@ -1246,7 +1246,7 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struc * If either unit is in the middle of the fragile part of close, * we just cannot touch the channel safely. * Go back to sleep, knowing that when the channel can be - * touched safely, the close routine will signal the + * touched safely, the close routine will signal the * ch_wait_flags to wake us back up. */ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) { @@ -1354,7 +1354,7 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struc * dgap_tty_hangup() * * Hangup the port. Like a close, but don't wait for output to drain. - */ + */ static void dgap_tty_hangup(struct tty_struct *tty) { struct board_t *bd; @@ -1436,7 +1436,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file) */ APR(("tty->count is 1, un open count is %d\n", un->un_open_count)); un->un_open_count = 1; - } + } if (--un->un_open_count < 0) { APR(("bad serial port open count of %d\n", un->un_open_count)); @@ -1497,7 +1497,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file) dgap_cmdb( ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0 ); /* - * Go to sleep to ensure RTS/DTR + * Go to sleep to ensure RTS/DTR * have been dropped for modems to see it. */ if (ch->ch_close_delay) { @@ -1535,7 +1535,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file) wake_up_interruptible(&un->un_flags_wait); DGAP_UNLOCK(ch->ch_lock, lock_flags); - + DPR_BASIC(("dgap_tty_close - complete\n")); } @@ -1637,7 +1637,7 @@ static int dgap_tty_chars_in_buffer(struct tty_struct *tty) } } - DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n", + DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n", ch->ch_portnum, chars, thead, ttail, ch->ch_tsize)); return(chars); } @@ -1702,14 +1702,14 @@ static int dgap_wait_for_drain(struct tty_struct *tty) } -/* +/* * dgap_maxcps_room * * Reduces bytes_available to the max number of characters * that can be sent currently given the maxcps value, and * returns the new bytes_available. This only affects printer * output. 
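dgap_maxcps_room(), whose header comment closes just below, throttles printer output only: it clips the caller's bytes_available so the configured characters-per-second rate and buffer size are never exceeded. A reduced sketch of that clamp; the room calculation itself sits behind a hypothetical helper because the hunks above do not show the full jiffies arithmetic:

    if (un->un_type == DGAP_PRINT &&
        ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
            int cps_limit = demo_cps_room(ch);      /* hypothetical helper */

            if (cps_limit < 0)
                    cps_limit = 0;                  /* no room in the buffer */
            bytes_available = min(cps_limit, bytes_available);
    }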
- */ + */ static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available) { struct channel_t *ch = NULL; @@ -1750,7 +1750,7 @@ static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available) } else { /* no room in the buffer */ - cps_limit = 0; + cps_limit = 0; } bytes_available = min(cps_limit, bytes_available); @@ -1793,7 +1793,7 @@ static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event) * dgap_tty_write_room() * * Return space available in Tx buffer - */ + */ static int dgap_tty_write_room(struct tty_struct *tty) { struct channel_t *ch = NULL; @@ -1831,7 +1831,7 @@ static int dgap_tty_write_room(struct tty_struct *tty) ret = dgap_maxcps_room(tty, ret); /* - * If we are printer device, leave space for + * If we are printer device, leave space for * possibly both the on and off strings. */ if (un->un_type == DGAP_PRINT) { @@ -1856,7 +1856,7 @@ static int dgap_tty_write_room(struct tty_struct *tty) */ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY); DGAP_UNLOCK(ch->ch_lock, lock_flags); - + DPR_WRITE(("dgap_tty_write_room - %d tail: %d head: %d\n", ret, tail, head)); return(ret); @@ -1867,7 +1867,7 @@ static int dgap_tty_write_room(struct tty_struct *tty) * dgap_tty_put_char() * * Put a character into ch->ch_buf - * + * * - used by the line discipline for OPOST processing */ static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c) @@ -2094,7 +2094,7 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int if (from_user) { DGAP_UNLOCK(ch->ch_lock, lock_flags); up(&dgap_TmpWriteSem); - } + } else { DGAP_UNLOCK(ch->ch_lock, lock_flags); } @@ -2206,12 +2206,12 @@ static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file, if (set & TIOCM_RTS) { ch->ch_mforce |= D_RTS(ch); ch->ch_mval |= D_RTS(ch); - } + } if (set & TIOCM_DTR) { ch->ch_mforce |= D_DTR(ch); ch->ch_mval |= D_DTR(ch); - } + } if (clear & TIOCM_RTS) { ch->ch_mforce |= D_RTS(ch); @@ -2316,7 +2316,7 @@ static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout) /* * dgap_send_xchar() - * + * * send a high priority character, called by ld. */ static void dgap_tty_send_xchar(struct tty_struct *tty, char c) @@ -2529,7 +2529,7 @@ static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, uns /* - * dgap_tty_digigeta() + * dgap_tty_digigeta() * * Ioctl to get the information for ditty. * @@ -2571,7 +2571,7 @@ static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retin /* - * dgap_tty_digiseta() + * dgap_tty_digiseta() * * Ioctl to set the information for ditty. * @@ -2614,10 +2614,10 @@ static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t)); - if (ch->ch_digi.digi_maxcps < 1) + if (ch->ch_digi.digi_maxcps < 1) ch->ch_digi.digi_maxcps = 1; - if (ch->ch_digi.digi_maxcps > 10000) + if (ch->ch_digi.digi_maxcps > 10000) ch->ch_digi.digi_maxcps = 10000; if (ch->ch_digi.digi_bufsize < 10) @@ -2647,7 +2647,7 @@ static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i /* - * dgap_tty_digigetedelay() + * dgap_tty_digigetedelay() * * Ioctl to get the current edelay setting. 
* @@ -2689,7 +2689,7 @@ static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo) /* - * dgap_tty_digisetedelay() + * dgap_tty_digisetedelay() * * Ioctl to set the EDELAY setting * @@ -2783,7 +2783,7 @@ static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinf /* - * dgap_tty_digisetcustombaud() + * dgap_tty_digisetcustombaud() * * Ioctl to set the custom baud rate setting */ @@ -2898,7 +2898,7 @@ static void dgap_tty_throttle(struct tty_struct *tty) un = tty->driver_data; if (!un || un->magic != DGAP_UNIT_MAGIC) return; - + ch = un->un_ch; if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; @@ -2938,7 +2938,7 @@ static void dgap_tty_unthrottle(struct tty_struct *tty) un = tty->driver_data; if (!un || un->magic != DGAP_UNIT_MAGIC) return; - + ch = un->un_ch; if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; @@ -2979,7 +2979,7 @@ static void dgap_tty_start(struct tty_struct *tty) un = tty->driver_data; if (!un || un->magic != DGAP_UNIT_MAGIC) return; - + ch = un->un_ch; if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; @@ -3016,7 +3016,7 @@ static void dgap_tty_stop(struct tty_struct *tty) un = tty->driver_data; if (!un || un->magic != DGAP_UNIT_MAGIC) return; - + ch = un->un_ch; if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; @@ -3039,7 +3039,7 @@ static void dgap_tty_stop(struct tty_struct *tty) } -/* +/* * dgap_tty_flush_chars() * * Flush the cook buffer @@ -3066,7 +3066,7 @@ static void dgap_tty_flush_chars(struct tty_struct *tty) un = tty->driver_data; if (!un || un->magic != DGAP_UNIT_MAGIC) return; - + ch = un->un_ch; if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; @@ -3092,7 +3092,7 @@ static void dgap_tty_flush_chars(struct tty_struct *tty) /* * dgap_tty_flush_buffer() - * + * * Flush Tx buffer (make in == out) */ static void dgap_tty_flush_buffer(struct tty_struct *tty) @@ -3110,7 +3110,7 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty) un = tty->driver_data; if (!un || un->magic != DGAP_UNIT_MAGIC) return; - + ch = un->un_ch; if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) return; @@ -3153,7 +3153,7 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty) * The IOCTL function and all of its helpers * *****************************************************************************/ - + /* * dgap_tty_ioctl() * @@ -3186,7 +3186,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, if (!bd || bd->magic != DGAP_BOARD_MAGIC) return (-ENODEV); - DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n", + DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n", ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg)); DGAP_LOCK(bd->bd_lock, lock_flags); @@ -3205,7 +3205,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, case TCSBRK: /* - * TCSBRK is SVID version: non-zero arg --> no break + * TCSBRK is SVID version: non-zero arg --> no break * this behaviour is exploited by tcdrain(). 
* * According to POSIX.1 spec (7.2.2.1.2) breaks should be @@ -3236,7 +3236,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, DGAP_UNLOCK(ch->ch_lock, lock_flags2); DGAP_UNLOCK(bd->bd_lock, lock_flags); - DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", + DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg)); return(0); @@ -3270,7 +3270,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, DGAP_UNLOCK(ch->ch_lock, lock_flags2); DGAP_UNLOCK(bd->bd_lock, lock_flags); - DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", + DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg)); return(0); @@ -3303,11 +3303,11 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, DGAP_UNLOCK(ch->ch_lock, lock_flags2); DGAP_UNLOCK(bd->bd_lock, lock_flags); - DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", + DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg)); return 0; - + case TIOCCBRK: /* * FEP5 doesn't support turning off a break unconditionally. @@ -3343,7 +3343,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, DGAP_UNLOCK(bd->bd_lock, lock_flags); return(0); - + case TIOCMGET: DGAP_UNLOCK(ch->ch_lock, lock_flags2); DGAP_UNLOCK(bd->bd_lock, lock_flags); @@ -3359,8 +3359,8 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, /* * Here are any additional ioctl's that we want to implement */ - - case TCFLSH: + + case TCFLSH: /* * The linux tty driver doesn't have a flush * input routine for the driver, assuming all backed @@ -3369,7 +3369,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, * act on the ioctl, but then lie and say we didn't * so the line discipline will process the flush * also. 
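The TCFLSH comment above describes the trick the case below relies on: the driver flushes what it can on its side, then deliberately returns -ENOIOCTLCMD so the line discipline still runs its own flush. Reduced to its core, the pattern looks roughly like this, with the driver-side flush folded into a hypothetical helper:

    case TCFLSH:
            rc = tty_check_change(tty);
            if (rc)
                    return rc;
            if (arg == TCIFLUSH || arg == TCIOFLUSH)
                    demo_flush_input(ch);           /* hypothetical helper */
            /* pretend we did not recognize the ioctl */
            return -ENOIOCTLCMD;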
- */ + */ rc = tty_check_change(tty); if (rc) { DGAP_UNLOCK(ch->ch_lock, lock_flags2); @@ -3407,13 +3407,13 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, tty_wakeup(tty); DGAP_LOCK(bd->bd_lock, lock_flags); DGAP_LOCK(ch->ch_lock, lock_flags2); - } + } - /* pretend we didn't recognize this IOCTL */ + /* pretend we didn't recognize this IOCTL */ DGAP_UNLOCK(ch->ch_lock, lock_flags2); DGAP_UNLOCK(bd->bd_lock, lock_flags); - DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n", + DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n", __LINE__, ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg)); return(-ENOIOCTLCMD); @@ -3445,7 +3445,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, return(-EINTR); } - DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", + DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n", ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg)); /* pretend we didn't recognize this */ @@ -3462,7 +3462,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, } /* pretend we didn't recognize this */ - return(-ENOIOCTLCMD); + return(-ENOIOCTLCMD); case TCXONC: /* @@ -3572,7 +3572,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, DGAP_UNLOCK(bd->bd_lock, lock_flags); DPR_IOCTL(("dgap_tty_ioctl - in default\n")); - DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n", + DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n", dgap_ioctl_name(cmd), cmd, arg)); return(-ENOIOCTLCMD); diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c index 638c5da43c85fb..1f4aa2eca437bf 100644 --- a/drivers/staging/dgap/downld.c +++ b/drivers/staging/dgap/downld.c @@ -24,7 +24,7 @@ ** ** This is the daemon that sends the fep, bios, and concentrator images ** from user space to the driver. -** BUGS: +** BUGS: ** If the file changes in the middle of the download, you probably ** will get what you deserve. ** @@ -121,7 +121,7 @@ struct downld_t *dp; /* conc. download */ /* - * The same for either the FEP or the BIOS. + * The same for either the FEP or the BIOS. * Append the downldio header, issue the ioctl, then free * the buffer. Not horribly CPU efficient, but quite RAM efficient. */ @@ -136,7 +136,7 @@ void squirt(int req_type, int bdid, struct image_info *ii) /* * If this binary comes from a file, stat it to see how * large it is. Yes, we intentionally do this each - * time for the binary may change between loads. + * time for the binary may change between loads. */ if (ii->pathname) { @@ -144,7 +144,7 @@ void squirt(int req_type, int bdid, struct image_info *ii) if (sfd < 0 ) { myperror(ii->pathname); - goto squirt_end; + goto squirt_end; } if (fstat(sfd, &sb) == -1 ) { @@ -152,7 +152,7 @@ void squirt(int req_type, int bdid, struct image_info *ii) goto squirt_end; } - ii->len = sb.st_size ; + ii->len = sb.st_size; } size_buf = ii->len + sizeof(struct downldio); @@ -165,7 +165,7 @@ void squirt(int req_type, int bdid, struct image_info *ii) dliop = (struct downldio *) malloc(size_buf); if (dliop == NULL) { - fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n", + fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n", pgm, size_buf); exit (1); } @@ -185,7 +185,7 @@ void squirt(int req_type, int bdid, struct image_info *ii) if (debugflag) printf("sending %d bytes of %s %s from %s\n", - ii->len, + ii->len, (ii->type == IFEP) ? "FEP" : (ii->type == IBIOS) ? "BIOS" : "CONFIG", ii->name ? 
ii->name : "", (ii->pathname) ? ii->pathname : "internal image" ); @@ -209,13 +209,13 @@ void squirt(int req_type, int bdid, struct image_info *ii) /* - * See if we need to reload the download image in core - * + * See if we need to reload the download image in core + * */ void consider_file_rescan(struct image_info *ii) { - int sfd ; - int len ; + int sfd; + int len; struct stat sb; /* This operation only makes sense when we're working from a file */ @@ -232,14 +232,14 @@ void consider_file_rescan(struct image_info *ii) myperror(ii->pathname); exit(1); } - - /* If the file hasn't changed since we last did this, - * and we have not done a free() on the image, bail + + /* If the file hasn't changed since we last did this, + * and we have not done a free() on the image, bail */ if (ii->image && (sb.st_mtime == ii->mtime)) goto end_rescan; - ii->len = len = sb.st_size ; + ii->len = len = sb.st_size; /* Record the timestamp of the file */ ii->mtime = sb.st_mtime; @@ -249,12 +249,12 @@ void consider_file_rescan(struct image_info *ii) * have a memory leak. */ if ( ii->image ) { - free( ii->image ); + free( ii->image ); /* ii->image = NULL; */ /* not necessary */ } - /* This image will be kept only long enough for the - * download to happen. After sending the last block, + /* This image will be kept only long enough for the + * download to happen. After sending the last block, * it will be freed */ ii->image = malloc(len) ; @@ -267,14 +267,14 @@ void consider_file_rescan(struct image_info *ii) } if (read(sfd, ii->image, len) < len) { - fprintf(stderr,"%s: read error on %s; aborting\n", + fprintf(stderr,"%s: read error on %s; aborting\n", pgm, ii->pathname); exit (1); } end_rescan: close(sfd); - + } } @@ -284,12 +284,12 @@ void consider_file_rescan(struct image_info *ii) struct image_info * find_conc_image() { - int x ; - struct image_info *i = NULL ; + int x; + struct image_info *i = NULL; for ( x = 0; x < nimages; x++ ) { i=&image_list[x]; - + if(i->type != ICONC) continue; @@ -305,8 +305,8 @@ struct image_info * find_conc_image() */ if ((dp->dl_type != 'P' ) && ( ip->dl_srev == dp->dl_srev )) return i; - } - return NULL ; + } + return NULL; } @@ -378,7 +378,7 @@ int main(int argc, char **argv) ** the list before built in images so that the command line images ** can override the built in ones. */ - + /* allocate space for the list */ nimages = argc - 2; @@ -390,15 +390,15 @@ int main(int argc, char **argv) nimages += count; /* Really should just remove the variable "image_list".... robertl */ - image_list = images ; - + image_list = images; + /* get the images from the command line */ for(x = 2; x < argc; x++) { - int xx; + int xx; /* - * strip off any leading path information for - * determining file type + * strip off any leading path information for + * determining file type */ if( (fname = strrchr(argv[x],'/')) == NULL) fname = argv[x]; @@ -406,18 +406,18 @@ int main(int argc, char **argv) fname++; /* skip the slash */ for (xx = 0; xx < count; xx++) { - if (strcmp(fname, images[xx].fname) == 0 ) { + if (strcmp(fname, images[xx].fname) == 0 ) { images[xx].pathname = argv[x]; /* image should be NULL until */ /* space is malloced */ - images[xx].image = NULL ; + images[xx].image = NULL; } } } sleep(3); - + /* ** Endless loop: get a request from the fep, and service that request. 
*/ @@ -425,7 +425,7 @@ int main(int argc, char **argv) /* get the request */ if (debugflag) printf("b4 get ioctl..."); - + if (ioctl(fd,DIGI_DLREQ_GET, &dlio) == -1 ) { if (errorprint) { fprintf(stderr, @@ -438,7 +438,7 @@ int main(int argc, char **argv) if (debugflag) printf("dlio.req_type is %d bd %d\n", dlio.req_type,dlio.bdid); - + switch(dlio.req_type) { case DLREQ_BIOS: /* @@ -447,18 +447,18 @@ int main(int argc, char **argv) for ( x = 0; x < nimages; x++ ) { if(image_list[x].type != IBIOS) continue; - - if ((dlio.image.fi.type & FAMILY) == + + if ((dlio.image.fi.type & FAMILY) == image_list[x].family) { - - if ( image_list[x].family == T_CX ) { - if ((dlio.image.fi.type & BUSTYPE) + + if ( image_list[x].family == T_CX ) { + if ((dlio.image.fi.type & BUSTYPE) == T_PCIBUS ) { - if ( image_list[x].subtype + if ( image_list[x].subtype == T_PCIBUS ) break; } - else { + else { break; } } @@ -466,15 +466,15 @@ int main(int argc, char **argv) /* If subtype of image is T_PCIBUS, it is */ /* a PCI EPC image, so the board must */ /* have bus type T_PCIBUS to match */ - if ((dlio.image.fi.type & BUSTYPE) + if ((dlio.image.fi.type & BUSTYPE) == T_PCIBUS ) { - if ( image_list[x].subtype + if ( image_list[x].subtype == T_PCIBUS ) break; } - else { + else { /* NON PCI EPC doesn't use PCI image */ - if ( image_list[x].subtype + if ( image_list[x].subtype != T_PCIBUS ) break; } @@ -484,12 +484,12 @@ int main(int argc, char **argv) } else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) { /* PCXR board will break out of the loop here */ - if ( image_list[x].subtype == T_PCXR ) { + if ( image_list[x].subtype == T_PCXR ) { break; } } } - + if ( x >= nimages) { /* ** no valid images exist @@ -514,7 +514,7 @@ int main(int argc, char **argv) } squirt(dlio.req_type, dlio.bdid, &image_list[x]); break ; - + case DLREQ_FEP: /* ** find the fep image for this type @@ -522,17 +522,17 @@ int main(int argc, char **argv) for ( x = 0; x < nimages; x++ ) { if(image_list[x].type != IFEP) continue; - if( (dlio.image.fi.type & FAMILY) == + if( (dlio.image.fi.type & FAMILY) == image_list[x].family ) { - if ( image_list[x].family == T_CX ) { + if ( image_list[x].family == T_CX ) { /* C/X PCI board */ - if ((dlio.image.fi.type & BUSTYPE) + if ((dlio.image.fi.type & BUSTYPE) == T_PCIBUS ) { if ( image_list[x].subtype == T_PCIBUS ) break; } - else { + else { /* Regular CX */ break; } @@ -541,15 +541,15 @@ int main(int argc, char **argv) /* If subtype of image is T_PCIBUS, it is */ /* a PCI EPC image, so the board must */ /* have bus type T_PCIBUS to match */ - if ((dlio.image.fi.type & BUSTYPE) + if ((dlio.image.fi.type & BUSTYPE) == T_PCIBUS ) { - if ( image_list[x].subtype + if ( image_list[x].subtype == T_PCIBUS ) break; } - else { + else { /* NON PCI EPC doesn't use PCI image */ - if ( image_list[x].subtype + if ( image_list[x].subtype != T_PCIBUS ) break; } @@ -559,12 +559,12 @@ int main(int argc, char **argv) } else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) { /* PCXR board will break out of the loop here */ - if ( image_list[x].subtype == T_PCXR ) { + if ( image_list[x].subtype == T_PCXR ) { break; } } } - + if ( x >= nimages) { /* ** no valid images exist @@ -613,7 +613,7 @@ int main(int argc, char **argv) } break; - + case DLREQ_CONFIG: for ( x = 0; x < nimages; x++ ) { if(image_list[x].type != ICONFIG) @@ -658,15 +658,15 @@ int main(int argc, char **argv) */ for ( x = 0; x < nimages; x++ ) { ii=&image_list[x]; - + if(image_list[x].type != ICONC) continue; - + consider_file_rescan(ii) ; 
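
The consider_file_rescan() cleanups above do not change behaviour, so it may help to restate what the function does: the daemon stat()s the image file each time and re-reads it only when st_mtime differs from the timestamp recorded at the last load, freeing the stale copy first. A minimal user-space sketch of that mtime-gated reload, using hypothetical names (struct cached_image, reload_if_changed) rather than the driver's image_info fields:

#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

struct cached_image {
	char *data;	/* NULL until first load */
	size_t len;
	time_t mtime;	/* mtime recorded at last successful load */
};

/* Re-read 'path' into 'ci' only if the file changed since the last load.
 * Returns 0 on success (cached or reloaded), -1 on error.
 */
static int reload_if_changed(const char *path, struct cached_image *ci)
{
	struct stat sb;
	ssize_t n;
	int fd;

	if (stat(path, &sb) == -1)
		return -1;

	/* Unchanged and still resident: nothing to do. */
	if (ci->data && sb.st_mtime == ci->mtime)
		return 0;

	free(ci->data);
	ci->data = malloc(sb.st_size);
	if (!ci->data)
		return -1;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	n = read(fd, ci->data, sb.st_size);
	close(fd);
	if (n < (ssize_t)sb.st_size)
		return -1;

	ci->len = sb.st_size;
	ci->mtime = sb.st_mtime;
	return 0;
}

As in the driver, a matching mtime with the image still resident is the fast path; everything else falls through to free(), malloc() and a full read().
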
- + ip = (struct downld_t *) image_list[x].image; if (ip == NULL) continue; - + /* * When I removed Clusterport, I kept only the * code that I was SURE wasn't ClusterPort. @@ -674,11 +674,11 @@ int main(int argc, char **argv) */ if ((dp->dl_type != 'P' ) && - (ip->dl_lrev <= dp->dl_lrev ) && + (ip->dl_lrev <= dp->dl_lrev ) && ( dp->dl_lrev <= ip->dl_hrev)) break; } - + if ( x >= nimages ) { /* ** No valid images exist @@ -691,7 +691,7 @@ int main(int argc, char **argv) } continue; } - + } else { /* ** find image version required @@ -706,40 +706,40 @@ int main(int argc, char **argv) continue; } } - + /* ** download block of image */ - + offset = 1024 * dp->dl_seq; - + /* ** test if block requested within image */ - if ( offset < ii->len ) { - + if ( offset < ii->len ) { + /* ** if it is, determine block size, set segment, ** set size, set pointers, and copy block */ if (( bsize = ii->len - offset ) > 1024 ) bsize = 1024; - + /* ** copy image version info to download area */ dp->dl_srev = ip->dl_srev; dp->dl_lrev = ip->dl_lrev; dp->dl_hrev = ip->dl_hrev; - + dp->dl_seg = (64 * dp->dl_seq) + ip->dl_seg; dp->dl_size = bsize; - + down = (char *)&dp->dl_data[0]; image = (char *)((char *)ip + offset); - + memcpy(down, image, bsize); - } + } else { /* ** Image has been downloaded, set segment and @@ -747,24 +747,24 @@ int main(int argc, char **argv) */ dp->dl_seg = ip->dl_seg; dp->dl_size = 0; - + /* Now, we can release the concentrator */ /* image from memory if we're running */ /* from filesystem images */ - + if (ii->pathname) if (ii->image) { free(ii->image); - ii->image = NULL ; - } + ii->image = NULL; + } } - + if (debugflag) printf( "sending conc dl section %d to %s from %s\n", dp->dl_seq, ii->name, ii->pathname ? ii->pathname : "Internal Image"); - + if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) { if (errorprint) { fprintf(stderr, diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c index fdc1aabc7fdeb6..708adbbcedbd7a 100644 --- a/drivers/staging/dgnc/dgnc_cls.c +++ b/drivers/staging/dgnc/dgnc_cls.c @@ -119,7 +119,10 @@ static inline void cls_set_cts_flow_control(struct channel_t *ch) /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); - /* Enable interrupts for CTS flow, turn off interrupts for received XOFF chars */ + /* + * Enable interrupts for CTS flow, turn off interrupts for + * received XOFF chars + */ ier |= (UART_EXAR654_IER_CTSDSR); ier &= ~(UART_EXAR654_IER_XOFF); writeb(ier, &ch->ch_cls_uart->ier); @@ -167,7 +170,10 @@ static inline void cls_set_ixon_flow_control(struct channel_t *ch) /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); - /* Disable interrupts for CTS flow, turn on interrupts for received XOFF chars */ + /* + * Disable interrupts for CTS flow, turn on interrupts for + * received XOFF chars + */ ier &= ~(UART_EXAR654_IER_CTSDSR); ier |= (UART_EXAR654_IER_XOFF); writeb(ier, &ch->ch_cls_uart->ier); @@ -207,7 +213,10 @@ static inline void cls_set_no_output_flow_control(struct channel_t *ch) /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); - /* Disable interrupts for CTS flow, turn off interrupts for received XOFF chars */ + /* + * Disable interrupts for CTS flow, turn off interrupts for + * received XOFF chars + */ ier &= ~(UART_EXAR654_IER_CTSDSR); ier &= ~(UART_EXAR654_IER_XOFF); writeb(ier, &ch->ch_cls_uart->ier); @@ -220,8 +229,8 @@ static inline void 
cls_set_no_output_flow_control(struct channel_t *ch) &ch->ch_cls_uart->isr_fcr); ch->ch_r_watermark = 0; - ch->ch_t_tlevel = 16; - ch->ch_r_tlevel = 16; + ch->ch_t_tlevel = 16; + ch->ch_r_tlevel = 16; } @@ -350,8 +359,8 @@ static inline void cls_set_no_input_flow_control(struct channel_t *ch) UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); - ch->ch_t_tlevel = 16; - ch->ch_r_tlevel = 16; + ch->ch_t_tlevel = 16; + ch->ch_r_tlevel = 16; } @@ -380,12 +389,13 @@ static inline void cls_clear_break(struct channel_t *ch, int force) /* Turn break off, and unset some variables */ if (ch->ch_flags & CH_BREAK_SENDING) { - if ((jiffies >= ch->ch_stop_sending_break) || force) { + if (time_after(jiffies, ch->ch_stop_sending_break) || force) { uchar temp = readb(&ch->ch_cls_uart->lcr); - writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); + writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); ch->ch_flags &= ~(CH_BREAK_SENDING); ch->ch_stop_sending_break = 0; - DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies)); + DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", + jiffies)); } } DGNC_UNLOCK(ch->ch_lock, lock_flags); @@ -420,7 +430,8 @@ static inline void cls_parse_isr(struct dgnc_board *brd, uint port) if (isr & UART_IIR_NO_INT) break; - DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__, port, isr)); + DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__, + port, isr)); /* Receive Interrupt pending */ if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) { @@ -473,11 +484,11 @@ static void cls_param(struct tty_struct *tty) uchar uart_lcr = 0; uchar ier = 0; uchar uart_ier = 0; - uint baud = 9600; + uint baud = 9600; int quot = 0; - struct dgnc_board *bd; + struct dgnc_board *bd; struct channel_t *ch; - struct un_t *un; + struct un_t *un; if (!tty || tty->magic != TTY_MAGIC) return; @@ -495,7 +506,8 @@ static void cls_param(struct tty_struct *tty) return; DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n", - ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag)); + ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, + ch->ch_c_iflag)); /* * If baud rate is zero, flush queues, and set mval to drop DTR. @@ -506,7 +518,7 @@ static void cls_param(struct tty_struct *tty) ch->ch_w_head = ch->ch_w_tail = 0; cls_flush_uart_write(ch); - cls_flush_uart_read(ch); + cls_flush_uart_read(ch); /* The baudrate is B0 so all modem lines are to be dropped. 
*/ ch->ch_flags |= (CH_BAUD0); @@ -558,8 +570,12 @@ static void cls_param(struct tty_struct *tty) 4800, 9600, 19200, 38400 } }; - /* Only use the TXPrint baud rate if the terminal unit is NOT open */ - if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT)) + /* + * Only use the TXPrint baud rate if the terminal + * unit is NOT open + */ + if (!(ch->ch_tun.un_flags & UN_ISOPEN) && + (un->un_type == DGNC_PRINT)) baud = C_BAUD(ch->ch_pun.un_tty) & 0xff; else baud = C_BAUD(ch->ch_tun.un_tty) & 0xff; @@ -572,7 +588,8 @@ static void cls_param(struct tty_struct *tty) jindex = baud; - if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) { + if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && + (jindex < 16)) { baud = bauds[iindex][jindex]; } else { DPR_IOCTL(("baud indices were out of range (%d)(%d)", @@ -598,13 +615,11 @@ static void cls_param(struct tty_struct *tty) } } - if (ch->ch_c_cflag & PARENB) { + if (ch->ch_c_cflag & PARENB) lcr |= UART_LCR_PARITY; - } - if (!(ch->ch_c_cflag & PARODD)) { + if (!(ch->ch_c_cflag & PARODD)) lcr |= UART_LCR_EPAR; - } /* * Not all platforms support mark/space parity, @@ -648,31 +663,28 @@ static void cls_param(struct tty_struct *tty) writeb((quot & 0xff), &ch->ch_cls_uart->txrx); writeb((quot >> 8), &ch->ch_cls_uart->ier); writeb(lcr, &ch->ch_cls_uart->lcr); - } + } if (uart_lcr != lcr) writeb(lcr, &ch->ch_cls_uart->lcr); - if (ch->ch_c_cflag & CREAD) { + if (ch->ch_c_cflag & CREAD) ier |= (UART_IER_RDI | UART_IER_RLSI); - } - else { + else ier &= ~(UART_IER_RDI | UART_IER_RLSI); - } /* * Have the UART interrupt on modem signal changes ONLY when * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set. */ - if ((ch->ch_digi.digi_flags & CTSPACE) || (ch->ch_digi.digi_flags & RTSPACE) || - (ch->ch_c_cflag & CRTSCTS) || !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) || + if ((ch->ch_digi.digi_flags & CTSPACE) || + (ch->ch_digi.digi_flags & RTSPACE) || + (ch->ch_c_cflag & CRTSCTS) || + !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) || !(ch->ch_c_cflag & CLOCAL)) - { - ier |= UART_IER_MSI; - } - else { - ier &= ~UART_IER_MSI; - } + ier |= UART_IER_MSI; + else + ier &= ~UART_IER_MSI; ier |= UART_IER_THRI; @@ -681,29 +693,33 @@ static void cls_param(struct tty_struct *tty) if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) { cls_set_cts_flow_control(ch); - } - else if (ch->ch_c_iflag & IXON) { - /* If start/stop is set to disable, then we should disable flow control */ - if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) + } else if (ch->ch_c_iflag & IXON) { + /* + * If start/stop is set to disable, then we should + * disable flow control + */ + if ((ch->ch_startc == _POSIX_VDISABLE) || + (ch->ch_stopc == _POSIX_VDISABLE)) cls_set_no_output_flow_control(ch); else cls_set_ixon_flow_control(ch); - } - else { + } else { cls_set_no_output_flow_control(ch); } if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) { cls_set_rts_flow_control(ch); - } - else if (ch->ch_c_iflag & IXOFF) { - /* If start/stop is set to disable, then we should disable flow control */ - if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) + } else if (ch->ch_c_iflag & IXOFF) { + /* + * If start/stop is set to disable, then we should disable + * flow control + */ + if ((ch->ch_startc == _POSIX_VDISABLE) || + (ch->ch_stopc == _POSIX_VDISABLE)) cls_set_no_input_flow_control(ch); else cls_set_ixoff_flow_control(ch); - } - else { + } else { cls_set_no_input_flow_control(ch); } @@ -719,7 
+735,7 @@ static void cls_param(struct tty_struct *tty) */ static void cls_tasklet(unsigned long data) { - struct dgnc_board *bd = (struct dgnc_board *) data; + struct dgnc_board *bd = (struct dgnc_board *) data; struct channel_t *ch; ulong lock_flags; int i; @@ -802,7 +818,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd) unsigned long lock_flags; if (!brd) { - APR(("Received interrupt (%d) with null board associated\n", irq)); + APR(("Received interrupt (%d) with null board associated\n", + irq)); return IRQ_NONE; } @@ -810,7 +827,9 @@ static irqreturn_t cls_intr(int irq, void *voidbrd) * Check to make sure its for us. */ if (brd->magic != DGNC_BOARD_MAGIC) { - APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq)); + APR(( + "Received interrupt (%d) with a board pointer " + "that wasn't ours!\n", irq)); return IRQ_NONE; } @@ -826,7 +845,9 @@ static irqreturn_t cls_intr(int irq, void *voidbrd) /* If 0, no interrupts pending */ if (!poll_reg) { - DPR_INTR(("Kernel interrupted to me, but no pending interrupts...\n")); + DPR_INTR(( + "Kernel interrupted to me, but no pending " + "interrupts...\n")); DGNC_UNLOCK(brd->bd_intr_lock, lock_flags); return IRQ_NONE; } @@ -834,9 +855,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd) DPR_INTR(("%s:%d poll_reg: %x\n", __FILE__, __LINE__, poll_reg)); /* Parse each port to find out what caused the interrupt */ - for (i = 0; i < brd->nasync; i++) { + for (i = 0; i < brd->nasync; i++) cls_parse_isr(brd, i); - } /* * Schedule tasklet to more in-depth servicing at a better time. @@ -868,8 +888,8 @@ static void cls_enable_receiver(struct channel_t *ch) static void cls_copy_data_from_uart_to_queue(struct channel_t *ch) { - int qleft = 0; - uchar linestatus = 0; + int qleft = 0; + uchar linestatus = 0; uchar error_mask = 0; ushort head; ushort tail; @@ -885,7 +905,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch) tail = ch->ch_r_tail; /* Store how much space we have left in the queue */ - if ((qleft = tail - head - 1) < 0) + qleft = (tail - head - 1); + if (qleft < 0) qleft += RQUEUEMASK + 1; /* @@ -912,9 +933,9 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch) } /* - * If our queue is full, we have no choice but to drop some data. - * The assumption is that HWFLOW or SWFLOW should have stopped - * things way way before we got to this point. + * If our queue is full, we have no choice but to drop some + * data. The assumption is that HWFLOW or SWFLOW should have + * stopped things way way before we got to this point. * * I decided that I wanted to ditch the oldest data first, * I hope thats okay with everyone? Yes? Good. 
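
The qleft change above only splits an assignment out of the if condition, but the calculation it touches is the standard circular-buffer free-space formula: with the indices confined to a power-of-two range, (tail - head - 1), wrapped back into range when negative, is the number of slots the writer may fill before catching the reader. A short self-contained illustration, with a hypothetical QUEUE_MASK standing in for the driver's RQUEUEMASK:

#include <stdio.h>

#define QUEUE_MASK 0xff	/* queue holds QUEUE_MASK + 1 = 256 slots */

/* Free slots between writer (head) and reader (tail), keeping one slot
 * reserved so that head == tail always means "empty".
 */
static int queue_free_space(int head, int tail)
{
	int qleft = tail - head - 1;

	if (qleft < 0)
		qleft += QUEUE_MASK + 1;
	return qleft;
}

int main(void)
{
	/* Empty queue: everything but the reserved slot is free. */
	printf("%d\n", queue_free_space(0, 0));		/* 255 */
	/* Writer one slot behind the reader: queue is full. */
	printf("%d\n", queue_free_space(10, 11));	/* 0 */
	/* Writer wrapped past the end while the reader lags. */
	printf("%d\n", queue_free_space(250, 10));	/* 15 */
	return 0;
}

The reserved slot keeps head == tail unambiguous as "empty", which is why the full case above reports zero free space and why the driver's overflow path has to advance the tail (drop the oldest data) before storing anything new.
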
@@ -928,13 +949,16 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch) qleft++; } - ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE); + ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE + | UART_LSR_FE); ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx); - dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1); + dgnc_sniff_nowait_nolock(ch, "UART READ", + ch->ch_rqueue + head, 1); qleft--; - DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head])); + DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], + ch->ch_equeue[head])); if (ch->ch_equeue[head] & UART_LSR_PE) ch->ch_err_parity++; @@ -966,22 +990,19 @@ static int cls_drain(struct tty_struct *tty, uint seconds) { ulong lock_flags; struct channel_t *ch; - struct un_t *un; + struct un_t *un; int rc = 0; - if (!tty || tty->magic != TTY_MAGIC) { + if (!tty || tty->magic != TTY_MAGIC) return -ENXIO; - } un = (struct un_t *) tty->driver_data; - if (!un || un->magic != DGNC_UNIT_MAGIC) { + if (!un || un->magic != DGNC_UNIT_MAGIC) return -ENXIO; - } ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) { + if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return -ENXIO; - } DGNC_LOCK(ch->ch_lock, lock_flags); un->un_flags |= UN_EMPTY; @@ -990,24 +1011,25 @@ static int cls_drain(struct tty_struct *tty, uint seconds) /* * NOTE: Do something with time passed in. */ - rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0)); + rc = wait_event_interruptible(un->un_flags_wait, + ((un->un_flags & UN_EMPTY) == 0)); /* If ret is non-zero, user ctrl-c'ed us */ if (rc) DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__)); - return rc; + return rc; } /* Channel lock MUST be held before calling this function! */ static void cls_flush_uart_write(struct channel_t *ch) { - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) { + if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; - } - writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr); + writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), + &ch->ch_cls_uart->isr_fcr); udelay(10); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); @@ -1017,9 +1039,8 @@ static void cls_flush_uart_write(struct channel_t *ch) /* Channel lock MUST be held before calling this function! */ static void cls_flush_uart_read(struct channel_t *ch) { - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) { + if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; - } /* * For complete POSIX compatibility, we should be purging the @@ -1032,7 +1053,8 @@ static void cls_flush_uart_read(struct channel_t *ch) * So for now, we will leave the code #ifdef'ed out... 
*/ #if 0 - writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); + writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), + &ch->ch_cls_uart->isr_fcr); #endif udelay(10); } @@ -1059,7 +1081,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch) } /* If port is "stopped", don't send any data to the UART */ - if ((ch->ch_flags & CH_FORCED_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) { + if ((ch->ch_flags & CH_FORCED_STOP) || + (ch->ch_flags & CH_BREAK_SENDING)) { DGNC_UNLOCK(ch->ch_lock, lock_flags); return; } @@ -1071,10 +1094,10 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch) n = 32; - /* cache head and tail of queue */ - head = ch->ch_w_head & WQUEUEMASK; - tail = ch->ch_w_tail & WQUEUEMASK; - qlen = (head - tail) & WQUEUEMASK; + /* cache head and tail of queue */ + head = ch->ch_w_head & WQUEUEMASK; + tail = ch->ch_w_tail & WQUEUEMASK; + qlen = (head - tail) & WQUEUEMASK; /* Find minimum of the FIFO space, versus queue length */ n = min(n, qlen); @@ -1083,7 +1106,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch) /* * If RTS Toggle mode is on, turn on RTS now if not already set, - * and make sure we get an event when the data transfer has completed. + * and make sure we get an event when the data transfer has + * completed. */ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) { if (!(ch->ch_mostat & UART_MCR_RTS)) { @@ -1095,7 +1119,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch) /* * If DTR Toggle mode is on, turn on DTR now if not already set, - * and make sure we get an event when the data transfer has completed. + * and make sure we get an event when the data transfer has + * completed. */ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) { if (!(ch->ch_mostat & UART_MCR_DTR)) { @@ -1105,7 +1130,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch) ch->ch_tun.un_flags |= (UN_EMPTY); } writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx); - dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + ch->ch_w_tail, 1); + dgnc_sniff_nowait_nolock(ch, "UART WRITE", + ch->ch_wqueue + ch->ch_w_tail, 1); DPR_WRITE(("Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail])); ch->ch_w_tail++; ch->ch_w_tail &= WQUEUEMASK; @@ -1125,17 +1151,20 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch) static void cls_parse_modem(struct channel_t *ch, uchar signals) { - volatile uchar msignals = signals; + uchar msignals = signals; + ulong lock_flags; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; - DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n", ch->ch_portnum, msignals)); + DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n", + ch->ch_portnum, msignals)); /* * Do altpin switching. Altpin switches DCD and DSR. * This prolly breaks DSRPACE, so we should be more clever here. */ + DGNC_LOCK(ch->ch_lock, lock_flags); if (ch->ch_digi.digi_flags & DIGI_ALTPIN) { uchar mswap = signals; if (mswap & UART_MSR_DDCD) { @@ -1155,10 +1184,15 @@ static void cls_parse_modem(struct channel_t *ch, uchar signals) msignals |= UART_MSR_DCD; } } + DGNC_UNLOCK(ch->ch_lock, lock_flags); - /* Scrub off lower bits. They signify delta's, which I don't care about */ + /* + * Scrub off lower bits. 
They signify delta's, which I don't + * care about + */ signals &= 0xf0; + DGNC_LOCK(ch->ch_lock, lock_flags); if (msignals & UART_MSR_DCD) ch->ch_mistat |= UART_MSR_DCD; else @@ -1178,9 +1212,11 @@ static void cls_parse_modem(struct channel_t *ch, uchar signals) ch->ch_mistat |= UART_MSR_CTS; else ch->ch_mistat &= ~UART_MSR_CTS; + DGNC_UNLOCK(ch->ch_lock, lock_flags); - DPR_MSIGS(("Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n", + DPR_MSIGS(( + "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n", ch->ch_portnum, !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), @@ -1204,7 +1240,7 @@ static void cls_assert_modem_signals(struct channel_t *ch) if (ch->ch_flags & CH_LOOPBACK) out |= UART_MCR_LOOP; - writeb(out, &ch->ch_cls_uart->mcr); + writeb(out, &ch->ch_cls_uart->mcr); /* Give time for the UART to actually drop the signals */ udelay(10); @@ -1219,7 +1255,7 @@ static void cls_send_start_character(struct channel_t *ch) if (ch->ch_startc != _POSIX_VDISABLE) { ch->ch_xon_sends++; writeb(ch->ch_startc, &ch->ch_cls_uart->txrx); - } + } } @@ -1231,7 +1267,7 @@ static void cls_send_stop_character(struct channel_t *ch) if (ch->ch_stopc != _POSIX_VDISABLE) { ch->ch_xoff_sends++; writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx); - } + } } @@ -1259,10 +1295,11 @@ static void cls_uart_init(struct channel_t *ch) /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); - /* Clear out UART and FIFO */ + /* Clear out UART and FIFO */ readb(&ch->ch_cls_uart->txrx); - writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr); + writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), + &ch->ch_cls_uart->isr_fcr); udelay(10); ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); @@ -1302,8 +1339,7 @@ static uint cls_get_uart_bytes_left(struct channel_t *ch) if (ch->ch_flags & CH_TX_FIFO_EMPTY) tasklet_schedule(&ch->ch_bd->helper_tasklet); left = 1; - } - else { + } else { ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); left = 0; } @@ -1333,10 +1369,11 @@ static void cls_send_break(struct channel_t *ch, int msecs) writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); ch->ch_flags &= ~(CH_BREAK_SENDING); ch->ch_stop_sending_break = 0; - DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies)); + DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", + jiffies)); } return; - } + } /* * Set the time we should stop sending the break. @@ -1350,7 +1387,9 @@ static void cls_send_break(struct channel_t *ch, int msecs) uchar temp = readb(&ch->ch_cls_uart->lcr); writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr); ch->ch_flags |= (CH_BREAK_SENDING); - DPR_IOCTL(("Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n", + DPR_IOCTL(( + "Port %d. Starting UART_LCR_SBC! 
start: %lx " + "should end: %lx\n", ch->ch_portnum, jiffies, ch->ch_stop_sending_break)); } } @@ -1373,8 +1412,8 @@ static void cls_send_immediate_char(struct channel_t *ch, unsigned char c) static void cls_vpd(struct dgnc_board *brd) { - ulong vpdbase; /* Start of io base of the card */ - u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */ + ulong vpdbase; /* Start of io base of the card */ + u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */ int i = 0; @@ -1389,12 +1428,12 @@ static void cls_vpd(struct dgnc_board *brd) if (!re_map_vpdbase) return; - /* Store the VPD into our buffer */ - for (i = 0; i < 0x40; i++) { + /* Store the VPD into our buffer */ + for (i = 0; i < 0x40; i++) { brd->vpd[i] = readb(re_map_vpdbase + i); - printk("%x ", brd->vpd[i]); - } - printk("\n"); + pr_info("%x ", brd->vpd[i]); + } + pr_info("\n"); if (re_map_vpdbase) iounmap(re_map_vpdbase); diff --git a/drivers/staging/dgnc/dgnc_trace.c b/drivers/staging/dgnc/dgnc_trace.c index a98b7d4255c859..2f62f2a43542eb 100644 --- a/drivers/staging/dgnc/dgnc_trace.c +++ b/drivers/staging/dgnc/dgnc_trace.c @@ -35,6 +35,7 @@ #include #include "dgnc_driver.h" +#include "dgnc_trace.h" #define TRC_TO_CONSOLE 1 @@ -63,16 +64,16 @@ void dgnc_tracef(const char *fmt, ...) void dgnc_tracef(const char *fmt, ...) { - va_list ap; - char buf[TRC_MAXMSG+1]; - size_t lenbuf; - int i; - static int failed = FALSE; + va_list ap; + char buf[TRC_MAXMSG+1]; + size_t lenbuf; + int i; + static int failed = FALSE; # if defined(TRC_TO_KMEM) unsigned long flags; #endif - if(failed) + if (failed) return; # if defined(TRC_TO_KMEM) DGNC_LOCK(dgnc_tracef_lock, flags); @@ -86,7 +87,7 @@ void dgnc_tracef(const char *fmt, ...) # if defined(TRC_TO_KMEM) { - static int initd=0; + static int initd = 0; /* * Now, in addition to (or instead of) printing this stuff out @@ -95,7 +96,7 @@ void dgnc_tracef(const char *fmt, ...) */ if (!initd) { dgnc_trcbuf = (char *) vmalloc(dgnc_trcbuf_size); - if(!dgnc_trcbuf) { + if (!dgnc_trcbuf) { failed = TRUE; printk("dgnc: tracing init failed!\n"); return; @@ -179,6 +180,6 @@ void dgnc_tracef(const char *fmt, ...) */ void dgnc_tracer_free(void) { - if(dgnc_trcbuf) + if (dgnc_trcbuf) vfree(dgnc_trcbuf); } diff --git a/drivers/staging/dgrp/dgrp_driver.c b/drivers/staging/dgrp/dgrp_driver.c index 08eedf0867e6c0..b60a8da6350ace 100644 --- a/drivers/staging/dgrp/dgrp_driver.c +++ b/drivers/staging/dgrp/dgrp_driver.c @@ -23,7 +23,6 @@ #include #include #include -#include /* * PortServer includes diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c index 33ac7fb88cbd3b..1f61b89eca44c0 100644 --- a/drivers/staging/dgrp/dgrp_net_ops.c +++ b/drivers/staging/dgrp/dgrp_net_ops.c @@ -2232,6 +2232,177 @@ static ssize_t dgrp_net_read(struct file *file, char __user *buf, size_t count, return rtn; } +/* + * Common Packet Handling code + */ + +static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch, + long dlen, long plen, int n1, u8 *dbuf) +{ + char *error; + long n; + long remain; + u8 *buf; + u8 *b; + + remain = nd->nd_remain; + nd->nd_tx_work = 1; + + /* + * Otherwise data should appear only when we are + * in the CS_READY state. + */ + + if (ch->ch_state < CS_READY) { + error = "Data received before RWIN established"; + nd->nd_remain = 0; + nd->nd_state = NS_SEND_ERROR; + nd->nd_error = error; + } + + /* + * Assure that the data received is within the + * allowable window. 
+ */ + + n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; + + if (dlen > n) { + error = "Receive data overrun"; + nd->nd_remain = 0; + nd->nd_state = NS_SEND_ERROR; + nd->nd_error = error; + } + + /* + * If we received 3 or less characters, + * assume it is a human typing, and set RTIME + * to 10 milliseconds. + * + * If we receive 10 or more characters, + * assume its not a human typing, and set RTIME + * to 100 milliseconds. + */ + + if (ch->ch_edelay != DGRP_RTIME) { + if (ch->ch_rtime != ch->ch_edelay) { + ch->ch_rtime = ch->ch_edelay; + ch->ch_flag |= CH_PARAM; + } + } else if (dlen <= 3) { + if (ch->ch_rtime != 10) { + ch->ch_rtime = 10; + ch->ch_flag |= CH_PARAM; + } + } else { + if (ch->ch_rtime != DGRP_RTIME) { + ch->ch_rtime = DGRP_RTIME; + ch->ch_flag |= CH_PARAM; + } + } + + /* + * If a portion of the packet is outside the + * buffer, shorten the effective length of the + * data packet to be the amount of data received. + */ + + if (remain < plen) + dlen -= plen - remain; + + /* + * Detect if receive flush is now complete. + */ + + if ((ch->ch_flag & CH_RX_FLUSH) != 0 && + ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= + ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { + ch->ch_flag &= ~CH_RX_FLUSH; + } + + /* + * If we are ready to receive, move the data into + * the receive buffer. + */ + + ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; + + if (ch->ch_state == CS_READY && + (ch->ch_tun.un_open_count != 0) && + (ch->ch_tun.un_flag & UN_CLOSING) == 0 && + (ch->ch_cflag & CF_CREAD) != 0 && + (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && + (ch->ch_send & RR_RX_FLUSH) == 0) { + + if (ch->ch_rin + dlen >= RBUF_MAX) { + n = RBUF_MAX - ch->ch_rin; + + memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); + + ch->ch_rin = 0; + dbuf += n; + dlen -= n; + } + + memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); + + ch->ch_rin += dlen; + + + /* + * If we are not in fastcook mode, or + * if there is a fastcook thread + * waiting for data, send the data to + * the line discipline. + */ + + if ((ch->ch_flag & CH_FAST_READ) == 0 || + ch->ch_inwait != 0) { + dgrp_input(ch); + } + + /* + * If there is a read thread waiting + * in select, and we are in fastcook + * mode, wake him up. + */ + + if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && + (ch->ch_flag & CH_FAST_READ) != 0) + wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); + + /* + * Wake any thread waiting in the + * fastcook loop. + */ + + if ((ch->ch_flag & CH_INPUT) != 0) { + ch->ch_flag &= ~CH_INPUT; + wake_up_interruptible(&ch->ch_flag_wait); + } + } + + /* + * Fabricate and insert a data packet header to + * preced the remaining data when it comes in. + */ + + if (remain < plen) { + dlen = plen - remain; + b = buf; + + b[0] = 0x90 + n1; + put_unaligned_be16(dlen, b + 1); + + remain = 3; + if (remain > 0 && b != buf) + memcpy(buf, b, remain); + + nd->nd_remain = remain; + return; + } +} + /** * dgrp_receive() -- decode data packets received from the remote PortServer. * @nd: pointer to a node structure @@ -2306,7 +2477,8 @@ static void dgrp_receive(struct nd_struct *nd) plen = dlen + 1; dbuf = b + 1; - goto data; + handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); + break; /* * Process 2-byte header data packet. @@ -2320,7 +2492,8 @@ static void dgrp_receive(struct nd_struct *nd) plen = dlen + 2; dbuf = b + 2; - goto data; + handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); + break; /* * Process 3-byte header data packet. 
@@ -2335,159 +2508,6 @@ static void dgrp_receive(struct nd_struct *nd) dbuf = b + 3; - /* - * Common packet handling code. - */ - -data: - nd->nd_tx_work = 1; - - /* - * Otherwise data should appear only when we are - * in the CS_READY state. - */ - - if (ch->ch_state < CS_READY) { - error = "Data received before RWIN established"; - goto prot_error; - } - - /* - * Assure that the data received is within the - * allowable window. - */ - - n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; - - if (dlen > n) { - error = "Receive data overrun"; - goto prot_error; - } - - /* - * If we received 3 or less characters, - * assume it is a human typing, and set RTIME - * to 10 milliseconds. - * - * If we receive 10 or more characters, - * assume its not a human typing, and set RTIME - * to 100 milliseconds. - */ - - if (ch->ch_edelay != DGRP_RTIME) { - if (ch->ch_rtime != ch->ch_edelay) { - ch->ch_rtime = ch->ch_edelay; - ch->ch_flag |= CH_PARAM; - } - } else if (dlen <= 3) { - if (ch->ch_rtime != 10) { - ch->ch_rtime = 10; - ch->ch_flag |= CH_PARAM; - } - } else { - if (ch->ch_rtime != DGRP_RTIME) { - ch->ch_rtime = DGRP_RTIME; - ch->ch_flag |= CH_PARAM; - } - } - - /* - * If a portion of the packet is outside the - * buffer, shorten the effective length of the - * data packet to be the amount of data received. - */ - - if (remain < plen) - dlen -= plen - remain; - - /* - * Detect if receive flush is now complete. - */ - - if ((ch->ch_flag & CH_RX_FLUSH) != 0 && - ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= - ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { - ch->ch_flag &= ~CH_RX_FLUSH; - } - - /* - * If we are ready to receive, move the data into - * the receive buffer. - */ - - ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; - - if (ch->ch_state == CS_READY && - (ch->ch_tun.un_open_count != 0) && - (ch->ch_tun.un_flag & UN_CLOSING) == 0 && - (ch->ch_cflag & CF_CREAD) != 0 && - (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && - (ch->ch_send & RR_RX_FLUSH) == 0) { - - if (ch->ch_rin + dlen >= RBUF_MAX) { - n = RBUF_MAX - ch->ch_rin; - - memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); - - ch->ch_rin = 0; - dbuf += n; - dlen -= n; - } - - memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); - - ch->ch_rin += dlen; - - - /* - * If we are not in fastcook mode, or - * if there is a fastcook thread - * waiting for data, send the data to - * the line discipline. - */ - - if ((ch->ch_flag & CH_FAST_READ) == 0 || - ch->ch_inwait != 0) { - dgrp_input(ch); - } - - /* - * If there is a read thread waiting - * in select, and we are in fastcook - * mode, wake him up. - */ - - if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && - (ch->ch_flag & CH_FAST_READ) != 0) - wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); - - /* - * Wake any thread waiting in the - * fastcook loop. - */ - - if ((ch->ch_flag & CH_INPUT) != 0) { - ch->ch_flag &= ~CH_INPUT; - - wake_up_interruptible(&ch->ch_flag_wait); - } - } - - /* - * Fabricate and insert a data packet header to - * preced the remaining data when it comes in. 
- */ - - if (remain < plen) { - dlen = plen - remain; - b = buf; - - b[0] = 0x90 + n1; - put_unaligned_be16(dlen, b + 1); - - remain = 3; - goto done; - } break; /* diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c index 0d52de3729c6c1..7a9694c1d9c4f6 100644 --- a/drivers/staging/dgrp/dgrp_tty.c +++ b/drivers/staging/dgrp/dgrp_tty.c @@ -371,7 +371,7 @@ static void drp_param(struct ch_struct *ch) ch->ch_flag |= CH_BAUD0; } } else if (ch->ch_custom_speed) { - ch->ch_brate = PORTSERVER_DIVIDEND / ch->ch_custom_speed ; + ch->ch_brate = PORTSERVER_DIVIDEND / ch->ch_custom_speed; if (ch->ch_flag & CH_BAUD0) { ch->ch_mout |= DM_DTR | DM_RTS; @@ -752,7 +752,7 @@ static int dgrp_tty_open(struct tty_struct *tty, struct file *file) if (ch->ch_open_error != 0 && otype == ch->ch_otype) { retval = (ch->ch_open_error <= 2) ? - delay_error : -ENXIO ; + delay_error : -ENXIO; goto unlock; } diff --git a/drivers/staging/dwc2/TODO b/drivers/staging/dwc2/TODO deleted file mode 100644 index 282470d5531599..00000000000000 --- a/drivers/staging/dwc2/TODO +++ /dev/null @@ -1,33 +0,0 @@ -TODO: - - Dan Carpenter would like to see some cleanups to the microframe - scheduler code: - http://www.mail-archive.com/linux-usb@vger.kernel.org/msg26650.html - - - Should merge the NAK holdoff patch from Raspberry Pi - (http://marc.info/?l=linux-usb&m=137625067103833). But as it stands - that patch is incomplete, it needs more investigation to see if it - can be made to work for non-Raspberry Pi platforms that lack the - special FIQ interrupt that the Pi has. Without this patch, the driver - has a high interrupt rate (8K/sec). - - - The Raspberry Pi platform needs to have support for its FIQ interrupt - added, to get the same level of functionality as the downstream - driver. The raspberrypi.org developers have indicated they are - willing to help with that. - - - Some of the default driver parameters (see 'struct dwc2_core_params' - in core.h) won't work for many platforms. So DT attributes will need - to be added for some of these. But that can be done as-needed as new - platforms are added. - - - Eventually the driver should be merged with the s3c-hsotg peripheral - mode driver, so that both modes of operation can be supported with a - single driver. But I think that can wait till after the driver has - been moved to mainline. - - - After that, OTG support can be added. I'm not sure how much demand - there is for that, though, so I have that as a low priority. - -Please send any patches for this driver to Paul Zimmerman -and Greg Kroah-Hartman . And please CC linux-usb - too. diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README index 8da96a6d2c9279..3befc45fab8ac8 100644 --- a/drivers/staging/et131x/README +++ b/drivers/staging/et131x/README @@ -13,10 +13,6 @@ TODO: - Implement NAPI support - In et131x_tx(), don't return NETDEV_TX_BUSY, just drop the packet with kfree_skb(). - Reduce the number of split lines by careful consideration of variable names etc. - - Do this in et131x.c: - struct fbr_lookup *fbr; - fbr = rx_local->fbr[id]; - Then replace all the instances of "rx_local->fbr[id]" with fbr. 
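
The README item dropped here is the refactor the et131x.c hunks below actually perform: take rx_local->fbr[id] into a local struct fbr_lookup pointer once, then work through that alias. Schematically, with a pared-down hypothetical struct rather than the driver's real definitions:

struct fbr_lookup {
	unsigned int num_entries;
	unsigned int buffsize;
};

struct rx_ring {
	struct fbr_lookup *fbr[2];
};

/* Before: every access repeats the rx_local->fbr[id] indexing. */
static unsigned int ring_bytes_verbose(struct rx_ring *rx_local, int id)
{
	return rx_local->fbr[id]->num_entries * rx_local->fbr[id]->buffsize;
}

/* After: take the lookup once, then go through the alias. */
static unsigned int ring_bytes(struct rx_ring *rx_local, int id)
{
	struct fbr_lookup *fbr = rx_local->fbr[id];

	return fbr->num_entries * fbr->buffsize;
}

Besides shortening the lines that previously had to be split, the alias makes it obvious that every access in a given loop body refers to the same ring.
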
Please send patches to: Greg Kroah-Hartman diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c index ab8b29d2cb2609..e516bb69f3b450 100644 --- a/drivers/staging/et131x/et131x.c +++ b/drivers/staging/et131x/et131x.c @@ -54,7 +54,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include #include @@ -813,20 +812,21 @@ static void et131x_rx_dma_enable(struct et131x_adapter *adapter) { /* Setup the receive dma configuration register for normal operation */ u32 csr = ET_RXDMA_CSR_FBR1_ENABLE; + struct rx_ring *rx_ring = &adapter->rx_ring; - if (adapter->rx_ring.fbr[1]->buffsize == 4096) + if (rx_ring->fbr[1]->buffsize == 4096) csr |= ET_RXDMA_CSR_FBR1_SIZE_LO; - else if (adapter->rx_ring.fbr[1]->buffsize == 8192) + else if (rx_ring->fbr[1]->buffsize == 8192) csr |= ET_RXDMA_CSR_FBR1_SIZE_HI; - else if (adapter->rx_ring.fbr[1]->buffsize == 16384) + else if (rx_ring->fbr[1]->buffsize == 16384) csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI; csr |= ET_RXDMA_CSR_FBR0_ENABLE; - if (adapter->rx_ring.fbr[0]->buffsize == 256) + if (rx_ring->fbr[0]->buffsize == 256) csr |= ET_RXDMA_CSR_FBR0_SIZE_LO; - else if (adapter->rx_ring.fbr[0]->buffsize == 512) + else if (rx_ring->fbr[0]->buffsize == 512) csr |= ET_RXDMA_CSR_FBR0_SIZE_HI; - else if (adapter->rx_ring.fbr[0]->buffsize == 1024) + else if (rx_ring->fbr[0]->buffsize == 1024) csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI; writel(csr, &adapter->regs->rxdma.csr); @@ -968,7 +968,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter) /* Set up the if mode bits */ cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK; - if (phydev && phydev->speed == SPEED_1000) { + if (phydev->speed == SPEED_1000) { cfg2 |= ET_MAC_CFG2_IFMODE_1000; /* Phy mode bit */ ifctrl &= ~ET_MAC_IFCTRL_PHYMODE; @@ -999,11 +999,11 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter) cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX; /* Turn on duplex if needed */ - if (phydev && phydev->duplex == DUPLEX_FULL) + if (phydev->duplex == DUPLEX_FULL) cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX; ifctrl &= ~ET_MAC_IFCTRL_GHDMODE; - if (phydev && phydev->duplex == DUPLEX_HALF) + if (phydev->duplex == DUPLEX_HALF) ifctrl |= ET_MAC_IFCTRL_GHDMODE; writel(ifctrl, &mac->if_ctrl); @@ -1039,9 +1039,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter) */ static int et1310_in_phy_coma(struct et131x_adapter *adapter) { - u32 pmcsr; - - pmcsr = readl(&adapter->regs->global.pm_csr); + u32 pmcsr = readl(&adapter->regs->global.pm_csr); return ET_PM_PHY_SW_COMA & pmcsr ? 
1 : 0; } @@ -1351,8 +1349,6 @@ static void et1310_config_macstat_regs(struct et131x_adapter *adapter) * @addr: the address of the transceiver * @reg: the register to read * @value: pointer to a 16-bit value in which the value will be stored - * - * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, u8 reg, u16 *value) @@ -1425,10 +1421,6 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) * @adapter: pointer to our private adapter structure * @reg: the register to read * @value: 16-bit value to write - * - * FIXME: one caller in netdev still - * - * Return 0 on success, errno on failure (as defined in errno.h) */ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) { @@ -1494,10 +1486,10 @@ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) return status; } -/* Still used from _mac for BIT_READ */ -static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, - u16 action, u16 regnum, u16 bitnum, - u8 *value) +static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter, + u16 regnum, + u16 bitnum, + u8 *value) { u16 reg; u16 mask = 1 << bitnum; @@ -1505,22 +1497,7 @@ static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, /* Read the requested register */ et131x_mii_read(adapter, regnum, ®); - switch (action) { - case TRUEPHY_BIT_READ: - *value = (reg & mask) >> bitnum; - break; - - case TRUEPHY_BIT_SET: - et131x_mii_write(adapter, regnum, reg | mask); - break; - - case TRUEPHY_BIT_CLEAR: - et131x_mii_write(adapter, regnum, reg & ~mask); - break; - - default: - break; - } + *value = (reg & mask) >> bitnum; } static void et1310_config_flow_control(struct et131x_adapter *adapter) @@ -1532,27 +1509,19 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter) } else { char remote_pause, remote_async_pause; - et1310_phy_access_mii_bit(adapter, - TRUEPHY_BIT_READ, 5, 10, &remote_pause); - et1310_phy_access_mii_bit(adapter, - TRUEPHY_BIT_READ, 5, 11, - &remote_async_pause); + et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause); + et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause); - if ((remote_pause == TRUEPHY_BIT_SET) && - (remote_async_pause == TRUEPHY_BIT_SET)) { + if (remote_pause && remote_async_pause) { adapter->flowcontrol = adapter->wanted_flow; - } else if ((remote_pause == TRUEPHY_BIT_SET) && - (remote_async_pause == TRUEPHY_BIT_CLEAR)) { + } else if (remote_pause && !remote_async_pause) { if (adapter->wanted_flow == FLOW_BOTH) adapter->flowcontrol = FLOW_BOTH; else adapter->flowcontrol = FLOW_NONE; - } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && - (remote_async_pause == TRUEPHY_BIT_CLEAR)) { + } else if (!remote_pause && !remote_async_pause) { adapter->flowcontrol = FLOW_NONE; - } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT && - * remote_async_pause == TRUEPHY_SET_BIT) - */ + } else { if (adapter->wanted_flow == FLOW_BOTH) adapter->flowcontrol = FLOW_RXONLY; else @@ -1561,9 +1530,7 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter) } } -/* et1310_update_macstat_host_counters - Update the local copy of the statistics - * @adapter: pointer to the adapter structure - */ +/* et1310_update_macstat_host_counters - Update local copy of the statistics */ static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) { struct ce_stats *stats = &adapter->stats; @@ -1589,7 +1556,6 @@ static void 
et1310_update_macstat_host_counters(struct et131x_adapter *adapter) } /* et1310_handle_macstat_interrupt - * @adapter: pointer to the adapter structure * * One of the MACSTAT counters has wrapped. Update the local copy of * the statistics held in the adapter structure, checking the "wrap" @@ -1679,7 +1645,7 @@ static int et131x_mdio_reset(struct mii_bus *bus) return 0; } -/* et1310_phy_power_down - PHY power control +/* et1310_phy_power_switch - PHY power control * @adapter: device to control * @down: true for off/false for back on * @@ -1688,7 +1654,7 @@ static int et131x_mdio_reset(struct mii_bus *bus) * Can't you see that this code processed * Phy power, phy power.. */ -static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down) +static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) { u16 data; @@ -1699,10 +1665,7 @@ static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down) et131x_mii_write(adapter, MII_BMCR, data); } -/* et131x_xcvr_init - Init the phy if we are setting it into force mode - * @adapter: pointer to our private adapter structure - * - */ +/* et131x_xcvr_init - Init the phy if we are setting it into force mode */ static void et131x_xcvr_init(struct et131x_adapter *adapter) { u16 lcr2; @@ -1731,7 +1694,6 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) } /* et131x_configure_global_regs - configure JAGCore global regs - * @adapter: pointer to our adapter structure * * Used to configure the global registers on the JAGCore */ @@ -1776,9 +1738,7 @@ static void et131x_configure_global_regs(struct et131x_adapter *adapter) writel(0, ®s->watchdog_timer); } -/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence - * @adapter: pointer to our adapter structure - */ +/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) { struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; @@ -1821,6 +1781,7 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) u32 __iomem *min_des; u32 __iomem *base_hi; u32 __iomem *base_lo; + struct fbr_lookup *fbr = rx_local->fbr[id]; if (id == 0) { num_des = &rx_dma->fbr0_num_des; @@ -1837,12 +1798,10 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) } /* Now's the best time to initialize FBR contents */ - fbr_entry = - (struct fbr_desc *) rx_local->fbr[id]->ring_virtaddr; - for (entry = 0; - entry < rx_local->fbr[id]->num_entries; entry++) { - fbr_entry->addr_hi = rx_local->fbr[id]->bus_high[entry]; - fbr_entry->addr_lo = rx_local->fbr[id]->bus_low[entry]; + fbr_entry = fbr->ring_virtaddr; + for (entry = 0; entry < fbr->num_entries; entry++) { + fbr_entry->addr_hi = fbr->bus_high[entry]; + fbr_entry->addr_lo = fbr->bus_low[entry]; fbr_entry->word2 = entry; fbr_entry++; } @@ -1850,19 +1809,16 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) /* Set the address and parameters of Free buffer ring 1 and 0 * into the 1310's registers */ - writel(upper_32_bits(rx_local->fbr[id]->ring_physaddr), - base_hi); - writel(lower_32_bits(rx_local->fbr[id]->ring_physaddr), - base_lo); - writel(rx_local->fbr[id]->num_entries - 1, num_des); + writel(upper_32_bits(fbr->ring_physaddr), base_hi); + writel(lower_32_bits(fbr->ring_physaddr), base_lo); + writel(fbr->num_entries - 1, num_des); writel(ET_DMA10_WRAP, full_offset); /* This variable tracks the free buffer ring 1 full position, * so it has to match the above. 
*/ - rx_local->fbr[id]->local_full = ET_DMA10_WRAP; - writel(((rx_local->fbr[id]->num_entries * - LO_MARK_PERCENT_FOR_RX) / 100) - 1, + fbr->local_full = ET_DMA10_WRAP; + writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, min_des); } @@ -1884,7 +1840,6 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) } /* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. - * @adapter: pointer to our private adapter structure * * Configure the transmit engine with the ring buffers we have created * and prepare it for use. @@ -1892,33 +1847,26 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) { struct txdma_regs __iomem *txdma = &adapter->regs->txdma; + struct tx_ring *tx_ring = &adapter->tx_ring; /* Load the hardware with the start of the transmit descriptor ring. */ - writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa), - &txdma->pr_base_hi); - writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa), - &txdma->pr_base_lo); + writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi); + writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo); /* Initialise the transmit DMA engine */ writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); /* Load the completion writeback physical address */ - writel(upper_32_bits(adapter->tx_ring.tx_status_pa), - &txdma->dma_wb_base_hi); - writel(lower_32_bits(adapter->tx_ring.tx_status_pa), - &txdma->dma_wb_base_lo); + writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi); + writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo); - *adapter->tx_ring.tx_status = 0; + *tx_ring->tx_status = 0; writel(0, &txdma->service_request); - adapter->tx_ring.send_idx = 0; + tx_ring->send_idx = 0; } -/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation - * @adapter: pointer to our private adapter structure - * - * Returns 0 on success, errno on failure (as defined in errno.h) - */ +/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */ static void et131x_adapter_setup(struct et131x_adapter *adapter) { /* Configure the JAGCore */ @@ -1938,13 +1886,11 @@ static void et131x_adapter_setup(struct et131x_adapter *adapter) et1310_config_macstat_regs(adapter); - et1310_phy_power_down(adapter, 0); + et1310_phy_power_switch(adapter, 0); et131x_xcvr_init(adapter); } -/* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310 - * @adapter: pointer to our private adapter structure - */ +/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */ static void et131x_soft_reset(struct et131x_adapter *adapter) { u32 reg; @@ -1965,7 +1911,6 @@ static void et131x_soft_reset(struct et131x_adapter *adapter) } /* et131x_enable_interrupts - enable interrupt - * @adapter: et131x device * * Enable the appropriate interrupts on the ET131x according to our * configuration @@ -1976,7 +1921,7 @@ static void et131x_enable_interrupts(struct et131x_adapter *adapter) /* Enable all global interrupts */ if (adapter->flowcontrol == FLOW_TXONLY || - adapter->flowcontrol == FLOW_BOTH) + adapter->flowcontrol == FLOW_BOTH) mask = INT_MASK_ENABLE; else mask = INT_MASK_ENABLE_NO_FLOW; @@ -1985,7 +1930,6 @@ static void et131x_enable_interrupts(struct et131x_adapter *adapter) } /* et131x_disable_interrupts - interrupt disable - * @adapter: et131x device * * Block all interrupts from the et131x device at the device itself */ @@ -1995,9 +1939,7 @@ static 
void et131x_disable_interrupts(struct et131x_adapter *adapter) writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); } -/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 - * @adapter: pointer to our adapter structure - */ +/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 */ static void et131x_tx_dma_disable(struct et131x_adapter *adapter) { /* Setup the tramsmit dma configuration register */ @@ -2005,9 +1947,7 @@ static void et131x_tx_dma_disable(struct et131x_adapter *adapter) &adapter->regs->txdma.csr); } -/* et131x_enable_txrx - Enable tx/rx queues - * @netdev: device to be enabled - */ +/* et131x_enable_txrx - Enable tx/rx queues */ static void et131x_enable_txrx(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -2024,9 +1964,7 @@ static void et131x_enable_txrx(struct net_device *netdev) netif_start_queue(netdev); } -/* et131x_disable_txrx - Disable tx/rx queues - * @netdev: device to be disabled - */ +/* et131x_disable_txrx - Disable tx/rx queues */ static void et131x_disable_txrx(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -2042,18 +1980,12 @@ static void et131x_disable_txrx(struct net_device *netdev) et131x_disable_interrupts(adapter); } -/* et131x_init_send - Initialize send data structures - * @adapter: pointer to our private adapter structure - */ +/* et131x_init_send - Initialize send data structures */ static void et131x_init_send(struct et131x_adapter *adapter) { - struct tcb *tcb; u32 ct; - struct tx_ring *tx_ring; - - /* Setup some convenience pointers */ - tx_ring = &adapter->tx_ring; - tcb = adapter->tx_ring.tcb_ring; + struct tx_ring *tx_ring = &adapter->tx_ring; + struct tcb *tcb = tx_ring->tcb_ring; tx_ring->tcb_qhead = tcb; @@ -2076,7 +2008,6 @@ static void et131x_init_send(struct et131x_adapter *adapter) } /* et1310_enable_phy_coma - called when network cable is unplugged - * @adapter: pointer to our adapter structure * * driver receive an phy status change interrupt while in D0 and check that * phy_status is down. @@ -2104,11 +2035,6 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter) /* Save the GbE PHY speed and duplex modes. Need to restore this * when cable is plugged back in */ - /* TODO - when PM is re-enabled, check if we need to - * perform a similar task as this - - * adapter->pdown_speed = adapter->ai_force_speed; - * adapter->pdown_duplex = adapter->ai_force_duplex; - */ /* Stop sending packets. 
*/ spin_lock_irqsave(&adapter->send_hw_lock, flags); @@ -2128,9 +2054,7 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter) writel(pmcsr, &adapter->regs->global.pm_csr); } -/* et1310_disable_phy_coma - Disable the Phy Coma Mode - * @adapter: pointer to our adapter structure - */ +/* et1310_disable_phy_coma - Disable the Phy Coma Mode */ static void et1310_disable_phy_coma(struct et131x_adapter *adapter) { u32 pmcsr; @@ -2145,11 +2069,6 @@ static void et1310_disable_phy_coma(struct et131x_adapter *adapter) /* Restore the GbE PHY speed and duplex modes; * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY */ - /* TODO - when PM is re-enabled, check if we need to - * perform a similar task as this - - * adapter->ai_force_speed = adapter->pdown_speed; - * adapter->ai_force_duplex = adapter->pdown_duplex; - */ /* Re-initialize the send structures */ et131x_init_send(adapter); @@ -2183,15 +2102,12 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) tmp_free_buff_ring ^= ET_DMA10_WRAP; } /* For the 1023 case */ - tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP); + tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP); *free_buff_ring = tmp_free_buff_ring; return tmp_free_buff_ring; } /* et131x_rx_dma_memory_alloc - * @adapter: pointer to our private adapter structure - * - * Returns 0 on success and errno on failure (as defined in errno.h) * * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, * and the Packet Status Ring. @@ -2203,10 +2119,8 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) u32 bufsize; u32 pktstat_ringsize; u32 fbr_chunksize; - struct rx_ring *rx_ring; - - /* Setup some convenience pointers */ - rx_ring = &adapter->rx_ring; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; /* Alloc memory for the lookup table */ rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); @@ -2247,20 +2161,18 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) rx_ring->fbr[1]->num_entries = 128; } - adapter->rx_ring.psr_num_entries = - adapter->rx_ring.fbr[0]->num_entries + - adapter->rx_ring.fbr[1]->num_entries; + rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries + + rx_ring->fbr[1]->num_entries; for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; /* Allocate an area of memory for Free Buffer Ring */ - bufsize = - (sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries); - rx_ring->fbr[id]->ring_virtaddr = - dma_alloc_coherent(&adapter->pdev->dev, - bufsize, - &rx_ring->fbr[id]->ring_physaddr, - GFP_KERNEL); - if (!rx_ring->fbr[id]->ring_virtaddr) { + bufsize = sizeof(struct fbr_desc) * fbr->num_entries; + fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, + bufsize, + &fbr->ring_physaddr, + GFP_KERNEL); + if (!fbr->ring_virtaddr) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Free Buffer Ring %d\n", id); return -ENOMEM; @@ -2268,25 +2180,25 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) } for (id = 0; id < NUM_FBRS; id++) { - fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[id]->buffsize); + fbr = rx_ring->fbr[id]; + fbr_chunksize = (FBR_CHUNKS * fbr->buffsize); - for (i = 0; - i < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); i++) { + for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) { dma_addr_t fbr_tmp_physaddr; - rx_ring->fbr[id]->mem_virtaddrs[i] = dma_alloc_coherent( + fbr->mem_virtaddrs[i] = dma_alloc_coherent( &adapter->pdev->dev, fbr_chunksize, - &rx_ring->fbr[id]->mem_physaddrs[i], 
+ &fbr->mem_physaddrs[i], GFP_KERNEL); - if (!rx_ring->fbr[id]->mem_virtaddrs[i]) { + if (!fbr->mem_virtaddrs[i]) { dev_err(&adapter->pdev->dev, "Could not alloc memory\n"); return -ENOMEM; } /* See NOTE in "Save Physical Address" comment above */ - fbr_tmp_physaddr = rx_ring->fbr[id]->mem_physaddrs[i]; + fbr_tmp_physaddr = fbr->mem_physaddrs[i]; for (j = 0; j < FBR_CHUNKS; j++) { u32 index = (i * FBR_CHUNKS) + j; @@ -2294,26 +2206,25 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) /* Save the Virtual address of this index for * quick access later */ - rx_ring->fbr[id]->virt[index] = - (u8 *) rx_ring->fbr[id]->mem_virtaddrs[i] + - (j * rx_ring->fbr[id]->buffsize); + fbr->virt[index] = (u8 *)fbr->mem_virtaddrs[i] + + (j * fbr->buffsize); /* now store the physical address in the * descriptor so the device can access it */ - rx_ring->fbr[id]->bus_high[index] = + fbr->bus_high[index] = upper_32_bits(fbr_tmp_physaddr); - rx_ring->fbr[id]->bus_low[index] = + fbr->bus_low[index] = lower_32_bits(fbr_tmp_physaddr); - fbr_tmp_physaddr += rx_ring->fbr[id]->buffsize; + fbr_tmp_physaddr += fbr->buffsize; } } } /* Allocate an area of memory for FIFO of Packet Status ring entries */ pktstat_ringsize = - sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; + sizeof(struct pkt_stat_desc) * rx_ring->psr_num_entries; rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, pktstat_ringsize, @@ -2325,8 +2236,6 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) "Cannot alloc memory for Packet Status Ring\n"); return -ENOMEM; } - pr_info("Packet Status Ring %llx\n", - (unsigned long long) rx_ring->ps_ring_physaddr); /* NOTE : dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses @@ -2345,7 +2254,6 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) return -ENOMEM; } rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; - pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus); /* The RFDs are going to be put on lists later on, so initialize the * lists now. @@ -2354,9 +2262,7 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) return 0; } -/* et131x_rx_dma_memory_free - Free all memory allocated within this module. 
- * @adapter: pointer to our private adapter structure - */ +/* et131x_rx_dma_memory_free - Free all memory allocated within this module */ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) { u8 id; @@ -2364,17 +2270,15 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) u32 bufsize; u32 pktstat_ringsize; struct rfd *rfd; - struct rx_ring *rx_ring; - - /* Setup some convenience pointers */ - rx_ring = &adapter->rx_ring; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; /* Free RFDs and associated packet descriptors */ WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); while (!list_empty(&rx_ring->recv_list)) { - rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, - struct rfd, list_node); + rfd = list_entry(rx_ring->recv_list.next, + struct rfd, list_node); list_del(&rfd->list_node); rfd->skb = NULL; @@ -2383,40 +2287,41 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) /* Free Free Buffer Rings */ for (id = 0; id < NUM_FBRS; id++) { - if (!rx_ring->fbr[id]->ring_virtaddr) + fbr = rx_ring->fbr[id]; + + if (!fbr->ring_virtaddr) continue; /* First the packet memory */ for (index = 0; - index < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); + index < fbr->num_entries / FBR_CHUNKS; index++) { - if (rx_ring->fbr[id]->mem_virtaddrs[index]) { - bufsize = - rx_ring->fbr[id]->buffsize * FBR_CHUNKS; + if (fbr->mem_virtaddrs[index]) { + bufsize = fbr->buffsize * FBR_CHUNKS; dma_free_coherent(&adapter->pdev->dev, - bufsize, - rx_ring->fbr[id]->mem_virtaddrs[index], - rx_ring->fbr[id]->mem_physaddrs[index]); + bufsize, + fbr->mem_virtaddrs[index], + fbr->mem_physaddrs[index]); - rx_ring->fbr[id]->mem_virtaddrs[index] = NULL; + fbr->mem_virtaddrs[index] = NULL; } } - bufsize = - sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries; + bufsize = sizeof(struct fbr_desc) * fbr->num_entries; - dma_free_coherent(&adapter->pdev->dev, bufsize, - rx_ring->fbr[id]->ring_virtaddr, - rx_ring->fbr[id]->ring_physaddr); + dma_free_coherent(&adapter->pdev->dev, + bufsize, + fbr->ring_virtaddr, + fbr->ring_physaddr); - rx_ring->fbr[id]->ring_virtaddr = NULL; + fbr->ring_virtaddr = NULL; } /* Free Packet Status Ring */ if (rx_ring->ps_ring_virtaddr) { pktstat_ringsize = sizeof(struct pkt_stat_desc) * - adapter->rx_ring.psr_num_entries; + rx_ring->psr_num_entries; dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize, rx_ring->ps_ring_virtaddr, @@ -2441,20 +2346,12 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) rx_ring->num_ready_recv = 0; } -/* et131x_init_recv - Initialize receive data structures. - * @adapter: pointer to our private adapter structure - * - * Returns 0 on success and errno on failure (as defined in errno.h) - */ +/* et131x_init_recv - Initialize receive data structures */ static int et131x_init_recv(struct et131x_adapter *adapter) { struct rfd *rfd; u32 rfdct; - u32 numrfd = 0; - struct rx_ring *rx_ring; - - /* Setup some convenience pointers */ - rx_ring = &adapter->rx_ring; + struct rx_ring *rx_ring = &adapter->rx_ring; /* Setup each RFD */ for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { @@ -2467,24 +2364,18 @@ static int et131x_init_recv(struct et131x_adapter *adapter) /* Add this RFD to the recv_list */ list_add_tail(&rfd->list_node, &rx_ring->recv_list); - /* Increment both the available RFD's, and the total RFD's. 
*/ + /* Increment the available RFD's */ rx_ring->num_ready_recv++; - numrfd++; } return 0; } -/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. - * @adapter: pointer to our adapter structure - */ +/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */ static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) { struct phy_device *phydev = adapter->phydev; - if (!phydev) - return; - /* For version B silicon, we do not use the RxDMA timer for 10 and 100 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. */ @@ -2505,11 +2396,13 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) u16 buff_index = rfd->bufferindex; u8 ring_index = rfd->ringindex; unsigned long flags; + struct fbr_lookup *fbr = rx_local->fbr[ring_index]; /* We don't use any of the OOB data besides status. Otherwise, we * need to clean up OOB data */ - if (buff_index < rx_local->fbr[ring_index]->num_entries) { + if (buff_index < fbr->num_entries) { + u32 free_buff_ring; u32 __iomem *offset; struct fbr_desc *next; @@ -2520,22 +2413,20 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) else offset = &rx_dma->fbr1_full_offset; - next = (struct fbr_desc *) - (rx_local->fbr[ring_index]->ring_virtaddr) + - INDEX10(rx_local->fbr[ring_index]->local_full); + next = (struct fbr_desc *)(fbr->ring_virtaddr) + + INDEX10(fbr->local_full); /* Handle the Free Buffer Ring advancement here. Write * the PA / Buffer Index for the returned buffer into * the oldest (next to be freed)FBR entry */ - next->addr_hi = rx_local->fbr[ring_index]->bus_high[buff_index]; - next->addr_lo = rx_local->fbr[ring_index]->bus_low[buff_index]; + next->addr_hi = fbr->bus_high[buff_index]; + next->addr_lo = fbr->bus_low[buff_index]; next->word2 = buff_index; - writel(bump_free_buff_ring( - &rx_local->fbr[ring_index]->local_full, - rx_local->fbr[ring_index]->num_entries - 1), - offset); + free_buff_ring = bump_free_buff_ring(&fbr->local_full, + fbr->num_entries - 1); + writel(free_buff_ring, offset); spin_unlock_irqrestore(&adapter->fbr_lock, flags); } else { @@ -2555,7 +2446,6 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) } /* nic_rx_pkts - Checks the hardware for available packets - * @adapter: pointer to our adapter * * Returns rfd, a pointer to our MPRFD. * @@ -2580,6 +2470,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) u32 word0; u32 word1; struct sk_buff *skb; + struct fbr_lookup *fbr; /* RX Status block is written by the DMA engine prior to every * interrupt. 
It contains the next to be used entry in the Packet @@ -2601,6 +2492,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) */ len = psr->word1 & 0xFFFF; ring_index = (psr->word1 >> 26) & 0x03; + fbr = rx_local->fbr[ring_index]; buff_index = (psr->word1 >> 16) & 0x3FF; word0 = psr->word0; @@ -2616,8 +2508,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); - if (ring_index > 1 || - buff_index > rx_local->fbr[ring_index]->num_entries - 1) { + if (ring_index > 1 || buff_index > fbr->num_entries - 1) { /* Illegal buffer or ring index cannot be used by S/W*/ dev_err(&adapter->pdev->dev, "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", @@ -2629,7 +2520,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) spin_lock_irqsave(&adapter->rcv_lock, flags); element = rx_local->recv_list.next; - rfd = (struct rfd *) list_entry(element, struct rfd, list_node); + rfd = list_entry(element, struct rfd, list_node); if (!rfd) { spin_unlock_irqrestore(&adapter->rcv_lock, flags); @@ -2670,7 +2561,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS) && !(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { - buf = rx_local->fbr[ring_index]->virt[buff_index]; + buf = fbr->virt[buff_index]; /* Loop through our list to see if the destination * address of this packet matches one in our list. @@ -2708,7 +2599,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) adapter->stats.unicast_pkts_rcvd++; } - if (len == 0) { + if (!len) { rfd->len = 0; goto out; } @@ -2723,9 +2614,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) adapter->net_stats.rx_bytes += rfd->len; - memcpy(skb_put(skb, rfd->len), - rx_local->fbr[ring_index]->virt[buff_index], - rfd->len); + memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len); skb->protocol = eth_type_trans(skb, adapter->netdev); skb->ip_summed = CHECKSUM_NONE; @@ -2737,7 +2626,6 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) } /* et131x_handle_recv_interrupt - Interrupt handler for receive processing - * @adapter: pointer to our adapter * * Assumption, Rcv spinlock has been acquired. */ @@ -2746,11 +2634,12 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) struct rfd *rfd = NULL; u32 count = 0; bool done = true; + struct rx_ring *rx_ring = &adapter->rx_ring; /* Process up to available RFD's */ while (count < NUM_PACKETS_HANDLED) { - if (list_empty(&adapter->rx_ring.recv_list)) { - WARN_ON(adapter->rx_ring.num_ready_recv != 0); + if (list_empty(&rx_ring->recv_list)) { + WARN_ON(rx_ring->num_ready_recv != 0); done = false; break; } @@ -2774,25 +2663,22 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) adapter->net_stats.rx_packets++; /* Set the status on the packet, either resources or success */ - if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) + if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK) dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); count++; } if (count == NUM_PACKETS_HANDLED || !done) { - adapter->rx_ring.unfinished_receives = true; + rx_ring->unfinished_receives = true; writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, &adapter->regs->global.watchdog_timer); } else /* Watchdog timer will disable itself if appropriate. 
*/ - adapter->rx_ring.unfinished_receives = false; + rx_ring->unfinished_receives = false; } /* et131x_tx_dma_memory_alloc - * @adapter: pointer to our private adapter structure - * - * Returns 0 on success and errno on failure (as defined in errno.h). * * Allocates memory that will be visible both to the device and to the CPU. * The OS will pass us packets, pointers to which we will insert in the Tx @@ -2806,18 +2692,17 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) struct tx_ring *tx_ring = &adapter->tx_ring; /* Allocate memory for the TCB's (Transmit Control Block) */ - adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), - GFP_ATOMIC | GFP_DMA); - if (!adapter->tx_ring.tcb_ring) + tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), + GFP_ATOMIC | GFP_DMA); + if (!tx_ring->tcb_ring) return -ENOMEM; desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); - tx_ring->tx_desc_ring = - (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, - desc_size, - &tx_ring->tx_desc_ring_pa, - GFP_KERNEL); - if (!adapter->tx_ring.tx_desc_ring) { + tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev, + desc_size, + &tx_ring->tx_desc_ring_pa, + GFP_KERNEL); + if (!tx_ring->tx_desc_ring) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n"); return -ENOMEM; @@ -2835,51 +2720,46 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) sizeof(u32), &tx_ring->tx_status_pa, GFP_KERNEL); - if (!adapter->tx_ring.tx_status_pa) { + if (!tx_ring->tx_status_pa) { dev_err(&adapter->pdev->dev, - "Cannot alloc memory for Tx status block\n"); + "Cannot alloc memory for Tx status block\n"); return -ENOMEM; } return 0; } -/* et131x_tx_dma_memory_free - Free all memory allocated within this module - * @adapter: pointer to our private adapter structure - * - * Returns 0 on success and errno on failure (as defined in errno.h). - */ +/* et131x_tx_dma_memory_free - Free all memory allocated within this module */ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) { int desc_size = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; - if (adapter->tx_ring.tx_desc_ring) { + if (tx_ring->tx_desc_ring) { /* Free memory relating to Tx rings here */ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); dma_free_coherent(&adapter->pdev->dev, - desc_size, - adapter->tx_ring.tx_desc_ring, - adapter->tx_ring.tx_desc_ring_pa); - adapter->tx_ring.tx_desc_ring = NULL; + desc_size, + tx_ring->tx_desc_ring, + tx_ring->tx_desc_ring_pa); + tx_ring->tx_desc_ring = NULL; } /* Free memory for the Tx status block */ - if (adapter->tx_ring.tx_status) { + if (tx_ring->tx_status) { dma_free_coherent(&adapter->pdev->dev, - sizeof(u32), - adapter->tx_ring.tx_status, - adapter->tx_ring.tx_status_pa); + sizeof(u32), + tx_ring->tx_status, + tx_ring->tx_status_pa); - adapter->tx_ring.tx_status = NULL; + tx_ring->tx_status = NULL; } /* Free the memory for the tcb structures */ - kfree(adapter->tx_ring.tcb_ring); + kfree(tx_ring->tcb_ring); } /* nic_send_packet - NIC specific send handler for version B silicon. * @adapter: pointer to our adapter * @tcb: pointer to struct tcb - * - * Returns 0 or errno. 
*/ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) { @@ -2893,6 +2773,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) unsigned long flags; struct phy_device *phydev = adapter->phydev; dma_addr_t dma_addr; + struct tx_ring *tx_ring = &adapter->tx_ring; /* Part of the optimizations of this send routine restrict us to * sending 24 fragments at a pass. In practice we should never see @@ -2968,11 +2849,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) } if (phydev && phydev->speed == SPEED_1000) { - if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) { + if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) { /* Last element & Interrupt flag */ desc[frag - 1].flags = TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; - adapter->tx_ring.since_irq = 0; + tx_ring->since_irq = 0; } else { /* Last element */ desc[frag - 1].flags = TXDESC_FLAG_LASTPKT; } @@ -2982,12 +2863,12 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) desc[0].flags |= TXDESC_FLAG_FIRSTPKT; - tcb->index_start = adapter->tx_ring.send_idx; + tcb->index_start = tx_ring->send_idx; tcb->stale = 0; spin_lock_irqsave(&adapter->send_hw_lock, flags); - thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx); + thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx); if (thiscopy >= frag) { remainder = 0; @@ -2996,52 +2877,51 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) remainder = frag - thiscopy; } - memcpy(adapter->tx_ring.tx_desc_ring + - INDEX10(adapter->tx_ring.send_idx), desc, + memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx), + desc, sizeof(struct tx_desc) * thiscopy); - add_10bit(&adapter->tx_ring.send_idx, thiscopy); + add_10bit(&tx_ring->send_idx, thiscopy); - if (INDEX10(adapter->tx_ring.send_idx) == 0 || - INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) { - adapter->tx_ring.send_idx &= ~ET_DMA10_MASK; - adapter->tx_ring.send_idx ^= ET_DMA10_WRAP; + if (INDEX10(tx_ring->send_idx) == 0 || + INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) { + tx_ring->send_idx &= ~ET_DMA10_MASK; + tx_ring->send_idx ^= ET_DMA10_WRAP; } if (remainder) { - memcpy(adapter->tx_ring.tx_desc_ring, + memcpy(tx_ring->tx_desc_ring, desc + thiscopy, sizeof(struct tx_desc) * remainder); - add_10bit(&adapter->tx_ring.send_idx, remainder); + add_10bit(&tx_ring->send_idx, remainder); } - if (INDEX10(adapter->tx_ring.send_idx) == 0) { - if (adapter->tx_ring.send_idx) + if (INDEX10(tx_ring->send_idx) == 0) { + if (tx_ring->send_idx) tcb->index = NUM_DESC_PER_RING_TX - 1; else tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); } else - tcb->index = adapter->tx_ring.send_idx - 1; + tcb->index = tx_ring->send_idx - 1; spin_lock(&adapter->tcb_send_qlock); - if (adapter->tx_ring.send_tail) - adapter->tx_ring.send_tail->next = tcb; + if (tx_ring->send_tail) + tx_ring->send_tail->next = tcb; else - adapter->tx_ring.send_head = tcb; + tx_ring->send_head = tcb; - adapter->tx_ring.send_tail = tcb; + tx_ring->send_tail = tcb; WARN_ON(tcb->next != NULL); - adapter->tx_ring.used++; + tx_ring->used++; spin_unlock(&adapter->tcb_send_qlock); /* Write the new write pointer back to the device. */ - writel(adapter->tx_ring.send_idx, - &adapter->regs->txdma.service_request); + writel(tx_ring->send_idx, &adapter->regs->txdma.service_request); /* For Gig only, we use Tx Interrupt coalescing. Enable the software * timer to wake us up if this packet isn't followed by N more. 
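The send path above keeps send_idx as a 10-bit ring offset plus a separate wrap bit: INDEX10() strips the wrap bit, add_10bit() advances the offset, and the masking with ET_DMA10_MASK and the XOR with ET_DMA10_WRAP flip the wrap bit whenever the index passes the end of the descriptor ring, which is how hardware and driver tell a full ring from an empty one. The following standalone sketch models that arithmetic; the numeric mask values and the ring size are assumptions for illustration (the driver's constants live in et131x.h), the step is assumed to be no larger than the ring, and the wrap fix-up that nic_send_packet() performs separately is folded into one helper here.

#include <stdint.h>
#include <stdio.h>

#define ET_DMA10_MASK	0x3FF	/* assumed: low 10 bits hold the ring offset */
#define ET_DMA10_WRAP	0x400	/* assumed: wrap bit sits just above them    */
#define INDEX10(x)	((x) & ET_DMA10_MASK)
#define RING_ENTRIES	512	/* example ring size, not the driver's value */

/* Advance a 10-bit ring index by n entries, flipping the wrap bit on rollover. */
static void add_10bit_wrap(uint32_t *idx, uint32_t n)
{
	uint32_t next = INDEX10(*idx) + n;
	uint32_t wrap = *idx & ET_DMA10_WRAP;

	if (next >= RING_ENTRIES) {
		next -= RING_ENTRIES;	/* fold the offset back into the ring */
		wrap ^= ET_DMA10_WRAP;	/* remember that we passed the end    */
	}
	*idx = next | wrap;
}

int main(void)
{
	uint32_t idx = 0;
	int i;

	for (i = 0; i < 5; i++) {
		add_10bit_wrap(&idx, 200);
		printf("offset=%u wrap=%u\n", (unsigned)INDEX10(idx),
		       (idx & ET_DMA10_WRAP) ? 1U : 0U);
	}
	return 0;
}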
@@ -3056,19 +2936,16 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) } /* send_packet - Do the work to send a packet - * @skb: the packet(s) to send - * @adapter: a pointer to the device's private adapter structure - * - * Return 0 in almost all cases; non-zero value in extreme hard failure only. * * Assumption: Send spinlock has been acquired */ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) { int status; - struct tcb *tcb = NULL; + struct tcb *tcb; u16 *shbufva; unsigned long flags; + struct tx_ring *tx_ring = &adapter->tx_ring; /* All packets must have at least a MAC address and a protocol type */ if (skb->len < ETH_HLEN) @@ -3077,17 +2954,17 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) /* Get a TCB for this packet */ spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); - tcb = adapter->tx_ring.tcb_qhead; + tcb = tx_ring->tcb_qhead; if (tcb == NULL) { spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); return -ENOMEM; } - adapter->tx_ring.tcb_qhead = tcb->next; + tx_ring->tcb_qhead = tcb->next; - if (adapter->tx_ring.tcb_qhead == NULL) - adapter->tx_ring.tcb_qtail = NULL; + if (tx_ring->tcb_qhead == NULL) + tx_ring->tcb_qtail = NULL; spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); @@ -3111,30 +2988,26 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) if (status != 0) { spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); - if (adapter->tx_ring.tcb_qtail) - adapter->tx_ring.tcb_qtail->next = tcb; + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; else /* Apparently ready Q is empty. */ - adapter->tx_ring.tcb_qhead = tcb; + tx_ring->tcb_qhead = tcb; - adapter->tx_ring.tcb_qtail = tcb; + tx_ring->tcb_qtail = tcb; spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); return status; } - WARN_ON(adapter->tx_ring.used > NUM_TCB); + WARN_ON(tx_ring->used > NUM_TCB); return 0; } -/* et131x_send_packets - This function is called by the OS to send packets - * @skb: the packet(s) to send - * @netdev:device on which to TX the above packet(s) - * - * Return 0 in almost all cases; non-zero value in extreme hard failure only - */ +/* et131x_send_packets - This function is called by the OS to send packets */ static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) { int status = 0; struct et131x_adapter *adapter = netdev_priv(netdev); + struct tx_ring *tx_ring = &adapter->tx_ring; /* Send these packets * @@ -3143,7 +3016,7 @@ static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) */ /* TCB is not available */ - if (adapter->tx_ring.used >= NUM_TCB) { + if (tx_ring->used >= NUM_TCB) { /* NOTE: If there's an error on send, no need to queue the * packet under Linux; if we just send an error up to the * netif layer, it will resend the skb to us. 
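send_packet(), in the hunk above, takes a TCB off the ready queue under tcb_ready_qlock and, if nic_send_packet() fails, pushes it straight back on the tail. The queue is a plain singly linked list with head and tail pointers, which is why both tcb_qhead and tcb_qtail have to be touched on the empty and non-empty transitions. A standalone sketch of that pop/push pattern, with illustrative names and the spinlock omitted:

#include <stddef.h>
#include <stdio.h>

struct tcb {
	struct tcb *next;
	int id;
};

static struct tcb *qhead;	/* stands in for tx_ring->tcb_qhead */
static struct tcb *qtail;	/* stands in for tx_ring->tcb_qtail */

static struct tcb *tcb_pop(void)
{
	struct tcb *tcb = qhead;

	if (!tcb)
		return NULL;		/* ready queue exhausted   */
	qhead = tcb->next;
	if (!qhead)
		qtail = NULL;		/* queue just became empty */
	tcb->next = NULL;
	return tcb;
}

static void tcb_push(struct tcb *tcb)
{
	tcb->next = NULL;
	if (qtail)
		qtail->next = tcb;
	else
		qhead = tcb;		/* queue was empty, head follows too */
	qtail = tcb;
}

int main(void)
{
	struct tcb a = { NULL, 1 }, b = { NULL, 2 };
	struct tcb *t;

	tcb_push(&a);
	tcb_push(&b);
	while ((t = tcb_pop()) != NULL)
		printf("popped tcb %d\n", t->id);
	return 0;
}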
@@ -3187,6 +3060,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter, unsigned long flags; struct tx_desc *desc = NULL; struct net_device_stats *stats = &adapter->net_stats; + struct tx_ring *tx_ring = &adapter->tx_ring; u64 dma_addr; if (tcb->flags & FMP_DEST_BROAD) @@ -3204,9 +3078,8 @@ static inline void free_send_packet(struct et131x_adapter *adapter, * they point to */ do { - desc = (struct tx_desc *) - (adapter->tx_ring.tx_desc_ring + - INDEX10(tcb->index_start)); + desc = tx_ring->tx_desc_ring + + INDEX10(tcb->index_start); dma_addr = desc->addr_lo; dma_addr |= (u64)desc->addr_hi << 32; @@ -3221,8 +3094,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter, tcb->index_start &= ~ET_DMA10_MASK; tcb->index_start ^= ET_DMA10_WRAP; } - } while (desc != (adapter->tx_ring.tx_desc_ring + - INDEX10(tcb->index))); + } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index)); dev_kfree_skb_any(tcb->skb); } @@ -3234,20 +3106,19 @@ static inline void free_send_packet(struct et131x_adapter *adapter, adapter->net_stats.tx_packets++; - if (adapter->tx_ring.tcb_qtail) - adapter->tx_ring.tcb_qtail->next = tcb; + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; else /* Apparently ready Q is empty. */ - adapter->tx_ring.tcb_qhead = tcb; + tx_ring->tcb_qhead = tcb; - adapter->tx_ring.tcb_qtail = tcb; + tx_ring->tcb_qtail = tcb; spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); - WARN_ON(adapter->tx_ring.used < 0); + WARN_ON(tx_ring->used < 0); } /* et131x_free_busy_send_packets - Free and complete the stopped active sends - * @adapter: pointer to our adapter * * Assumption - Send spinlock has been acquired */ @@ -3256,21 +3127,22 @@ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) struct tcb *tcb; unsigned long flags; u32 freed = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; /* Any packets being sent? Check the first TCB on the send list */ spin_lock_irqsave(&adapter->tcb_send_qlock, flags); - tcb = adapter->tx_ring.send_head; + tcb = tx_ring->send_head; while (tcb != NULL && freed < NUM_TCB) { struct tcb *next = tcb->next; - adapter->tx_ring.send_head = next; + tx_ring->send_head = next; if (next == NULL) - adapter->tx_ring.send_tail = NULL; + tx_ring->send_tail = NULL; - adapter->tx_ring.used--; + tx_ring->used--; spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); @@ -3279,18 +3151,17 @@ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) spin_lock_irqsave(&adapter->tcb_send_qlock, flags); - tcb = adapter->tx_ring.send_head; + tcb = tx_ring->send_head; } WARN_ON(freed == NUM_TCB); spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); - adapter->tx_ring.used = 0; + tx_ring->used = 0; } /* et131x_handle_send_interrupt - Interrupt handler for sending processing - * @adapter: pointer to our adapter * * Re-claim the send resources, complete sends and get more to send from * the send wait queue. 
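et131x_handle_send_interrupt(), continued in the next hunk, reaps finished TCBs by comparing the index the hardware last serviced against each TCB's descriptor index. The two while loops encode a single rule: a TCB is complete once the serviced pointer has moved past it, either on the same wrap of the ring (strictly greater offset) or on the following wrap (offset that has not yet caught back up). A standalone sketch of that test; the mask values are assumed for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ET_DMA10_MASK	0x3FF	/* assumed */
#define ET_DMA10_WRAP	0x400	/* assumed */
#define INDEX10(x)	((x) & ET_DMA10_MASK)

/* True when the hardware's serviced index has passed this TCB's index. */
static bool tx_complete(uint32_t serviced, uint32_t tcb_index)
{
	bool different_wrap = (serviced ^ tcb_index) & ET_DMA10_WRAP;

	if (different_wrap)
		return INDEX10(serviced) < INDEX10(tcb_index);
	return INDEX10(serviced) > INDEX10(tcb_index);
}

int main(void)
{
	/* The serviced pointer is on the next wrap, at offset 5. */
	uint32_t serviced = ET_DMA10_WRAP | 5;

	printf("%d\n", tx_complete(serviced, 900));		   /* 1: lapped   */
	printf("%d\n", tx_complete(serviced, ET_DMA10_WRAP | 3)); /* 1: behind   */
	printf("%d\n", tx_complete(serviced, ET_DMA10_WRAP | 9)); /* 0: not done */
	return 0;
}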
@@ -3303,6 +3174,7 @@ static void et131x_handle_send_interrupt(struct et131x_adapter *adapter) u32 serviced; struct tcb *tcb; u32 index; + struct tx_ring *tx_ring = &adapter->tx_ring; serviced = readl(&adapter->regs->txdma.new_service_complete); index = INDEX10(serviced); @@ -3312,41 +3184,41 @@ static void et131x_handle_send_interrupt(struct et131x_adapter *adapter) */ spin_lock_irqsave(&adapter->tcb_send_qlock, flags); - tcb = adapter->tx_ring.send_head; + tcb = tx_ring->send_head; while (tcb && ((serviced ^ tcb->index) & ET_DMA10_WRAP) && index < INDEX10(tcb->index)) { - adapter->tx_ring.used--; - adapter->tx_ring.send_head = tcb->next; + tx_ring->used--; + tx_ring->send_head = tcb->next; if (tcb->next == NULL) - adapter->tx_ring.send_tail = NULL; + tx_ring->send_tail = NULL; spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); free_send_packet(adapter, tcb); spin_lock_irqsave(&adapter->tcb_send_qlock, flags); /* Goto the next packet */ - tcb = adapter->tx_ring.send_head; + tcb = tx_ring->send_head; } while (tcb && !((serviced ^ tcb->index) & ET_DMA10_WRAP) && index > (tcb->index & ET_DMA10_MASK)) { - adapter->tx_ring.used--; - adapter->tx_ring.send_head = tcb->next; + tx_ring->used--; + tx_ring->send_head = tcb->next; if (tcb->next == NULL) - adapter->tx_ring.send_tail = NULL; + tx_ring->send_tail = NULL; spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); free_send_packet(adapter, tcb); spin_lock_irqsave(&adapter->tcb_send_qlock, flags); /* Goto the next packet */ - tcb = adapter->tx_ring.send_head; + tcb = tx_ring->send_head; } /* Wake up the queue when we hit a low-water mark */ - if (adapter->tx_ring.used <= NUM_TCB / 3) + if (tx_ring->used <= NUM_TCB / 3) netif_wake_queue(adapter->netdev); spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); @@ -3548,9 +3420,7 @@ static struct ethtool_ops et131x_ethtool_ops = { .get_link = ethtool_op_get_link, }; -/* et131x_hwaddr_init - set up the MAC Address on the ET1310 - * @adapter: pointer to our private adapter structure - */ +/* et131x_hwaddr_init - set up the MAC Address on the ET1310 */ static void et131x_hwaddr_init(struct et131x_adapter *adapter) { /* If have our default mac from init and no mac address from @@ -3580,14 +3450,12 @@ static void et131x_hwaddr_init(struct et131x_adapter *adapter) } /* et131x_pci_init - initial PCI setup - * @adapter: pointer to our private adapter structure - * @pdev: our PCI device * * Perform the initial setup of PCI registers and if possible initialise * the MAC address. At this point the I/O registers have yet to be mapped */ static int et131x_pci_init(struct et131x_adapter *adapter, - struct pci_dev *pdev) + struct pci_dev *pdev) { u16 max_payload; int i, rc; @@ -3704,21 +3572,14 @@ static void et131x_error_timer_handler(unsigned long data) mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000); } -/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx - * @adapter: pointer to our private adapter structure - */ +/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */ static void et131x_adapter_memory_free(struct et131x_adapter *adapter) { - /* Free DMA memory */ et131x_tx_dma_memory_free(adapter); et131x_rx_dma_memory_free(adapter); } /* et131x_adapter_memory_alloc - * @adapter: pointer to our private adapter structure - * - * Returns 0 on success, errno on failure (as defined in errno.h). - * * Allocate all the memory blocks for send, receive and others. 
*/ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) @@ -3727,14 +3588,14 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) /* Allocate memory for the Tx Ring */ status = et131x_tx_dma_memory_alloc(adapter); - if (status != 0) { + if (status) { dev_err(&adapter->pdev->dev, "et131x_tx_dma_memory_alloc FAILED\n"); return status; } /* Receive buffer memory allocation */ status = et131x_rx_dma_memory_alloc(adapter); - if (status != 0) { + if (status) { dev_err(&adapter->pdev->dev, "et131x_rx_dma_memory_alloc FAILED\n"); et131x_tx_dma_memory_free(adapter); @@ -3744,8 +3605,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) /* Init receive data structures */ status = et131x_init_recv(adapter); if (status) { - dev_err(&adapter->pdev->dev, - "et131x_init_recv FAILED\n"); + dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n"); et131x_adapter_memory_free(adapter); } return status; @@ -3756,97 +3616,89 @@ static void et131x_adjust_link(struct net_device *netdev) struct et131x_adapter *adapter = netdev_priv(netdev); struct phy_device *phydev = adapter->phydev; - if (phydev && phydev->link != adapter->link) { - /* Check to see if we are in coma mode and if - * so, disable it because we will not be able - * to read PHY values until we are out. - */ - if (et1310_in_phy_coma(adapter)) - et1310_disable_phy_coma(adapter); - - adapter->link = phydev->link; - phy_print_status(phydev); - - if (phydev->link) { - adapter->boot_coma = 20; - if (phydev && phydev->speed == SPEED_10) { - /* NOTE - Is there a way to query this without - * TruePHY? - * && TRU_QueryCoreType(adapter->hTruePhy, 0)== - * EMI_TRUEPHY_A13O) { - */ - u16 register18; - - et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, - &register18); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18 | 0x4); - et131x_mii_write(adapter, PHY_INDEX_REG, - register18 | 0x8402); - et131x_mii_write(adapter, PHY_DATA_REG, - register18 | 511); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18); - } + if (!phydev) + return; + if (phydev->link == adapter->link) + return; - et1310_config_flow_control(adapter); + /* Check to see if we are in coma mode and if + * so, disable it because we will not be able + * to read PHY values until we are out. + */ + if (et1310_in_phy_coma(adapter)) + et1310_disable_phy_coma(adapter); - if (phydev && phydev->speed == SPEED_1000 && - adapter->registry_jumbo_packet > 2048) { - u16 reg; + adapter->link = phydev->link; + phy_print_status(phydev); - et131x_mii_read(adapter, PHY_CONFIG, &reg); - reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; - reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; - et131x_mii_write(adapter, PHY_CONFIG, reg); - } + if (phydev->link) { + adapter->boot_coma = 20; + if (phydev->speed == SPEED_10) { + u16 register18; + + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, + &register18); + et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, + register18 | 0x4); + et131x_mii_write(adapter, PHY_INDEX_REG, + register18 | 0x8402); + et131x_mii_write(adapter, PHY_DATA_REG, + register18 | 511); + et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, + register18); + } - et131x_set_rx_dma_timer(adapter); - et1310_config_mac_regs2(adapter); - } else { - adapter->boot_coma = 0; + et1310_config_flow_control(adapter); - if (phydev->speed == SPEED_10) { - /* NOTE - Is there a way to query this without - * TruePHY?
- * && TRU_QueryCoreType(adapter->hTruePhy, 0) == - * EMI_TRUEPHY_A13O) - */ - u16 register18; - - et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, - &register18); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18 | 0x4); - et131x_mii_write(adapter, PHY_INDEX_REG, - register18 | 0x8402); - et131x_mii_write(adapter, PHY_DATA_REG, - register18 | 511); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18); - } + if (phydev->speed == SPEED_1000 && + adapter->registry_jumbo_packet > 2048) { + u16 reg; - /* Free the packets being actively sent & stopped */ - et131x_free_busy_send_packets(adapter); + et131x_mii_read(adapter, PHY_CONFIG, &reg); + reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; + reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; + et131x_mii_write(adapter, PHY_CONFIG, reg); + } - /* Re-initialize the send structures */ - et131x_init_send(adapter); + et131x_set_rx_dma_timer(adapter); + et1310_config_mac_regs2(adapter); + } else { + adapter->boot_coma = 0; + + if (phydev->speed == SPEED_10) { + u16 register18; + + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, + &register18); + et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, + register18 | 0x4); + et131x_mii_write(adapter, PHY_INDEX_REG, + register18 | 0x8402); + et131x_mii_write(adapter, PHY_DATA_REG, + register18 | 511); + et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, + register18); + } - /* Bring the device back to the state it was during - * init prior to autonegotiation being complete. This - * way, when we get the auto-neg complete interrupt, - * we can complete init by calling config_mac_regs2. - */ - et131x_soft_reset(adapter); + /* Free the packets being actively sent & stopped */ + et131x_free_busy_send_packets(adapter); - /* Setup ET1310 as per the documentation */ - et131x_adapter_setup(adapter); + /* Re-initialize the send structures */ + et131x_init_send(adapter); - /* perform reset of tx/rx */ - et131x_disable_txrx(netdev); - et131x_enable_txrx(netdev); - } + /* Bring the device back to the state it was during + * init prior to autonegotiation being complete. This + * way, when we get the auto-neg complete interrupt, + * we can complete init by calling config_mac_regs2. + */ + et131x_soft_reset(adapter); + + /* Setup ET1310 as per the documentation */ + et131x_adapter_setup(adapter); + /* perform reset of tx/rx */ + et131x_disable_txrx(netdev); + et131x_enable_txrx(netdev); } } @@ -3883,21 +3735,20 @@ static int et131x_mii_probe(struct net_device *netdev) phydev->advertising = phydev->supported; adapter->phydev = phydev; - dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + dev_info(&adapter->pdev->dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", phydev->drv->name, dev_name(&phydev->dev)); return 0; } /* et131x_adapter_init - * @adapter: pointer to the private adapter struct - * @pdev: pointer to the PCI device * * Initialize the data structures for the et131x_adapter object and link * them together with the platform provided device structures.
*/ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, - struct pci_dev *pdev) + struct pci_dev *pdev) { static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; @@ -3925,7 +3776,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, } /* et131x_pci_remove - * @pdev: a pointer to the device's pci_dev structure * * Registered in the pci_driver structure, this function is called when the * PCI subsystem detects that a PCI device which matches the information @@ -3952,9 +3802,7 @@ static void et131x_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -/* et131x_up - Bring up a device for use. - * @netdev: device to be opened - */ +/* et131x_up - Bring up a device for use. */ static void et131x_up(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -3963,9 +3811,7 @@ static void et131x_up(struct net_device *netdev) phy_start(adapter->phydev); } -/* et131x_down - Bring down the device - * @netdev: device to be brought down - */ +/* et131x_down - Bring down the device */ static void et131x_down(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -4022,7 +3868,9 @@ static irqreturn_t et131x_isr(int irq, void *dev_id) { bool handled = true; struct net_device *netdev = (struct net_device *)dev_id; - struct et131x_adapter *adapter = NULL; + struct et131x_adapter *adapter = netdev_priv(netdev); + struct rx_ring *rx_ring = &adapter->rx_ring; + struct tx_ring *tx_ring = &adapter->tx_ring; u32 status; if (!netif_device_present(netdev)) { @@ -4030,8 +3878,6 @@ static irqreturn_t et131x_isr(int irq, void *dev_id) goto out; } - adapter = netdev_priv(netdev); - /* If the adapter is in low power state, then it should not * recognize any interrupt */ @@ -4061,13 +3907,13 @@ static irqreturn_t et131x_isr(int irq, void *dev_id) /* This is our interrupt, so process accordingly */ if (status & ET_INTR_WATCHDOG) { - struct tcb *tcb = adapter->tx_ring.send_head; + struct tcb *tcb = tx_ring->send_head; if (tcb) if (++tcb->stale > 1) status |= ET_INTR_TXDMA_ISR; - if (adapter->rx_ring.unfinished_receives) + if (rx_ring->unfinished_receives) status |= ET_INTR_RXDMA_XFR_DONE; else if (tcb == NULL) writel(0, &adapter->regs->global.watchdog_timer); @@ -4075,7 +3921,7 @@ static irqreturn_t et131x_isr(int irq, void *dev_id) status &= ~ET_INTR_WATCHDOG; } - if (status == 0) { + if (!status) { /* This interrupt has in some way been "handled" by * the ISR. Either it was a spurious Rx interrupt, or * it was a Tx interrupt that has been filtered by @@ -4101,7 +3947,6 @@ static irqreturn_t et131x_isr(int irq, void *dev_id) } /* et131x_isr_handler - The ISR handler - * @p_adapter, a pointer to the device's private adapter structure * * scheduled to run in a deferred context by the ISR. This is where the ISR's * work actually gets done. 
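et131x_isr() above runs in interrupt context, so it only latches the status word, masks what it will not handle, and schedules deferred work; et131x_isr_handler(), continued below, then does the actual processing from a work item and re-enables interrupts when it is done. The sketch below models that split in user space with a worker thread standing in for the kernel work queue. It illustrates the deferral pattern only and is not how the driver is wired (the driver uses a work_struct, and the status bits here are made-up values):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static uint32_t pending_status;
static bool stop;

/* Stand-in for the top half: cheap, never sleeps, just records the cause. */
static void fake_isr(uint32_t status)
{
	pthread_mutex_lock(&lock);
	pending_status |= status;	/* latch the interrupt cause    */
	pthread_cond_signal(&kick);	/* "schedule the deferred work" */
	pthread_mutex_unlock(&lock);
}

/* Stand-in for the bottom half: does the slow handling outside irq context. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		uint32_t status;

		pthread_mutex_lock(&lock);
		while (!pending_status && !stop)
			pthread_cond_wait(&kick, &lock);
		if (!pending_status && stop) {
			pthread_mutex_unlock(&lock);
			break;
		}
		status = pending_status;
		pending_status = 0;
		pthread_mutex_unlock(&lock);

		printf("handling status 0x%08x in deferred context\n",
		       (unsigned)status);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	fake_isr(0x04);			/* pretend: a tx-done style event */
	fake_isr(0x10);			/* pretend: an rx-done style event */

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}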
@@ -4125,17 +3970,15 @@ static void et131x_isr_handler(struct work_struct *work) if (status & ET_INTR_RXDMA_XFR_DONE) et131x_handle_recv_interrupt(adapter); - status &= 0xffffffd7; + status &= ~(ET_INTR_TXDMA_ERR | ET_INTR_RXDMA_XFR_DONE); if (!status) goto out; /* Handle the TXDMA Error interrupt */ if (status & ET_INTR_TXDMA_ERR) { - u32 txdma_err; - /* Following read also clears the register (COR) */ - txdma_err = readl(&iomem->txdma.tx_dma_error); + u32 txdma_err = readl(&iomem->txdma.tx_dma_error); dev_warn(&adapter->pdev->dev, "TXDMA_ERR interrupt, error = %d\n", @@ -4281,11 +4124,7 @@ static void et131x_isr_handler(struct work_struct *work) et131x_enable_interrupts(adapter); } -/* et131x_stats - Return the current device statistics. - * @netdev: device whose stats are being queried - * - * Returns 0 on success, errno on failure (as defined in errno.h) - */ +/* et131x_stats - Return the current device statistics */ static struct net_device_stats *et131x_stats(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -4327,11 +4166,7 @@ static struct net_device_stats *et131x_stats(struct net_device *netdev) return stats; } -/* et131x_open - Open the device for use. - * @netdev: device to be opened - * - * Returns 0 on success, errno on failure (as defined in errno.h) - */ +/* et131x_open - Open the device for use. */ static int et131x_open(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -4360,11 +4195,7 @@ static int et131x_open(struct net_device *netdev) return result; } -/* et131x_close - Close the device - * @netdev: device to be closed - * - * Returns 0 on success, errno on failure (as defined in errno.h) - */ +/* et131x_close - Close the device */ static int et131x_close(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -4382,8 +4213,6 @@ static int et131x_close(struct net_device *netdev) * @netdev: device on which the control request is being made * @reqbuf: a pointer to the IOCTL request buffer * @cmd: the IOCTL command code - * - * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) @@ -4400,8 +4229,6 @@ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, * @adapter: pointer to our private adapter structure * * FIXME: lot of dups with MAC code - * - * Returns 0 on success, errno on failure */ static int et131x_set_packet_filter(struct et131x_adapter *adapter) { @@ -4460,9 +4287,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter) return status; } -/* et131x_multicast - The handler to configure multicasting on the interface - * @netdev: a pointer to a net_device struct representing the device - */ +/* et131x_multicast - The handler to configure multicasting on the interface */ static void et131x_multicast(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); @@ -4522,27 +4347,21 @@ static void et131x_multicast(struct net_device *netdev) * NOTE - This block will always update the multicast_list with the * hardware, even if the addresses aren't the same. 
*/ - if (packet_filter != adapter->packet_filter) { - /* Call the device's filter function */ + if (packet_filter != adapter->packet_filter) et131x_set_packet_filter(adapter); - } + spin_unlock_irqrestore(&adapter->lock, flags); } -/* et131x_tx - The handler to tx a packet on the device - * @skb: data to be Tx'd - * @netdev: device on which data is to be Tx'd - * - * Returns 0 on success, errno on failure (as defined in errno.h) - */ +/* et131x_tx - The handler to tx a packet on the device */ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) { int status = 0; struct et131x_adapter *adapter = netdev_priv(netdev); + struct tx_ring *tx_ring = &adapter->tx_ring; /* stop the queue if it's getting full */ - if (adapter->tx_ring.used >= NUM_TCB - 1 && - !netif_queue_stopped(netdev)) + if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev)) netif_stop_queue(netdev); /* Save the timestamp for the TX timeout watchdog */ @@ -4562,7 +4381,6 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) } /* et131x_tx_timeout - Timeout handler - * @netdev: a pointer to a net_device struct representing the device * * The handler called when a Tx request times out. The timeout period is * specified by the 'tx_timeo" element in the net_device structure (see @@ -4571,6 +4389,7 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) static void et131x_tx_timeout(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); + struct tx_ring *tx_ring = &adapter->tx_ring; struct tcb *tcb; unsigned long flags; @@ -4593,7 +4412,7 @@ static void et131x_tx_timeout(struct net_device *netdev) /* Is send stuck? */ spin_lock_irqsave(&adapter->tcb_send_qlock, flags); - tcb = adapter->tx_ring.send_head; + tcb = tx_ring->send_head; if (tcb != NULL) { tcb->count++; @@ -4619,12 +4438,7 @@ static void et131x_tx_timeout(struct net_device *netdev) spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); } -/* et131x_change_mtu - The handler called to change the MTU for the device - * @netdev: device whose MTU is to be changed - * @new_mtu: the desired MTU - * - * Returns 0 on success, errno on failure (as defined in errno.h) - */ +/* et131x_change_mtu - The handler called to change the MTU for the device */ static int et131x_change_mtu(struct net_device *netdev, int new_mtu) { int result = 0; @@ -4669,22 +4483,13 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu) return result; } -/* et131x_set_mac_addr - handler to change the MAC address for the device - * @netdev: device whose MAC is to be changed - * @new_mac: the desired MAC address - * - * Returns 0 on success, errno on failure (as defined in errno.h) - * - * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 - */ +/* et131x_set_mac_addr - handler to change the MAC address for the device */ static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); struct sockaddr *address = new_mac; - /* begin blux */ - if (adapter == NULL) return -ENODEV; @@ -4746,15 +4551,13 @@ static const struct net_device_ops et131x_netdev_ops = { * @pdev: a pointer to the device's pci_dev structure * @ent: this device's entry in the pci_device_id table * - * Returns 0 on success, errno on failure (as defined in errno.h) - * * Registered in the pci_driver structure, this function is called when the * PCI subsystem finds a new PCI device which matches the information * contained in the pci_device_id table. 
This routine is the equivalent to * a device insertion routine. */ static int et131x_pci_setup(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { struct net_device *netdev; struct et131x_adapter *adapter; @@ -4930,7 +4733,7 @@ static int et131x_pci_setup(struct pci_dev *pdev, goto out; } -static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { +static const struct pci_device_id et131x_pci_table[] = { { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, {0,} diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h index bbe78a703a2377..2ac6e998011794 100644 --- a/drivers/staging/et131x/et131x.h +++ b/drivers/staging/et131x/et131x.h @@ -1668,43 +1668,3 @@ struct address_map { #define LED_100TX_SHIFT 4 /* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */ - -/* Defines for PHY access routines */ - -/* Define bit operation flags */ -#define TRUEPHY_BIT_CLEAR 0 -#define TRUEPHY_BIT_SET 1 -#define TRUEPHY_BIT_READ 2 - -/* Define read/write operation flags */ -#ifndef TRUEPHY_READ -#define TRUEPHY_READ 0 -#define TRUEPHY_WRITE 1 -#define TRUEPHY_MASK 2 -#endif - -/* Define master/slave configuration values */ -#define TRUEPHY_CFG_SLAVE 0 -#define TRUEPHY_CFG_MASTER 1 - -/* Define MDI/MDI-X settings */ -#define TRUEPHY_MDI 0 -#define TRUEPHY_MDIX 1 -#define TRUEPHY_AUTO_MDI_MDIX 2 - -/* Define 10Base-T link polarities */ -#define TRUEPHY_POLARITY_NORMAL 0 -#define TRUEPHY_POLARITY_INVERTED 1 - -/* Define auto-negotiation results */ -#define TRUEPHY_ANEG_NOT_COMPLETE 0 -#define TRUEPHY_ANEG_COMPLETE 1 -#define TRUEPHY_ANEG_DISABLED 2 - -/* Define duplex advertisement flags */ -#define TRUEPHY_ADV_DUPLEX_NONE 0x00 -#define TRUEPHY_ADV_DUPLEX_FULL 0x01 -#define TRUEPHY_ADV_DUPLEX_HALF 0x02 -#define TRUEPHY_ADV_DUPLEX_BOTH \ - (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF) - diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c index 817f837b240d65..edd5cef300d034 100644 --- a/drivers/staging/frontier/alphatrack.c +++ b/drivers/staging/frontier/alphatrack.c @@ -35,7 +35,6 @@ #include #include -#include #include #include #include diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c index 074b0e5bcc68cb..0e499ce5f0d775 100644 --- a/drivers/staging/frontier/tranzport.c +++ b/drivers/staging/frontier/tranzport.c @@ -34,7 +34,6 @@ #include #include -#include #include #include #include diff --git a/drivers/staging/ft1000/ft1000-pcmcia/boot.h b/drivers/staging/ft1000/ft1000-pcmcia/boot.h index 9dce54eae1cf5b..60c015c1c28a25 100644 --- a/drivers/staging/ft1000/ft1000-pcmcia/boot.h +++ b/drivers/staging/ft1000/ft1000-pcmcia/boot.h @@ -1,158 +1,158 @@ -//--------------------------------------------------------------------------- -// FT1000 driver for Flarion Flash OFDM NIC Device -// -// Copyright (C) 2002 Flarion Technologies, All rights reserved. -// -// This program is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by the Free -// Software Foundation; either version 2 of the License, or (at your option) any -// later version. This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -// more details. 
You should have received a copy of the GNU General Public -// License along with this program; if not, write to the -// Free Software Foundation, Inc., 59 Temple Place - -// Suite 330, Boston, MA 02111-1307, USA. -//--------------------------------------------------------------------------- -// -// File: boot.h -// -// Description: boatloader -// -// History: -// 1/11/05 Whc Ported to Linux. -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + FT1000 driver for Flarion Flash OFDM NIC Device + + Copyright (C) 2002 Flarion Technologies, All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) any + later version. This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. You should have received a copy of the GNU General Public + License along with this program; if not, write to the + Free Software Foundation, Inc., 59 Temple Place - + Suite 330, Boston, MA 02111-1307, USA. + --------------------------------------------------------------------------- + + File: boot.h + + Description: boatloader + + History: + 1/11/05 Whc Ported to Linux. + +---------------------------------------------------------------------------*/ #ifndef _BOOTH_ #define _BOOTH_ -// Official bootloader -static unsigned char bootimage [] = { -0x00,0x00,0x01,0x5E,0x00,0x00 -,0x00,0x00,0x00,0x00,0x02,0xD7 -,0x00,0x00,0x01,0x5E,0x46,0xB3 -,0xE6,0x02,0x00,0x98,0xE6,0x8C -,0x00,0x98,0xFB,0x92,0xFF,0xFF -,0x98,0xFB,0x94,0xFF,0xFF,0x98 -,0xFB,0x06,0x08,0x00,0x98,0xFB -,0x96,0x84,0x00,0x98,0xFB,0x08 -,0x1C,0x00,0x98,0xFB,0x51,0x25 -,0x10,0x1C,0x00,0xE6,0x51,0x01 -,0x07,0xFD,0x4C,0xFF,0x20,0xF5 -,0x51,0x02,0x20,0x08,0x00,0x4C -,0xFF,0x20,0x3C,0x00,0xC0,0x64 -,0x98,0xC0,0x66,0x98,0xC0,0x68 -,0x98,0xC0,0x6A,0x98,0xC0,0x6C -,0x98,0x90,0x08,0x90,0x09,0x90 -,0x0A,0x90,0x0B,0x90,0x0C,0x90 -,0x0D,0x90,0x0E,0x90,0x0F,0x90 -,0x04,0x90,0x06,0xFB,0x51,0x22 -,0x16,0x08,0x03,0xFB,0x51,0x52 -,0x16,0x08,0x04,0xFB,0x51,0x24 -,0x2B,0x08,0x06,0xFB,0x51,0x54 -,0x2B,0x08,0x07,0xFB,0x51,0x24 -,0x2B,0x08,0x09,0xFB,0x51,0x54 -,0x2B,0x08,0x0A,0xFB,0x51,0x12 -,0x16,0x08,0x0C,0xFB,0x51,0x52 -,0x16,0x08,0x0D,0x78,0x00,0x00 -,0x00,0x16,0x00,0x00,0xEC,0x31 -,0xAE,0x00,0x00,0x81,0x4C,0x0F -,0xE6,0x43,0xFF,0xEC,0x31,0x4E -,0x00,0x00,0x91,0xEC,0x31,0xAE -,0x00,0x00,0x91,0x4C,0x0F,0xE6 -,0x43,0xFF,0xEC,0x31,0x5E,0x00 -,0x00,0xA1,0xEB,0x31,0x08,0x00 -,0x00,0xA6,0xEB,0x31,0x08,0x00 -,0x00,0xAC,0x3C,0x00,0xEB,0x31 -,0x08,0x00,0x00,0xA8,0x76,0xFE -,0xFE,0x08,0xEB,0x31,0x08,0x20 -,0x00,0x00,0x76,0xFF,0xFF,0x18 -,0xED,0x31,0x08,0x20,0x00,0x00 -,0x26,0x10,0x04,0x10,0xF5,0x3C -,0x01,0x3C,0x00,0x08,0x01,0x12 -,0x3C,0x11,0x3C,0x00,0x08,0x01 -,0x0B,0x08,0x00,0x6D,0xEC,0x31 -,0xAE,0x20,0x00,0x06,0xED,0x4D -,0x08,0x00,0x00,0x67,0x80,0x6F -,0x00,0x01,0x0B,0x6F,0x00,0x02 -,0x2E,0x76,0xEE,0x01,0x48,0x06 -,0x01,0x39,0xED,0x4D,0x18,0x00 -,0x02,0xED,0x4D,0x08,0x00,0x04 -,0x14,0x06,0xA4,0xED,0x31,0x22 -,0x00,0x00,0xAC,0x76,0xEE,0x07 -,0x48,0x6D,0x22,0x01,0x1E,0x08 -,0x01,0x58,0xEB,0x31,0x08,0x00 -,0x00,0xAC,0x06,0xFF,0xBA,0x3C -,0x00,0xEB,0x31,0x08,0x20,0x00 -,0x04,0x3C,0x30,0xEB,0x31,0x08 
-,0x20,0x00,0x02,0x3C,0x10,0xEB -,0x31,0x08,0x20,0x00,0x00,0xED -,0x31,0x08,0x20,0x00,0x00,0x04 -,0x10,0xF7,0xED,0x31,0x08,0x00 -,0x00,0xA2,0x91,0x00,0x9C,0x3C -,0x80,0xEB,0x31,0x08,0x20,0x00 -,0x04,0x3C,0x20,0xEB,0x31,0x08 -,0x20,0x00,0x02,0x3C,0x10,0xEB -,0x31,0x08,0x20,0x00,0x00,0xED -,0x31,0x08,0x20,0x00,0x00,0x04 -,0x10,0xF7,0xED,0x31,0x08,0x20 -,0x00,0x04,0x42,0x10,0x90,0x08 -,0xEC,0x31,0xAE,0x20,0x00,0x06 -,0xA4,0x41,0x08,0x00,0xB6,0xED -,0x41,0x28,0x7D,0xFF,0xFF,0x22 -,0xB3,0x40,0x98,0x2A,0x32,0xEB -,0x41,0x28,0xB4,0x43,0xFC,0x05 -,0xFF,0xE6,0xA0,0x31,0x20,0x00 -,0x06,0xEB,0x31,0x08,0x20,0x00 -,0x04,0x3C,0x20,0xEB,0x31,0x08 -,0x20,0x00,0x02,0x3C,0x10,0xEB -,0x31,0x08,0x20,0x00,0x00,0xED -,0x31,0x08,0x20,0x00,0x00,0x04 -,0x10,0xF7,0xED,0x31,0x08,0x20 -,0x00,0x04,0x42,0x10,0x90,0x08 -,0xEC,0x31,0xAE,0x20,0x00,0x06 -,0xA4,0x41,0x08,0x00,0x68,0xED -,0x41,0x28,0x7D,0xFF,0xFF,0x22 -,0xB3,0x40,0x98,0x2A,0x32,0xEB -,0x41,0x28,0xB4,0x43,0xFC,0x05 -,0xFF,0xE6,0x48,0x04,0xEB,0x31 -,0x08,0x20,0x00,0x04,0xEB,0x31 -,0x18,0x20,0x00,0x02,0x3C,0x11 -,0xEB,0x31,0x18,0x20,0x00,0x00 -,0xED,0x31,0x08,0x20,0x00,0x00 -,0x04,0x10,0xF7,0xED,0x31,0x08 -,0x20,0x00,0x02,0x66,0x00,0x6F -,0x00,0x01,0x16,0x76,0xEE,0x06 -,0x48,0x4A,0x1E,0x48,0x04,0xED -,0x31,0x08,0x20,0x00,0x04,0xEB -,0x31,0x08,0x00,0x00,0xA4,0x48 -,0x04,0xED,0x31,0x08,0x20,0x00 -,0x04,0xEB,0x31,0x08,0x00,0x00 -,0xA2,0x48,0x04,0x20,0x20,0x4A -,0x7C,0x46,0x82,0x50,0x05,0x50 -,0x15,0xB5,0x1E,0x98,0xED,0x31 -,0x08,0x00,0x00,0xA8,0x10,0x47 -,0x3B,0x2C,0x01,0xDB,0x40,0x11 -,0x98,0xC1,0x1E,0x98,0x10,0x07 -,0x30,0xF9,0x40,0x07,0x18,0x98 -,0x2A,0x10,0xEB,0x31,0x08,0x00 -,0x00,0xA8,0xA4,0x1E,0x98,0xBB -,0x1E,0x98,0x50,0x14,0x50,0x04 -,0x46,0x83,0x48,0x04,0x02,0x01 -,0x00,0x50,0x05,0x50,0x15,0x10 -,0x87,0x3F,0x90,0x2B,0x18,0x01 -,0x00,0xC0,0x31,0x00,0x00,0xAE -,0xDF,0x41,0x00,0x08,0x00,0x1A -,0x42,0x11,0x67,0x01,0xDF,0x41 -,0x02,0x08,0x00,0x10,0x42,0x11 -,0x62,0x01,0xB4,0x43,0x4A,0x68 -,0x50,0x14,0x50,0x04,0x24,0x10 -,0x48,0x04,0xF2,0x31,0x00,0x01 -,0x00,0x00,0xAE,0xF6,0x31,0x00 -,0x01,0x00,0x00,0xAE,0x62,0xE4 -,0xE5,0x61,0x04,0x48,0x04,0xE5 -,0x63,0x05,0x48,0x04,0x20,0x20 -,0x00,0x00,0x00,0x00 +/* Official bootloader */ +static unsigned char bootimage[] = { + 0x00, 0x00, 0x01, 0x5E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0xD7, + 0x00, 0x00, 0x01, 0x5E, 0x46, 0xB3, + 0xE6, 0x02, 0x00, 0x98, 0xE6, 0x8C, + 0x00, 0x98, 0xFB, 0x92, 0xFF, 0xFF, + 0x98, 0xFB, 0x94, 0xFF, 0xFF, 0x98, + 0xFB, 0x06, 0x08, 0x00, 0x98, 0xFB, + 0x96, 0x84, 0x00, 0x98, 0xFB, 0x08, + 0x1C, 0x00, 0x98, 0xFB, 0x51, 0x25, + 0x10, 0x1C, 0x00, 0xE6, 0x51, 0x01, + 0x07, 0xFD, 0x4C, 0xFF, 0x20, 0xF5, + 0x51, 0x02, 0x20, 0x08, 0x00, 0x4C, + 0xFF, 0x20, 0x3C, 0x00, 0xC0, 0x64, + 0x98, 0xC0, 0x66, 0x98, 0xC0, 0x68, + 0x98, 0xC0, 0x6A, 0x98, 0xC0, 0x6C, + 0x98, 0x90, 0x08, 0x90, 0x09, 0x90, + 0x0A, 0x90, 0x0B, 0x90, 0x0C, 0x90, + 0x0D, 0x90, 0x0E, 0x90, 0x0F, 0x90, + 0x04, 0x90, 0x06, 0xFB, 0x51, 0x22, + 0x16, 0x08, 0x03, 0xFB, 0x51, 0x52, + 0x16, 0x08, 0x04, 0xFB, 0x51, 0x24, + 0x2B, 0x08, 0x06, 0xFB, 0x51, 0x54, + 0x2B, 0x08, 0x07, 0xFB, 0x51, 0x24, + 0x2B, 0x08, 0x09, 0xFB, 0x51, 0x54, + 0x2B, 0x08, 0x0A, 0xFB, 0x51, 0x12, + 0x16, 0x08, 0x0C, 0xFB, 0x51, 0x52, + 0x16, 0x08, 0x0D, 0x78, 0x00, 0x00, + 0x00, 0x16, 0x00, 0x00, 0xEC, 0x31, + 0xAE, 0x00, 0x00, 0x81, 0x4C, 0x0F, + 0xE6, 0x43, 0xFF, 0xEC, 0x31, 0x4E, + 0x00, 0x00, 0x91, 0xEC, 0x31, 0xAE, + 0x00, 0x00, 0x91, 0x4C, 0x0F, 0xE6, + 0x43, 0xFF, 0xEC, 0x31, 0x5E, 0x00, + 0x00, 0xA1, 0xEB, 0x31, 0x08, 0x00, + 0x00, 0xA6, 0xEB, 0x31, 0x08, 
0x00, + 0x00, 0xAC, 0x3C, 0x00, 0xEB, 0x31, + 0x08, 0x00, 0x00, 0xA8, 0x76, 0xFE, + 0xFE, 0x08, 0xEB, 0x31, 0x08, 0x20, + 0x00, 0x00, 0x76, 0xFF, 0xFF, 0x18, + 0xED, 0x31, 0x08, 0x20, 0x00, 0x00, + 0x26, 0x10, 0x04, 0x10, 0xF5, 0x3C, + 0x01, 0x3C, 0x00, 0x08, 0x01, 0x12, + 0x3C, 0x11, 0x3C, 0x00, 0x08, 0x01, + 0x0B, 0x08, 0x00, 0x6D, 0xEC, 0x31, + 0xAE, 0x20, 0x00, 0x06, 0xED, 0x4D, + 0x08, 0x00, 0x00, 0x67, 0x80, 0x6F, + 0x00, 0x01, 0x0B, 0x6F, 0x00, 0x02, + 0x2E, 0x76, 0xEE, 0x01, 0x48, 0x06, + 0x01, 0x39, 0xED, 0x4D, 0x18, 0x00, + 0x02, 0xED, 0x4D, 0x08, 0x00, 0x04, + 0x14, 0x06, 0xA4, 0xED, 0x31, 0x22, + 0x00, 0x00, 0xAC, 0x76, 0xEE, 0x07, + 0x48, 0x6D, 0x22, 0x01, 0x1E, 0x08, + 0x01, 0x58, 0xEB, 0x31, 0x08, 0x00, + 0x00, 0xAC, 0x06, 0xFF, 0xBA, 0x3C, + 0x00, 0xEB, 0x31, 0x08, 0x20, 0x00, + 0x04, 0x3C, 0x30, 0xEB, 0x31, 0x08, + 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB, + 0x31, 0x08, 0x20, 0x00, 0x00, 0xED, + 0x31, 0x08, 0x20, 0x00, 0x00, 0x04, + 0x10, 0xF7, 0xED, 0x31, 0x08, 0x00, + 0x00, 0xA2, 0x91, 0x00, 0x9C, 0x3C, + 0x80, 0xEB, 0x31, 0x08, 0x20, 0x00, + 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08, + 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB, + 0x31, 0x08, 0x20, 0x00, 0x00, 0xED, + 0x31, 0x08, 0x20, 0x00, 0x00, 0x04, + 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20, + 0x00, 0x04, 0x42, 0x10, 0x90, 0x08, + 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06, + 0xA4, 0x41, 0x08, 0x00, 0xB6, 0xED, + 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22, + 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB, + 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05, + 0xFF, 0xE6, 0xA0, 0x31, 0x20, 0x00, + 0x06, 0xEB, 0x31, 0x08, 0x20, 0x00, + 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08, + 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB, + 0x31, 0x08, 0x20, 0x00, 0x00, 0xED, + 0x31, 0x08, 0x20, 0x00, 0x00, 0x04, + 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20, + 0x00, 0x04, 0x42, 0x10, 0x90, 0x08, + 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06, + 0xA4, 0x41, 0x08, 0x00, 0x68, 0xED, + 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22, + 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB, + 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05, + 0xFF, 0xE6, 0x48, 0x04, 0xEB, 0x31, + 0x08, 0x20, 0x00, 0x04, 0xEB, 0x31, + 0x18, 0x20, 0x00, 0x02, 0x3C, 0x11, + 0xEB, 0x31, 0x18, 0x20, 0x00, 0x00, + 0xED, 0x31, 0x08, 0x20, 0x00, 0x00, + 0x04, 0x10, 0xF7, 0xED, 0x31, 0x08, + 0x20, 0x00, 0x02, 0x66, 0x00, 0x6F, + 0x00, 0x01, 0x16, 0x76, 0xEE, 0x06, + 0x48, 0x4A, 0x1E, 0x48, 0x04, 0xED, + 0x31, 0x08, 0x20, 0x00, 0x04, 0xEB, + 0x31, 0x08, 0x00, 0x00, 0xA4, 0x48, + 0x04, 0xED, 0x31, 0x08, 0x20, 0x00, + 0x04, 0xEB, 0x31, 0x08, 0x00, 0x00, + 0xA2, 0x48, 0x04, 0x20, 0x20, 0x4A, + 0x7C, 0x46, 0x82, 0x50, 0x05, 0x50, + 0x15, 0xB5, 0x1E, 0x98, 0xED, 0x31, + 0x08, 0x00, 0x00, 0xA8, 0x10, 0x47, + 0x3B, 0x2C, 0x01, 0xDB, 0x40, 0x11, + 0x98, 0xC1, 0x1E, 0x98, 0x10, 0x07, + 0x30, 0xF9, 0x40, 0x07, 0x18, 0x98, + 0x2A, 0x10, 0xEB, 0x31, 0x08, 0x00, + 0x00, 0xA8, 0xA4, 0x1E, 0x98, 0xBB, + 0x1E, 0x98, 0x50, 0x14, 0x50, 0x04, + 0x46, 0x83, 0x48, 0x04, 0x02, 0x01, + 0x00, 0x50, 0x05, 0x50, 0x15, 0x10, + 0x87, 0x3F, 0x90, 0x2B, 0x18, 0x01, + 0x00, 0xC0, 0x31, 0x00, 0x00, 0xAE, + 0xDF, 0x41, 0x00, 0x08, 0x00, 0x1A, + 0x42, 0x11, 0x67, 0x01, 0xDF, 0x41, + 0x02, 0x08, 0x00, 0x10, 0x42, 0x11, + 0x62, 0x01, 0xB4, 0x43, 0x4A, 0x68, + 0x50, 0x14, 0x50, 0x04, 0x24, 0x10, + 0x48, 0x04, 0xF2, 0x31, 0x00, 0x01, + 0x00, 0x00, 0xAE, 0xF6, 0x31, 0x00, + 0x01, 0x00, 0x00, 0xAE, 0x62, 0xE4, + 0xE5, 0x61, 0x04, 0x48, 0x04, 0xE5, + 0x63, 0x05, 0x48, 0x04, 0x20, 0x20, + 0x00, 0x00, 0x00, 0x00 }; #endif diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c index 
29d0a72f0d65d7..d6421b9b5981c2 100644 --- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c +++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c @@ -15,7 +15,7 @@ License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ------------------------------------------------------------------------------*/ + -------------------------------------------------------------------------*/ #include #include @@ -80,19 +80,19 @@ MODULE_SUPPORTED_DEVICE("FT1000"); #define MAX_RCV_LOOP 100 -//--------------------------------------------------------------------------- -// -// Function: ft1000_read_fifo_len -// Description: This function will read the ASIC Uplink FIFO status register -// which will return the number of bytes remaining in the Uplink FIFO. -// Sixteen bytes are subtracted to make sure that the ASIC does not -// reach its threshold. -// Input: -// dev - network device structure -// Output: -// value - number of bytes available in the ASIC Uplink FIFO. -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_read_fifo_len + Description: This function will read the ASIC Uplink FIFO status register + which will return the number of bytes remaining in the Uplink FIFO. + Sixteen bytes are subtracted to make sure that the ASIC does not + reach its threshold. + Input: + dev - network device structure + Output: + value - number of bytes available in the ASIC Uplink FIFO. + + -------------------------------------------------------------------------*/ static inline u16 ft1000_read_fifo_len(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); @@ -103,25 +103,25 @@ static inline u16 ft1000_read_fifo_len(struct net_device *dev) return (ft1000_read_reg(dev, FT1000_REG_MAG_UFSR) - 16); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_read_dpram -// Description: This function will read the specific area of dpram -// (Electrabuzz ASIC only) -// Input: -// dev - device structure -// offset - index of dpram -// Output: -// value - value of dpram -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_read_dpram + Description: This function will read the specific area of dpram + (Electrabuzz ASIC only) + Input: + dev - device structure + offset - index of dpram + Output: + value - value of dpram + + -------------------------------------------------------------------------*/ u16 ft1000_read_dpram(struct net_device *dev, int offset) { struct ft1000_info *info = netdev_priv(dev); unsigned long flags; u16 data; - // Provide mutual exclusive access while reading ASIC registers. + /* Provide mutual exclusive access while reading ASIC registers. 
*/ spin_lock_irqsave(&info->dpram_lock, flags); ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset); data = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA); @@ -130,54 +130,54 @@ u16 ft1000_read_dpram(struct net_device *dev, int offset) return (data); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_write_dpram -// Description: This function will write to a specific area of dpram -// (Electrabuzz ASIC only) -// Input: -// dev - device structure -// offset - index of dpram -// value - value to write -// Output: -// none. -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_write_dpram + Description: This function will write to a specific area of dpram + (Electrabuzz ASIC only) + Input: + dev - device structure + offset - index of dpram + value - value to write + Output: + none. + + -------------------------------------------------------------------------*/ static inline void ft1000_write_dpram(struct net_device *dev, int offset, u16 value) { struct ft1000_info *info = netdev_priv(dev); unsigned long flags; - // Provide mutual exclusive access while reading ASIC registers. + /* Provide mutual exclusive access while reading ASIC registers. */ spin_lock_irqsave(&info->dpram_lock, flags); ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset); ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, value); spin_unlock_irqrestore(&info->dpram_lock, flags); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_read_dpram_mag_16 -// Description: This function will read the specific area of dpram -// (Magnemite ASIC only) -// Input: -// dev - device structure -// offset - index of dpram -// Output: -// value - value of dpram -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_read_dpram_mag_16 + Description: This function will read the specific area of dpram + (Magnemite ASIC only) + Input: + dev - device structure + offset - index of dpram + Output: + value - value of dpram + + -------------------------------------------------------------------------*/ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index) { struct ft1000_info *info = netdev_priv(dev); unsigned long flags; u16 data; - // Provide mutual exclusive access while reading ASIC registers. + /* Provide mutual exclusive access while reading ASIC registers. */ spin_lock_irqsave(&info->dpram_lock, flags); ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset); - // check if we want to read upper or lower 32-bit word + /* check if we want to read upper or lower 32-bit word */ if (Index) { data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAL); } else { @@ -188,26 +188,26 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index) return (data); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_write_dpram_mag_16 -// Description: This function will write to a specific area of dpram -// (Magnemite ASIC only) -// Input: -// dev - device structure -// offset - index of dpram -// value - value to write -// Output: -// none. 
-// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_write_dpram_mag_16 + Description: This function will write to a specific area of dpram + (Magnemite ASIC only) + Input: + dev - device structure + offset - index of dpram + value - value to write + Output: + none. + + -------------------------------------------------------------------------*/ static inline void ft1000_write_dpram_mag_16(struct net_device *dev, int offset, u16 value, int Index) { struct ft1000_info *info = netdev_priv(dev); unsigned long flags; - // Provide mutual exclusive access while reading ASIC registers. + /* Provide mutual exclusive access while reading ASIC registers. */ spin_lock_irqsave(&info->dpram_lock, flags); ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset); if (Index) { @@ -218,25 +218,25 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev, spin_unlock_irqrestore(&info->dpram_lock, flags); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_read_dpram_mag_32 -// Description: This function will read the specific area of dpram -// (Magnemite ASIC only) -// Input: -// dev - device structure -// offset - index of dpram -// Output: -// value - value of dpram -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_read_dpram_mag_32 + Description: This function will read the specific area of dpram + (Magnemite ASIC only) + Input: + dev - device structure + offset - index of dpram + Output: + value - value of dpram + + -------------------------------------------------------------------------*/ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset) { struct ft1000_info *info = netdev_priv(dev); unsigned long flags; u32 data; - // Provide mutual exclusive access while reading ASIC registers. + /* Provide mutual exclusive access while reading ASIC registers. */ spin_lock_irqsave(&info->dpram_lock, flags); ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset); data = inl(dev->base_addr + FT1000_REG_MAG_DPDATAL); @@ -245,41 +245,41 @@ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset) return (data); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_write_dpram_mag_32 -// Description: This function will write to a specific area of dpram -// (Magnemite ASIC only) -// Input: -// dev - device structure -// offset - index of dpram -// value - value to write -// Output: -// none. -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_write_dpram_mag_32 + Description: This function will write to a specific area of dpram + (Magnemite ASIC only) + Input: + dev - device structure + offset - index of dpram + value - value to write + Output: + none. + + -------------------------------------------------------------------------*/ void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value) { struct ft1000_info *info = netdev_priv(dev); unsigned long flags; - // Provide mutual exclusive access while reading ASIC registers. + /* Provide mutual exclusive access while reading ASIC registers. 
*/ spin_lock_irqsave(&info->dpram_lock, flags); ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset); outl(value, dev->base_addr + FT1000_REG_MAG_DPDATAL); spin_unlock_irqrestore(&info->dpram_lock, flags); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_enable_interrupts -// Description: This function will enable interrupts base on the current interrupt mask. -// Input: -// dev - device structure -// Output: -// None. -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_enable_interrupts + Description: This function will enable interrupts base on the current interrupt mask. + Input: + dev - device structure + Output: + None. + + -------------------------------------------------------------------------*/ static void ft1000_enable_interrupts(struct net_device *dev) { u16 tempword; @@ -292,16 +292,16 @@ static void ft1000_enable_interrupts(struct net_device *dev) tempword); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_disable_interrupts -// Description: This function will disable all interrupts. -// Input: -// dev - device structure -// Output: -// None. -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_disable_interrupts + Description: This function will disable all interrupts. + Input: + dev - device structure + Output: + None. + + -------------------------------------------------------------------------*/ static void ft1000_disable_interrupts(struct net_device *dev) { u16 tempword; @@ -314,17 +314,17 @@ static void ft1000_disable_interrupts(struct net_device *dev) tempword); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_reset_asic -// Description: This function will call the Card Service function to reset the -// ASIC. -// Input: -// dev - device structure -// Output: -// none -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_reset_asic + Description: This function will call the Card Service function to reset the + ASIC. + Input: + dev - device structure + Output: + none + + -------------------------------------------------------------------------*/ static void ft1000_reset_asic(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); @@ -335,21 +335,23 @@ static void ft1000_reset_asic(struct net_device *dev) (*info->ft1000_reset) (pcmcia->link); - // Let's use the register provided by the Magnemite ASIC to reset the - // ASIC and DSP. + /* + * Let's use the register provided by the Magnemite ASIC to reset the + * ASIC and DSP. 
+ */ if (info->AsicID == MAGNEMITE_ID) { ft1000_write_reg(dev, FT1000_REG_RESET, (DSP_RESET_BIT | ASIC_RESET_BIT)); } mdelay(1); if (info->AsicID == ELECTRABUZZ_ID) { - // set watermark to -1 in order to not generate an interrupt + /* set watermark to -1 in order to not generate an interrupt */ ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff); } else { - // set watermark to -1 in order to not generate an interrupt + /* set watermark to -1 in order to not generate an interrupt */ ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff); } - // clear interrupts + /* clear interrupts */ tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR); DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword); ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword); @@ -358,17 +360,17 @@ static void ft1000_reset_asic(struct net_device *dev) } -//--------------------------------------------------------------------------- -// -// Function: ft1000_reset_card -// Description: This function will reset the card -// Input: -// dev - device structure -// Output: -// status - false (card reset fail) -// true (card reset successful) -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_reset_card + Description: This function will reset the card + Input: + dev - device structure + Output: + status - false (card reset fail) + true (card reset successful) + + -------------------------------------------------------------------------*/ static int ft1000_reset_card(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); @@ -384,9 +386,9 @@ static int ft1000_reset_card(struct net_device *dev) info->squeseqnum = 0; ft1000_disable_interrupts(dev); -// del_timer(&poll_timer); + /* del_timer(&poll_timer); */ - // Make sure we free any memory reserve for provisioning + /* Make sure we free any memory reserve for provisioning */ while (list_empty(&info->prov_list) == 0) { DEBUG(0, "ft1000_hw:ft1000_reset_card:deleting provisioning record\n"); @@ -406,7 +408,7 @@ static int ft1000_reset_card(struct net_device *dev) (DSP_RESET_BIT | ASIC_RESET_BIT)); } - // Copy DSP session record into info block if this is not a coldstart + /* Copy DSP session record into info block if this is not a coldstart */ if (ft1000_card_present == 1) { spin_lock_irqsave(&info->dpram_lock, flags); if (info->AsicID == ELECTRABUZZ_ID) { @@ -430,29 +432,29 @@ static int ft1000_reset_card(struct net_device *dev) DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting ASIC\n"); mdelay(10); - //reset ASIC + /* reset ASIC */ ft1000_reset_asic(dev); DEBUG(1, "ft1000_hw:ft1000_reset_card:downloading dsp image\n"); if (info->AsicID == MAGNEMITE_ID) { - // Put dsp in reset and take ASIC out of reset + /* Put dsp in reset and take ASIC out of reset */ DEBUG(0, "ft1000_hw:ft1000_reset_card:Put DSP in reset and take ASIC out of reset\n"); ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT); - // Setting MAGNEMITE ASIC to big endian mode + /* Setting MAGNEMITE ASIC to big endian mode */ ft1000_write_reg(dev, FT1000_REG_SUP_CTRL, HOST_INTF_BE); - // Download bootloader + /* Download bootloader */ card_bootload(dev); - // Take DSP out of reset + /* Take DSP out of reset */ ft1000_write_reg(dev, FT1000_REG_RESET, 0); - // FLARION_DSP_ACTIVE; + /* FLARION_DSP_ACTIVE; */ mdelay(10); DEBUG(0, "ft1000_hw:ft1000_reset_card:Take DSP out of reset\n"); - // Wait for 0xfefe indicating dsp ready before starting download + /* Wait for 
0xfefe indicating dsp ready before starting download */ for (i = 0; i < 50; i++) { tempword = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DPRAM_FEFE, @@ -470,7 +472,7 @@ static int ft1000_reset_card(struct net_device *dev) } } else { - // Take DSP out of reset + /* Take DSP out of reset */ ft1000_write_reg(dev, FT1000_REG_RESET, ~DSP_RESET_BIT); mdelay(10); } @@ -485,17 +487,19 @@ static int ft1000_reset_card(struct net_device *dev) mdelay(10); if (info->AsicID == ELECTRABUZZ_ID) { - // Need to initialize the FIFO length counter to zero in order to sync up - // with the DSP + /* + * Need to initialize the FIFO length counter to zero in order to sync up + * with the DSP + */ info->fifo_cnt = 0; ft1000_write_dpram(dev, FT1000_FIFO_LEN, info->fifo_cnt); - // Initialize DSP heartbeat area to ho + /* Initialize DSP heartbeat area to ho */ ft1000_write_dpram(dev, FT1000_HI_HO, ho); tempword = ft1000_read_dpram(dev, FT1000_HI_HO); DEBUG(1, "ft1000_hw:ft1000_reset_asic:hi_ho value = 0x%x\n", tempword); } else { - // Initialize DSP heartbeat area to ho + /* Initialize DSP heartbeat area to ho */ ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, ho_mag, FT1000_MAG_HI_HO_INDX); tempword = @@ -509,40 +513,44 @@ static int ft1000_reset_card(struct net_device *dev) ft1000_enable_interrupts(dev); /* Schedule heartbeat process to run every 2 seconds */ -// poll_timer.expires = jiffies + (2*HZ); -// poll_timer.data = (u_long)dev; -// add_timer(&poll_timer); + /* poll_timer.expires = jiffies + (2*HZ); */ + /* poll_timer.data = (u_long)dev; */ + /* add_timer(&poll_timer); */ return true; } -//--------------------------------------------------------------------------- -// -// Function: ft1000_chkcard -// Description: This function will check if the device is presently available on -// the system. -// Input: -// dev - device structure -// Output: -// status - false (device is not present) -// true (device is present) -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_chkcard + Description: This function will check if the device is presently available on + the system. + Input: + dev - device structure + Output: + status - false (device is not present) + true (device is present) + + -------------------------------------------------------------------------*/ static int ft1000_chkcard(struct net_device *dev) { u16 tempword; - // Mask register is used to check for device presence since it is never - // set to zero. + /* + * Mask register is used to check for device presence since it is never + * set to zero. + */ tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK); if (tempword == 0) { DEBUG(1, "ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n"); return false; } - // The system will return the value of 0xffff for the version register - // if the device is not present. + /* + * The system will return the value of 0xffff for the version register + * if the device is not present. + */ tempword = ft1000_read_reg(dev, FT1000_REG_ASIC_ID); if (tempword == 0xffff) { DEBUG(1, @@ -553,17 +561,17 @@ static int ft1000_chkcard(struct net_device *dev) } -//--------------------------------------------------------------------------- -// -// Function: ft1000_hbchk -// Description: This function will perform the heart beat check of the DSP as -// well as the ASIC. 
-// Input: -// dev - device structure -// Output: -// none -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_hbchk + Description: This function will perform the heart beat check of the DSP as + well as the ASIC. + Input: + dev - device structure + Output: + none + + -------------------------------------------------------------------------*/ static void ft1000_hbchk(u_long data) { struct net_device *dev = (struct net_device *)data; @@ -574,7 +582,7 @@ static void ft1000_hbchk(u_long data) info = netdev_priv(dev); if (info->CardReady == 1) { - // Perform dsp heartbeat check + /* Perform dsp heartbeat check */ if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_dpram(dev, FT1000_HI_HO); } else { @@ -585,7 +593,7 @@ static void ft1000_hbchk(u_long data) } DEBUG(1, "ft1000_hw:ft1000_hbchk:hi_ho value = 0x%x\n", tempword); - // Let's perform another check if ho is not detected + /* Let's perform another check if ho is not detected */ if (tempword != ho) { if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_dpram(dev, FT1000_HI_HO); @@ -639,7 +647,7 @@ static void ft1000_hbchk(u_long data) } tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); - // Let's check doorbell again if fail + /* Let's check doorbell again if fail */ if (tempword & FT1000_DB_HB) { tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); } @@ -686,8 +694,10 @@ static void ft1000_hbchk(u_long data) add_timer(&poll_timer); return; } - // Set dedicated area to hi and ring appropriate doorbell according - // to hi/ho heartbeat protocol + /* + * Set dedicated area to hi and ring appropriate doorbell according + * to hi/ho heartbeat protocol + */ if (info->AsicID == ELECTRABUZZ_ID) { ft1000_write_dpram(dev, FT1000_HI_HO, hi); } else { @@ -703,7 +713,7 @@ static void ft1000_hbchk(u_long data) (dev, FT1000_MAG_HI_HO, FT1000_MAG_HI_HO_INDX)); } - // Let's write hi again if fail + /* Let's write hi again if fail */ if (tempword != hi) { if (info->AsicID == ELECTRABUZZ_ID) { ft1000_write_dpram(dev, FT1000_HI_HO, hi); @@ -774,14 +784,14 @@ static void ft1000_hbchk(u_long data) add_timer(&poll_timer); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_send_cmd -// Description: -// Input: -// Output: -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_send_cmd + Description: + Input: + Output: + + -------------------------------------------------------------------------*/ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype) { struct ft1000_info *info = netdev_priv(dev); @@ -790,17 +800,19 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, unsigned long flags; size += sizeof(struct pseudo_hdr); - // check for odd byte and increment to 16-bit word align value + /* check for odd byte and increment to 16-bit word align value */ if ((size & 0x0001)) { size++; } DEBUG(1, "FT1000:ft1000_send_cmd:total length = %d\n", size); DEBUG(1, "FT1000:ft1000_send_cmd:length = %d\n", ntohs(*ptempbuffer)); - // put message into slow queue area - // All messages are in the form total_len + pseudo header + message body + /* + * put message into slow queue area + * All messages are in the form total_len + pseudo header + message body + */ 
spin_lock_irqsave(&info->dpram_lock, flags); - // Make sure SLOWQ doorbell is clear + /* Make sure SLOWQ doorbell is clear */ tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); i=0; while (tempword & FT1000_DB_DPRAM_TX) { @@ -816,9 +828,9 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, if (info->AsicID == ELECTRABUZZ_ID) { ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_DPRAM_TX_BASE); - // Write total length to dpram + /* Write total length to dpram */ ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size); - // Write pseudo header and messgae body + /* Write pseudo header and messgae body */ for (i = 0; i < (size >> 1); i++) { DEBUG(1, "FT1000:ft1000_send_cmd:data %d = 0x%x\n", i, *ptempbuffer); @@ -828,9 +840,9 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, } else { ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_DPRAM_MAG_TX_BASE); - // Write total length to dpram + /* Write total length to dpram */ ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, htons(size)); - // Write pseudo header and messgae body + /* Write pseudo header and messgae body */ ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_DPRAM_MAG_TX_BASE + 1); for (i = 0; i < (size >> 2); i++) { @@ -850,23 +862,23 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, } spin_unlock_irqrestore(&info->dpram_lock, flags); - // ring doorbell to notify DSP that we have a message ready + /* ring doorbell to notify DSP that we have a message ready */ ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_DPRAM_TX); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_receive_cmd -// Description: This function will read a message from the dpram area. -// Input: -// dev - network device structure -// pbuffer - caller supply address to buffer -// pnxtph - pointer to next pseudo header -// Output: -// Status = 0 (unsuccessful) -// = 1 (successful) -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_receive_cmd + Description: This function will read a message from the dpram area. 
+ Input: + dev - network device structure + pbuffer - caller supply address to buffer + pnxtph - pointer to next pseudo header + Output: + Status = 0 (unsuccessful) + = 1 (successful) + + -------------------------------------------------------------------------*/ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer, int maxsz, u16 *pnxtph) { @@ -919,7 +931,7 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer, FT1000_REG_MAG_DPDATAH); pbuffer++; } - //copy odd aligned word + /* copy odd aligned word */ *pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAL); DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer); pbuffer++; @@ -928,14 +940,16 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer, pbuffer++; } if (size & 0x0001) { - //copy odd byte from fifo + /* copy odd byte from fifo */ tempword = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA); *pbuffer = ntohs(tempword); } spin_unlock_irqrestore(&info->dpram_lock, flags); - // Check if pseudo header checksum is good - // Calculate pseudo header checksum + /* + * Check if pseudo header checksum is good + * Calculate pseudo header checksum + */ tempword = *ppseudohdr++; for (i = 1; i < 7; i++) { tempword ^= *ppseudohdr++; @@ -943,24 +957,24 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer, if ((tempword != *ppseudohdr)) { DEBUG(1, "FT1000:ft1000_receive_cmd:Pseudo header checksum mismatch\n"); - // Drop this message + /* Drop this message */ return false; } return true; } } -//--------------------------------------------------------------------------- -// -// Function: ft1000_proc_drvmsg -// Description: This function will process the various driver messages. -// Input: -// dev - device structure -// pnxtph - pointer to next pseudo header -// Output: -// none -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_proc_drvmsg + Description: This function will process the various driver messages. 
+ Input: + dev - device structure + pnxtph - pointer to next pseudo header + Output: + none + + -------------------------------------------------------------------------*/ static void ft1000_proc_drvmsg(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); @@ -988,7 +1002,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev) } if ( ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword) ) { - // Get the message type which is total_len + PSEUDO header + msgtype + message body + /* Get the message type which is total_len + PSEUDO header + msgtype + message body */ pdrvmsg = (struct drv_msg *) & cmdbuffer[0]; msgtype = ntohs(pdrvmsg->type); DEBUG(1, "Command message type = 0x%x\n", msgtype); @@ -999,7 +1013,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev) mdelay(25); while (list_empty(&info->prov_list) == 0) { DEBUG(0, "Sending a provisioning message\n"); - // Make sure SLOWQ doorbell is clear + /* Make sure SLOWQ doorbell is clear */ tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); i = 0; @@ -1018,10 +1032,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev) pmsg = (u16 *) ptr->pprov_data; ppseudo_hdr = (struct pseudo_hdr *) pmsg; - // Insert slow queue sequence number + /* Insert slow queue sequence number */ ppseudo_hdr->seq_num = info->squeseqnum++; ppseudo_hdr->portsrc = 0; - // Calculate new checksum + /* Calculate new checksum */ ppseudo_hdr->checksum = *pmsg++; DEBUG(1, "checksum = 0x%x\n", ppseudo_hdr->checksum); @@ -1036,8 +1050,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev) kfree(ptr->pprov_data); kfree(ptr); } - // Indicate adapter is ready to take application messages after all - // provisioning messages are sent + /* + * Indicate adapter is ready to take application messages after all + * provisioning messages are sent + */ info->CardReady = 1; break; case MEDIA_STATE: @@ -1118,8 +1134,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev) break; case DSP_GET_INFO: DEBUG(1, "FT1000:drivermsg:Got DSP_GET_INFO\n"); - // copy dsp info block to dsp - // allow any outstanding ioctl to finish + /* + * copy dsp info block to dsp + * allow any outstanding ioctl to finish + */ mdelay(10); tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) { @@ -1132,8 +1150,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev) } if ((tempword & FT1000_DB_DPRAM_TX) == 0) { - // Put message into Slow Queue - // Form Pseudo header + /* + * Put message into Slow Queue + * Form Pseudo header + */ pmsg = (u16 *) info->DSPInfoBlk; ppseudo_hdr = (struct pseudo_hdr *) pmsg; ppseudo_hdr->length = @@ -1147,11 +1167,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev) ppseudo_hdr->rsvd1 = 0; ppseudo_hdr->rsvd2 = 0; ppseudo_hdr->qos_class = 0; - // Insert slow queue sequence number + /* Insert slow queue sequence number */ ppseudo_hdr->seq_num = info->squeseqnum++; - // Insert application id + /* Insert application id */ ppseudo_hdr->portsrc = 0; - // Calculate new checksum + /* Calculate new checksum */ ppseudo_hdr->checksum = *pmsg++; for (i = 1; i < 7; i++) { ppseudo_hdr->checksum ^= *pmsg++; @@ -1165,8 +1185,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev) break; case GET_DRV_ERR_RPT_MSG: DEBUG(1, "FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n"); - // copy driver error message to dsp - // allow any outstanding ioctl to finish + /* + * copy driver error message to dsp + * allow any outstanding ioctl to finish + */ mdelay(10); tempword = ft1000_read_reg(dev, 
FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) { @@ -1179,8 +1201,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev) } if ((tempword & FT1000_DB_DPRAM_TX) == 0) { - // Put message into Slow Queue - // Form Pseudo header + /* + * Put message into Slow Queue + * Form Pseudo header + */ pmsg = (u16 *) & tempbuffer[0]; ppseudo_hdr = (struct pseudo_hdr *) pmsg; ppseudo_hdr->length = htons(0x0012); @@ -1193,11 +1217,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev) ppseudo_hdr->rsvd1 = 0; ppseudo_hdr->rsvd2 = 0; ppseudo_hdr->qos_class = 0; - // Insert slow queue sequence number + /* Insert slow queue sequence number */ ppseudo_hdr->seq_num = info->squeseqnum++; - // Insert application id + /* Insert application id */ ppseudo_hdr->portsrc = 0; - // Calculate new checksum + /* Calculate new checksum */ ppseudo_hdr->checksum = *pmsg++; for (i=1; i<7; i++) { ppseudo_hdr->checksum ^= *pmsg++; @@ -1228,18 +1252,18 @@ static void ft1000_proc_drvmsg(struct net_device *dev) } } -//--------------------------------------------------------------------------- -// -// Function: ft1000_parse_dpram_msg -// Description: This function will parse the message received from the DSP -// via the DPRAM interface. -// Input: -// dev - device structure -// Output: -// status - FAILURE -// SUCCESS -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_parse_dpram_msg + Description: This function will parse the message received from the DSP + via the DPRAM interface. + Input: + dev - device structure + Output: + status - FAILURE + SUCCESS + + -------------------------------------------------------------------------*/ static int ft1000_parse_dpram_msg(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); @@ -1255,7 +1279,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) DEBUG(1, "Doorbell = 0x%x\n", doorbell); if (doorbell & FT1000_ASIC_RESET_REQ) { - // Copy DSP session record from info block + /* Copy DSP session record from info block */ spin_lock_irqsave(&info->dpram_lock, flags); if (info->AsicID == ELECTRABUZZ_ID) { ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, @@ -1274,7 +1298,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) } spin_unlock_irqrestore(&info->dpram_lock, flags); - // clear ASIC RESET request + /* clear ASIC RESET request */ ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_ASIC_RESET_REQ); DEBUG(1, "Got an ASIC RESET Request\n"); @@ -1282,7 +1306,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) FT1000_ASIC_RESET_DSP); if (info->AsicID == MAGNEMITE_ID) { - // Setting MAGNEMITE ASIC to big endian mode + /* Setting MAGNEMITE ASIC to big endian mode */ ft1000_write_reg(dev, FT1000_REG_SUP_CTRL, HOST_INTF_BE); } @@ -1315,8 +1339,10 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) if ((total_len < MAX_CMD_SQSIZE) && (total_len > sizeof(struct pseudo_hdr))) { total_len += nxtph; cnt = 0; - // ft1000_read_reg will return a value that needs to be byteswap - // in order to get DSP_QID_OFFSET. + /* + * ft1000_read_reg will return a value that needs to be byteswap + * in order to get DSP_QID_OFFSET. + */ if (info->AsicID == ELECTRABUZZ_ID) { portid = (ft1000_read_dpram @@ -1332,7 +1358,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) DEBUG(1, "DSP_QID = 0x%x\n", portid); if (portid == DRIVERID) { - // We are assumming one driver message from the DSP at a time. 
+ /* We are assumming one driver message from the DSP at a time. */ ft1000_proc_drvmsg(dev); } } @@ -1340,7 +1366,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) } if (doorbell & FT1000_DB_COND_RESET) { - // Reset ASIC and DSP + /* Reset ASIC and DSP */ if (info->AsicID == ELECTRABUZZ_ID) { info->DSP_TIME[0] = ft1000_read_dpram(dev, FT1000_DSP_TIMER0); @@ -1370,7 +1396,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_COND_RESET); } - // let's clear any unexpected doorbells from DSP + /* let's clear any unexpected doorbells from DSP */ doorbell = doorbell & ~(FT1000_DB_DPRAM_RX | FT1000_ASIC_RESET_REQ | FT1000_DB_COND_RESET | 0xff00); @@ -1383,18 +1409,18 @@ static int ft1000_parse_dpram_msg(struct net_device *dev) } -//--------------------------------------------------------------------------- -// -// Function: ft1000_flush_fifo -// Description: This function will flush one packet from the downlink -// FIFO. -// Input: -// dev - device structure -// drv_err - driver error causing the flush fifo -// Output: -// None. -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_flush_fifo + Description: This function will flush one packet from the downlink + FIFO. + Input: + dev - device structure + drv_err - driver error causing the flush fifo + Output: + None. + + -------------------------------------------------------------------------*/ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum) { struct ft1000_info *info = netdev_priv(dev); @@ -1432,7 +1458,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum) ft1000_reset_card(dev); return; } else { - // Flush corrupted pkt from FIFO + /* Flush corrupted pkt from FIFO */ i = 0; do { if (info->AsicID == ELECTRABUZZ_ID) { @@ -1447,8 +1473,10 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum) inw(dev->base_addr + FT1000_REG_MAG_DFSR); } i++; - // This should never happen unless the ASIC is broken. - // We must reset to recover. + /* + * This should never happen unless the ASIC is broken. + * We must reset to recover. + */ if ((i > 2048) || (tempword == 0)) { if (info->AsicID == ELECTRABUZZ_ID) { info->DSP_TIME[0] = @@ -1482,17 +1510,19 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum) FT1000_MAG_DSP_TIMER3_INDX); } if (tempword == 0) { - // Let's check if ASIC reads are still ok by reading the Mask register - // which is never zero at this point of the code. + /* + * Let's check if ASIC reads are still ok by reading the Mask register + * which is never zero at this point of the code. + */ tempword = inw(dev->base_addr + FT1000_REG_SUP_IMASK); if (tempword == 0) { - // This indicates that we can not communicate with the ASIC + /* This indicates that we can not communicate with the ASIC */ info->DrvErrNum = FIFO_FLUSH_BADCNT; } else { - // Let's assume that we really flush the FIFO + /* Let's assume that we really flush the FIFO */ pcmcia->PktIntfErr++; return; } @@ -1506,9 +1536,9 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum) if (info->AsicID == ELECTRABUZZ_ID) { i++; DEBUG(0, "Flushing FIFO complete = %x\n", tempword); - // Flush last word in FIFO. + /* Flush last word in FIFO. 
*/ tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO); - // Update FIFO counter for DSP + /* Update FIFO counter for DSP */ i = i * 2; DEBUG(0, "Flush Data byte count to dsp = %d\n", i); info->fifo_cnt += i; @@ -1516,7 +1546,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum) info->fifo_cnt); } else { DEBUG(0, "Flushing FIFO complete = %x\n", tempword); - // Flush last word in FIFO + /* Flush last word in FIFO */ templong = inl(dev->base_addr + FT1000_REG_MAG_DFR); tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT); DEBUG(0, "FT1000_REG_SUP_STAT = 0x%x\n", tempword); @@ -1529,19 +1559,19 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum) } } -//--------------------------------------------------------------------------- -// -// Function: ft1000_copy_up_pkt -// Description: This function will pull Flarion packets out of the Downlink -// FIFO and convert it to an ethernet packet. The ethernet packet will -// then be deliver to the TCP/IP stack. -// Input: -// dev - device structure -// Output: -// status - FAILURE -// SUCCESS -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_copy_up_pkt + Description: This function will pull Flarion packets out of the Downlink + FIFO and convert it to an ethernet packet. The ethernet packet will + then be deliver to the TCP/IP stack. + Input: + dev - device structure + Output: + status - FAILURE + SUCCESS + + -------------------------------------------------------------------------*/ static int ft1000_copy_up_pkt(struct net_device *dev) { u16 tempword; @@ -1556,7 +1586,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev) u32 templong; DEBUG(1, "ft1000_copy_up_pkt\n"); - // Read length + /* Read length */ if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO); len = tempword; @@ -1570,7 +1600,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev) if (len > ENET_MAX_SIZE) { DEBUG(0, "size of ethernet packet invalid\n"); if (info->AsicID == MAGNEMITE_ID) { - // Read High word to complete 32 bit access + /* Read High word to complete 32 bit access */ tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH); } ft1000_flush_fifo(dev, DSP_PKTLEN_INFO); @@ -1582,7 +1612,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev) if (skb == NULL) { DEBUG(0, "No Network buffers available\n"); - // Read High word to complete 32 bit access + /* Read High word to complete 32 bit access */ if (info->AsicID == MAGNEMITE_ID) { tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH); } @@ -1592,13 +1622,13 @@ static int ft1000_copy_up_pkt(struct net_device *dev) } pbuffer = (u8 *) skb_put(skb, len + 12); - // Pseudo header + /* Pseudo header */ if (info->AsicID == ELECTRABUZZ_ID) { for (i = 1; i < 7; i++) { tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO); chksum ^= tempword; } - // read checksum value + /* read checksum value */ tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO); } else { tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH); @@ -1625,7 +1655,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev) DEBUG(1, "Pseudo = 0x%x\n", tempword); chksum ^= tempword; - // read checksum value + /* read checksum value */ tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH); DEBUG(1, "Pseudo = 0x%x\n", tempword); } @@ -1638,10 +1668,10 @@ static int ft1000_copy_up_pkt(struct net_device *dev) kfree_skb(skb); return FAILURE; } - //subtract the number of bytes read 
already + /* subtract the number of bytes read already */ ptemp = pbuffer; - // fake MAC address + /* fake MAC address */ *pbuffer++ = dev->dev_addr[0]; *pbuffer++ = dev->dev_addr[1]; *pbuffer++ = dev->dev_addr[2]; @@ -1666,7 +1696,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev) } } - // Need to read one more word if odd byte + /* Need to read one more word if odd byte */ if (len & 0x0001) { tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO); *pbuffer++ = (u8) (tempword >> 8); @@ -1679,7 +1709,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev) *ptemplong++ = templong; } - // Need to read one more word if odd align. + /* Need to read one more word if odd align. */ if (len & 0x0003) { templong = inl(dev->base_addr + FT1000_REG_MAG_DFR); DEBUG(1, "Data = 0x%8x\n", templong); @@ -1699,11 +1729,11 @@ static int ft1000_copy_up_pkt(struct net_device *dev) netif_rx(skb); info->stats.rx_packets++; - // Add on 12 bytes for MAC address which was removed + /* Add on 12 bytes for MAC address which was removed */ info->stats.rx_bytes += (len + 12); if (info->AsicID == ELECTRABUZZ_ID) { - // track how many bytes have been read from FIFO - round up to 16 bit word + /* track how many bytes have been read from FIFO - round up to 16 bit word */ tempword = len + 16; if (tempword & 0x01) tempword++; @@ -1715,21 +1745,21 @@ static int ft1000_copy_up_pkt(struct net_device *dev) return SUCCESS; } -//--------------------------------------------------------------------------- -// -// Function: ft1000_copy_down_pkt -// Description: This function will take an ethernet packet and convert it to -// a Flarion packet prior to sending it to the ASIC Downlink -// FIFO. -// Input: -// dev - device structure -// packet - address of ethernet packet -// len - length of IP packet -// Output: -// status - FAILURE -// SUCCESS -// -//--------------------------------------------------------------------------- +/*--------------------------------------------------------------------------- + + Function: ft1000_copy_down_pkt + Description: This function will take an ethernet packet and convert it to + a Flarion packet prior to sending it to the ASIC Downlink + FIFO. 
+ Input: + dev - device structure + packet - address of ethernet packet + len - length of IP packet + Output: + status - FAILURE + SUCCESS + + -------------------------------------------------------------------------*/ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) { struct ft1000_info *info = netdev_priv(dev); @@ -1744,7 +1774,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) DEBUG(1, "ft1000_hw: copy_down_pkt()\n"); - // Check if there is room on the FIFO + /* Check if there is room on the FIFO */ if (len > ft1000_read_fifo_len(dev)) { udelay(10); if (len > ft1000_read_fifo_len(dev)) { @@ -1769,15 +1799,15 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) return SUCCESS; } } - // Create pseudo header and send pseudo/ip to hardware + /* Create pseudo header and send pseudo/ip to hardware */ if (info->AsicID == ELECTRABUZZ_ID) { pseudo.blk.length = len; } else { pseudo.blk.length = ntohs(len); } - pseudo.blk.source = DSPID; // Need to swap to get in correct order + pseudo.blk.source = DSPID; /* Need to swap to get in correct order */ pseudo.blk.destination = HOSTID; - pseudo.blk.portdest = NETWORKID; // Need to swap to get in correct order + pseudo.blk.portdest = NETWORKID; /* Need to swap to get in correct order */ pseudo.blk.portsrc = DSPAIRID; pseudo.blk.sh_str_id = 0; pseudo.blk.control = 0; @@ -1791,14 +1821,14 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) pseudo.blk.checksum ^= pseudo.buff[i]; } - // Production Mode + /* Production Mode */ if (info->AsicID == ELECTRABUZZ_ID) { - // copy first word to UFIFO_BEG reg + /* copy first word to UFIFO_BEG reg */ ft1000_write_reg(dev, FT1000_REG_UFIFO_BEG, pseudo.buff[0]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 0 BEG = 0x%04x\n", pseudo.buff[0]); - // copy subsequent words to UFIFO_MID reg + /* copy subsequent words to UFIFO_MID reg */ ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[1]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 1 MID = 0x%04x\n", pseudo.buff[1]); @@ -1821,7 +1851,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 7 MID = 0x%04x\n", pseudo.buff[7]); - // Write PPP type + IP Packet into Downlink FIFO + /* Write PPP type + IP Packet into Downlink FIFO */ for (i = 0; i < (len >> 1) - 1; i++) { ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, htons(*packet)); @@ -1831,7 +1861,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) packet++; } - // Check for odd byte + /* Check for odd byte */ if (len & 0x0001) { ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, htons(*packet)); @@ -1870,12 +1900,12 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) *(u32 *) & pseudo.buff[6]); plong = (u32 *) packet; - // Write PPP type + IP Packet into Downlink FIFO + /* Write PPP type + IP Packet into Downlink FIFO */ for (i = 0; i < (len >> 2); i++) { outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR); } - // Check for odd alignment + /* Check for odd alignment */ if (len & 0x0003) { DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data = 0x%8x\n", @@ -1886,7 +1916,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) } info->stats.tx_packets++; - // Add 14 bytes for MAC address plus ethernet type + /* Add 14 bytes for MAC address plus ethernet type */ info->stats.tx_bytes += (len + 14); return SUCCESS; } @@ -1931,7 +1961,7 @@ static int 
ft1000_close(struct net_device *dev) ft1000_disable_interrupts(dev); ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT); - //reset ASIC + /* reset ASIC */ ft1000_reset_asic(dev); } return 0; @@ -1995,10 +2025,10 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id) ft1000_disable_interrupts(dev); - // Read interrupt type + /* Read interrupt type */ inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR); - // Make sure we process all interrupt before leaving the ISR due to the edge trigger interrupt type + /* Make sure we process all interrupt before leaving the ISR due to the edge trigger interrupt type */ while (inttype) { if (inttype & ISR_DOORBELL_PEND) ft1000_parse_dpram_msg(dev); @@ -2008,7 +2038,7 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id) cnt = 0; do { - // Check if we have packets in the Downlink FIFO + /* Check if we have packets in the Downlink FIFO */ if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_reg(dev, @@ -2027,12 +2057,12 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id) } while (cnt < MAX_RCV_LOOP); } - // clear interrupts + /* clear interrupts */ tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR); DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword); ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword); - // Read interrupt type + /* Read interrupt type */ inttype = ft1000_read_reg (dev, FT1000_REG_SUP_ISR); DEBUG(1,"ft1000_hw: interrupt status register after clear = 0x%x\n",inttype); } @@ -2044,7 +2074,7 @@ void stop_ft1000_card(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); struct prov_record *ptr; -// int cnt; + /* int cnt; */ DEBUG(0, "ft1000_hw: stop_ft1000_card()\n"); @@ -2053,7 +2083,7 @@ void stop_ft1000_card(struct net_device *dev) netif_stop_queue(dev); ft1000_disable_interrupts(dev); - // Make sure we free any memory reserve for provisioning + /* Make sure we free any memory reserve for provisioning */ while (list_empty(&info->prov_list) == 0) { ptr = list_entry(info->prov_list.next, struct prov_record, list); list_del(&ptr->list); @@ -2109,7 +2139,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link, struct ft1000_pcmcia *pcmcia; struct net_device *dev; - static const struct net_device_ops ft1000ops = // Slavius 21.10.2009 due to kernel changes + static const struct net_device_ops ft1000ops = /* Slavius 21.10.2009 due to kernel changes */ { .ndo_open = &ft1000_open, .ndo_stop = &ft1000_close, @@ -2169,12 +2199,12 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link, info->squeseqnum = 0; -// dev->hard_start_xmit = &ft1000_start_xmit; -// dev->get_stats = &ft1000_stats; -// dev->open = &ft1000_open; -// dev->stop = &ft1000_close; + /* dev->hard_start_xmit = &ft1000_start_xmit; */ + /* dev->get_stats = &ft1000_stats; */ + /* dev->open = &ft1000_open; */ + /* dev->stop = &ft1000_close; */ - dev->netdev_ops = &ft1000ops; // Slavius 21.10.2009 due to kernel changes + dev->netdev_ops = &ft1000ops; /* Slavius 21.10.2009 due to kernel changes */ DEBUG(0, "device name = %s\n", dev->name); diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c index 68a55ce692008d..ffdc7f597a961b 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c @@ -560,6 +560,8 @@ static long ft1000_ioctl(struct file *file, unsigned int command, /* Get the length field to see how many bytes to copy */ result = get_user(msgsz, (__u16 __user *)argp); + if (result) 
+ break; msgsz = ntohs(msgsz); /* DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz); */ diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c index 12f333fa59b525..cab9cdf6273e24 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c @@ -4,7 +4,6 @@ * This file is part of Express Card USB Driver */ -#include #include #include #include @@ -55,7 +54,7 @@ #define MAX_LENGTH 0x7f0 -// Temporary download mechanism for Magnemite +/* Temporary download mechanism for Magnemite */ #define DWNLD_MAG_TYPE_LOC 0x00 #define DWNLD_MAG_LEN_LOC 0x01 #define DWNLD_MAG_ADDR_LOC 0x02 @@ -74,36 +73,36 @@ #define HANDSHAKE_MAG_TIMEOUT_VALUE 0xF1F1 -// New Magnemite downloader +/* New Magnemite downloader */ #define DWNLD_MAG1_HANDSHAKE_LOC 0x00 #define DWNLD_MAG1_TYPE_LOC 0x01 #define DWNLD_MAG1_SIZE_LOC 0x02 #define DWNLD_MAG1_PS_HDR_LOC 0x03 struct dsp_file_hdr { - long version_id; // Version ID of this image format. - long package_id; // Package ID of code release. - long build_date; // Date/time stamp when file was built. - long commands_offset; // Offset to attached commands in Pseudo Hdr format. - long loader_offset; // Offset to bootloader code. - long loader_code_address; // Start address of bootloader. - long loader_code_end; // Where bootloader code ends. - long loader_code_size; - long version_data_offset; // Offset were scrambled version data begins. - long version_data_size; // Size, in words, of scrambled version data. - long nDspImages; // Number of DSP images in file. + long version_id; /* Version ID of this image format. */ + long package_id; /* Package ID of code release. */ + long build_date; /* Date/time stamp when file was built. */ + long commands_offset; /* Offset to attached commands in Pseudo Hdr format. */ + long loader_offset; /* Offset to bootloader code. */ + long loader_code_address; /* Start address of bootloader. */ + long loader_code_end; /* Where bootloader code ends. */ + long loader_code_size; + long version_data_offset; /* Offset were scrambled version data begins. */ + long version_data_size; /* Size, in words, of scrambled version data. */ + long nDspImages; /* Number of DSP images in file. */ }; #pragma pack(1) struct dsp_image_info { - long coff_date; // Date/time when DSP Coff image was built. - long begin_offset; // Offset in file where image begins. - long end_offset; // Offset in file where image begins. - long run_address; // On chip Start address of DSP code. - long image_size; // Size of image. - long version; // Embedded version # of DSP code. - unsigned short checksum; // DSP File checksum - unsigned short pad1; + long coff_date; /* Date/time when DSP Coff image was built. */ + long begin_offset; /* Offset in file where image begins. */ + long end_offset; /* Offset in file where image begins. */ + long run_address; /* On chip Start address of DSP code. */ + long image_size; /* Size of image. */ + long version; /* Embedded version # of DSP code. 
*/ + unsigned short checksum; /* DSP File checksum */ + unsigned short pad1; }; @@ -151,7 +150,7 @@ static int check_usb_db(struct ft1000_usb *ft1000dev) } } - return HANDSHAKE_MAG_TIMEOUT_VALUE; + return -1; } /* gets the handshake and compares it with the expected value */ @@ -172,9 +171,8 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value) ft1000dev->fcodeldr); ft1000dev->fcodeldr = 0; status = check_usb_db(ft1000dev); - if (status != STATUS_SUCCESS) { + if (status != 0) { DEBUG("get_handshake: check_usb_db failed\n"); - status = STATUS_FAILURE; break; } status = ft1000_write_register(ft1000dev, @@ -202,7 +200,7 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value) } /* write the handshake value to the handshake location */ -static void put_handshake(struct ft1000_usb *ft1000dev,u16 handshake_value) +static void put_handshake(struct ft1000_usb *ft1000dev, u16 handshake_value) { u32 tempx; u16 tempword; @@ -268,11 +266,12 @@ static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value) return HANDSHAKE_TIMEOUT_VALUE; } -static void put_handshake_usb(struct ft1000_usb *ft1000dev,u16 handshake_value) +static void put_handshake_usb(struct ft1000_usb *ft1000dev, u16 handshake_value) { int i; - for (i=0; i<1000; i++); + for (i = 0; i < 1000; i++) + ; } static u16 get_request_type(struct ft1000_usb *ft1000dev) @@ -450,7 +449,7 @@ static int write_dpram32_and_check(struct ft1000_usb *ft1000dev, static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length) { - int status = STATUS_SUCCESS; + int status = 0; u16 dpram; int loopcnt, i; u16 tempword; @@ -499,7 +498,7 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile, } else { status = write_dpram32_and_check(ft1000dev, tempbuffer, dpram); - if (status != STATUS_SUCCESS) { + if (status != 0) { DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]); break; } @@ -509,9 +508,9 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile, return status; } -static void usb_dnld_complete (struct urb *urb) +static void usb_dnld_complete(struct urb *urb) { - //DEBUG("****** usb_dnld_complete\n"); + /* DEBUG("****** usb_dnld_complete\n"); */ } /* writes a block of DSP image to DPRAM @@ -523,7 +522,7 @@ static void usb_dnld_complete (struct urb *urb) static int write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length) { - int Status = STATUS_SUCCESS; + int Status = 0; int byte_length; byte_length = word_length * 4; @@ -586,12 +585,12 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file, /*NdisMSleep (100); */ if (word_length > MAX_LENGTH) { DEBUG("FT1000:download:Download error: Max length exceeded\n"); - return STATUS_FAILURE; + return -1; } if ((word_length * 2 + (long)c_file) > (long)endpoint) { /* Error, beyond boot code range.*/ DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n", (int)word_length); - return STATUS_FAILURE; + return -1; } if (word_length & 0x1) word_length++; @@ -601,11 +600,11 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file, status = write_blk(ft1000dev, s_file, c_file, word_length); /*DEBUG("write_blk returned %d\n", status); */ } else { - write_blk_fifo(ft1000dev, s_file, c_file, word_length); + status = write_blk_fifo(ft1000dev, s_file, c_file, word_length); if (ft1000dev->usbboot == 0) ft1000dev->usbboot++; if (ft1000dev->usbboot == 1) 
- ft1000_write_dpram16(ft1000dev, + status |= ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_PS_HDR_LOC, 0, 0); } return status; @@ -615,7 +614,7 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file, int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, u32 FileLength) { - int status = STATUS_SUCCESS; + int status = 0; u32 state; u16 handshake; struct pseudo_hdr *pseudo_header; @@ -651,9 +650,9 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, ft1000dev->usbboot = 0; ft1000dev->dspalive = 0xffff; - // - // Get version id of file, at first 4 bytes of file, for newer files. - // + /* + * Get version id of file, at first 4 bytes of file, for newer files. + */ state = STATE_START_DWNLD; @@ -670,7 +669,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, loader_code_size = file_hdr->loader_code_size; correct_version = false; - while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE)) { + while ((status == 0) && (state != STATE_DONE_FILE)) { switch (state) { case STATE_START_DWNLD: status = scram_start_dwnld(ft1000dev, &handshake, @@ -702,8 +701,8 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, /* Reposition ptrs to beginning of code section */ s_file = (u16 *) (boot_end); c_file = (u8 *) (boot_end); - //DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file); - //DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file); + /* DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file); */ + /* DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file); */ state = STATE_CODE_DWNLD; ft1000dev->fcodeldr = 1; break; @@ -717,7 +716,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, DEBUG ("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n", request); - status = STATUS_FAILURE; + status = -1; break; } if (ft1000dev->usbboot) @@ -729,13 +728,13 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, } else { DEBUG ("FT1000:download:Download error: Handshake failed\n"); - status = STATUS_FAILURE; + status = -1; } break; case STATE_CODE_DWNLD: - //DEBUG("FT1000:STATE_CODE_DWNLD\n"); + /* DEBUG("FT1000:STATE_CODE_DWNLD\n"); */ ft1000dev->bootmode = 0; if (ft1000dev->usbboot) handshake = @@ -773,7 +772,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, } else { DEBUG ("FT1000:download:Download error: Got Run address request before image offset request.\n"); - status = STATUS_FAILURE; + status = -1; break; } break; @@ -789,7 +788,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, } else { DEBUG ("FT1000:download:Download error: Got Size request before image offset request.\n"); - status = STATUS_FAILURE; + status = -1; break; } break; @@ -805,11 +804,11 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, state = STATE_DONE_DWNLD; break; case REQUEST_CODE_SEGMENT: - //DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n"); + /* DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n"); */ if (!correct_version) { DEBUG ("FT1000:download:Download error: Got Code Segment request before image offset request.\n"); - status = STATUS_FAILURE; + status = -1; break; } @@ -823,7 +822,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, case REQUEST_MAILBOX_DATA: DEBUG ("FT1000:download: REQUEST_MAILBOX_DATA\n"); - // Convert length from byte count to word count. Make sure we round up. + /* Convert length from byte count to word count. Make sure we round up. 
*/ word_length = (long)(pft1000info->DSPInfoBlklen + 1) / 2; @@ -836,7 +835,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, * Position ASIC DPRAM auto-increment pointer. */ - data = (u16 *) & mailbox_data->data[0]; + data = (u16 *) &mailbox_data->data[0]; dpram = (u16) DWNLD_MAG1_PS_HDR_LOC; if (word_length & 0x1) word_length++; @@ -850,7 +849,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, - (u8 *) & templong); + (u8 *) &templong); } break; @@ -939,7 +938,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, } dsp_img_info++; - } //end of for + } /* end of for */ if (!correct_version) { /* @@ -948,7 +947,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, DEBUG ("FT1000:download:Download error: Bad Version Request = 0x%x.\n", (int)requested_version); - status = STATUS_FAILURE; + status = -1; break; } break; @@ -957,7 +956,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, DEBUG ("FT1000:download:Download error: Bad request type=%d in CODE download state.\n", request); - status = STATUS_FAILURE; + status = -1; break; } if (ft1000dev->usbboot) @@ -969,7 +968,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, } else { DEBUG ("FT1000:download:Download error: Handshake failed\n"); - status = STATUS_FAILURE; + status = -1; } break; @@ -1002,7 +1001,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, (u32) (pseudo_header_len + sizeof(struct pseudo_hdr))); - // link provisioning data + /* link provisioning data */ pprov_record = kmalloc(sizeof(struct prov_record), GFP_ATOMIC); @@ -1013,7 +1012,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, list, &pft1000info-> prov_list); - // Move to next entry if available + /* Move to next entry if available */ c_file = (u8 *) ((unsigned long) c_file + @@ -1026,14 +1025,14 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, } } else { kfree(pbuffer); - status = STATUS_FAILURE; + status = -1; } } else { - status = STATUS_FAILURE; + status = -1; } } else { /* Checksum did not compute */ - status = STATUS_FAILURE; + status = -1; } DEBUG ("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n", @@ -1046,23 +1045,23 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, break; default: - status = STATUS_FAILURE; + status = -1; break; } /* End Switch */ - if (status != STATUS_SUCCESS) + if (status != 0) break; /**** // Check if Card is present status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK); if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) { - break; + break; } status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID); if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) { - break; + break; } ****/ diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c index 0d4931b2c2e26f..a433e33049b550 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c @@ -1,12 +1,9 @@ -//===================================================== -// CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved. -// -// -// This file is part of Express Card USB Driver -// -// $Id: -//==================================================== -#include +/* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved. 
+* +* +* This file is part of Express Card USB Driver +*/ + #include #include #include @@ -27,7 +24,9 @@ #define HARLEY_READ_OPERATION 0xc1 #define HARLEY_WRITE_OPERATION 0x41 -//#define JDEBUG +#if 0 +#define JDEBUG +#endif static int ft1000_submit_rx_urb(struct ft1000_info *info); @@ -35,32 +34,22 @@ static u8 tempbuffer[1600]; #define MAX_RCV_LOOP 100 -//--------------------------------------------------------------------------- -// Function: ft1000_control -// -// Parameters: ft1000_usb - device structure -// pipe - usb control message pipe -// request - control request -// requesttype - control message request type -// value - value to be written or 0 -// index - register index -// data - data buffer to hold the read/write values -// size - data size -// timeout - control message time out value -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function sends a control message via USB interface synchronously -// -// Notes: -// -//--------------------------------------------------------------------------- +/* send a control message via USB interface synchronously +* Parameters: ft1000_usb - device structure +* pipe - usb control message pipe +* request - control request +* requesttype - control message request type +* value - value to be written or 0 +* index - register index +* data - data buffer to hold the read/write values +* size - data size +* timeout - control message time out value +*/ static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe, u8 request, u8 requesttype, u16 value, u16 index, void *data, u16 size, int timeout) { - u16 ret; + int ret; if ((ft1000dev == NULL) || (ft1000dev->dev == NULL)) { DEBUG("ft1000dev or ft1000dev->dev == NULL, failure\n"); @@ -76,26 +65,11 @@ static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe, return ret; } -//--------------------------------------------------------------------------- -// Function: ft1000_read_register -// -// Parameters: ft1000_usb - device structure -// Data - data buffer to hold the value read -// nRegIndex - register index -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function returns the value in a register -// -// Notes: -// -//--------------------------------------------------------------------------- - -int ft1000_read_register(struct ft1000_usb *ft1000dev, u16* Data, +/* returns the value in a register */ +int ft1000_read_register(struct ft1000_usb *ft1000dev, u16 *Data, u16 nRegIndx) { - int ret = STATUS_SUCCESS; + int ret = 0; ret = ft1000_control(ft1000dev, usb_rcvctrlpipe(ft1000dev->dev, 0), @@ -110,25 +84,11 @@ int ft1000_read_register(struct ft1000_usb *ft1000dev, u16* Data, return ret; } -//--------------------------------------------------------------------------- -// Function: ft1000_write_register -// -// Parameters: ft1000_usb - device structure -// value - value to write into a register -// nRegIndex - register index -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function writes the value in a register -// -// Notes: -// -//--------------------------------------------------------------------------- +/* writes the value in a register */ int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value, u16 nRegIndx) { - int ret = STATUS_SUCCESS; + int ret = 0; ret = ft1000_control(ft1000dev, usb_sndctrlpipe(ft1000dev->dev, 0), @@ -143,27 +103,11 @@ int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value, return 
ret; } -//--------------------------------------------------------------------------- -// Function: ft1000_read_dpram32 -// -// Parameters: ft1000_usb - device structure -// indx - starting address to read -// buffer - data buffer to hold the data read -// cnt - number of byte read from DPRAM -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function read a number of bytes from DPRAM -// -// Notes: -// -//--------------------------------------------------------------------------- - +/* read a number of bytes from DPRAM */ int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer, u16 cnt) { - int ret = STATUS_SUCCESS; + int ret = 0; ret = ft1000_control(ft1000dev, usb_rcvctrlpipe(ft1000dev->dev, 0), @@ -178,26 +122,11 @@ int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer, return ret; } -//--------------------------------------------------------------------------- -// Function: ft1000_write_dpram32 -// -// Parameters: ft1000_usb - device structure -// indx - starting address to write the data -// buffer - data buffer to write into DPRAM -// cnt - number of bytes to write -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function writes into DPRAM a number of bytes -// -// Notes: -// -//--------------------------------------------------------------------------- +/* writes into DPRAM a number of bytes */ int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer, u16 cnt) { - int ret = STATUS_SUCCESS; + int ret = 0; if (cnt % 4) cnt += cnt - (cnt % 4); @@ -215,26 +144,11 @@ int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer, return ret; } -//--------------------------------------------------------------------------- -// Function: ft1000_read_dpram16 -// -// Parameters: ft1000_usb - device structure -// indx - starting address to read -// buffer - data buffer to hold the data read -// hightlow - high or low 16 bit word -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function read 16 bits from DPRAM -// -// Notes: -// -//--------------------------------------------------------------------------- +/* read 16 bits from DPRAM */ int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer, u8 highlow) { - int ret = STATUS_SUCCESS; + int ret = 0; u8 request; if (highlow == 0) @@ -255,25 +169,11 @@ int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer, return ret; } -//--------------------------------------------------------------------------- -// Function: ft1000_write_dpram16 -// -// Parameters: ft1000_usb - device structure -// indx - starting address to write the data -// value - 16bits value to write -// hightlow - high or low 16 bit word -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function writes into DPRAM a number of bytes -// -// Notes: -// -//--------------------------------------------------------------------------- -int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, u8 highlow) +/* write into DPRAM a number of bytes */ +int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, + u8 highlow) { - int ret = STATUS_SUCCESS; + int ret = 0; u8 request; if (highlow == 0) @@ -294,33 +194,18 @@ int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, u8 h return ret; } 
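The DPRAM register and 16/32-bit access helpers above now return 0 on success rather than STATUS_SUCCESS. A minimal caller sketch (hypothetical, not part of this patch; it only illustrates the convention the patch switches to, using names that already appear in the driver):

	int ret;

	/* Illustration only: success is 0, any non-zero return is treated
	 * as an error and can be propagated directly by the caller.
	 */
	ret = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_PS_HDR_LOC, 0, 0);
	if (ret != 0)
		return ret;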
-//--------------------------------------------------------------------------- -// Function: fix_ft1000_read_dpram32 -// -// Parameters: ft1000_usb - device structure -// indx - starting address to read -// buffer - data buffer to hold the data read -// -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function read DPRAM 4 words at a time -// -// Notes: -// -//--------------------------------------------------------------------------- +/* read DPRAM 4 words at a time */ int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer) { u8 buf[16]; u16 pos; - int ret = STATUS_SUCCESS; + int ret = 0; pos = (indx / 4) * 4; ret = ft1000_read_dpram32(ft1000dev, pos, buf, 16); - if (ret == STATUS_SUCCESS) { + if (ret == 0) { pos = (indx % 4) * 4; *buffer++ = buf[pos++]; *buffer++ = buf[pos++]; @@ -338,22 +223,7 @@ int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, } -//--------------------------------------------------------------------------- -// Function: fix_ft1000_write_dpram32 -// -// Parameters: ft1000_usb - device structure -// indx - starting address to write -// buffer - data buffer to write -// -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function write to DPRAM 4 words at a time -// -// Notes: -// -//--------------------------------------------------------------------------- +/* Description: This function write to DPRAM 4 words at a time */ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer) { u16 pos1; @@ -362,13 +232,13 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer) u8 buf[32]; u8 resultbuffer[32]; u8 *pdata; - int ret = STATUS_SUCCESS; + int ret = 0; pos1 = (indx / 4) * 4; pdata = buffer; ret = ft1000_read_dpram32(ft1000dev, pos1, buf, 16); - if (ret == STATUS_SUCCESS) { + if (ret == 0) { pos2 = (indx % 4)*4; buf[pos2++] = *buffer++; buf[pos2++] = *buffer++; @@ -382,24 +252,24 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer) ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16); - if (ret == STATUS_SUCCESS) { + if (ret == 0) { buffer = pdata; for (i = 0; i < 16; i++) { if (buf[i] != resultbuffer[i]) - ret = STATUS_FAILURE; + ret = -1; } } - if (ret == STATUS_FAILURE) { + if (ret == -1) { ret = ft1000_write_dpram32(ft1000dev, pos1, (u8 *)&tempbuffer[0], 16); ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16); - if (ret == STATUS_SUCCESS) { + if (ret == 0) { buffer = pdata; for (i = 0; i < 16; i++) { if (tempbuffer[i] != resultbuffer[i]) { - ret = STATUS_FAILURE; + ret = -1; DEBUG("%s Failed to write\n", __func__); } @@ -410,20 +280,10 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer) return ret; } - -//------------------------------------------------------------------------ -// -// Function: card_reset_dsp -// -// Synopsis: This function is called to reset or activate the DSP -// -// Arguments: value - reset or activate -// -// Returns: None -//----------------------------------------------------------------------- +/* reset or activate the DSP */ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value) { - u16 status = STATUS_SUCCESS; + int status = 0; u16 tempword; status = ft1000_write_register(ft1000dev, HOST_INTF_BE, @@ -457,21 +317,11 @@ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value) } } 
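The index arithmetic in fix_ft1000_read_dpram32()/fix_ft1000_write_dpram32() above is easy to misread; the following standalone sketch (illustrative values only, not part of the patch) spells out the same calculation:

	/* Each DPRAM index addresses a 32-bit entry, and the hardware is
	 * accessed in aligned groups of four entries (16 bytes), so an
	 * arbitrary index is split into a block start and a byte offset.
	 */
	u16 indx = 0x203;               /* example index */
	u16 pos1 = (indx / 4) * 4;      /* 0x200: first index of the 16-byte block */
	u16 pos2 = (indx % 4) * 4;      /* 12: byte offset of indx inside that block */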
-//--------------------------------------------------------------------------- -// Function: card_send_command -// -// Parameters: ft1000_usb - device structure -// ptempbuffer - command buffer -// size - command buffer size -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function sends a command to ASIC -// -// Notes: -// -//--------------------------------------------------------------------------- +/* send a command to ASIC +* Parameters: ft1000_usb - device structure +* ptempbuffer - command buffer +* size - command buffer size +*/ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer, int size) { @@ -486,7 +336,7 @@ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer, ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); if (temp & 0x0100) - msleep(10); + usleep_range(900, 1100); /* check for odd word */ size = size + 2; @@ -496,29 +346,21 @@ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer, size += 4 - (size % 4); ft1000_write_dpram32(ft1000dev, 0, commandbuf, size); - msleep(1); + usleep_range(900, 1100); ft1000_write_register(ft1000dev, FT1000_DB_DPRAM_TX, FT1000_REG_DOORBELL); - msleep(1); + usleep_range(900, 1100); ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); - if ((temp & 0x0100) == 0) { - //DEBUG("card_send_command: Message sent\n"); - } +#if 0 + if ((temp & 0x0100) == 0) + DEBUG("card_send_command: Message sent\n"); +#endif } -//-------------------------------------------------------------------------- -// -// Function: dsp_reload -// -// Synopsis: This function is called to load or reload the DSP -// -// Arguments: ft1000dev - device structure -// -// Returns: None -//----------------------------------------------------------------------- +/* load or reload the DSP */ int dsp_reload(struct ft1000_usb *ft1000dev) { int status; @@ -559,7 +401,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev) /* call codeloader */ status = scram_dnldr(ft1000dev, pFileStart, FileLength); - if (status != STATUS_SUCCESS) + if (status != 0) return -EIO; msleep(1000); @@ -569,17 +411,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev) return 0; } -//--------------------------------------------------------------------------- -// -// Function: ft1000_reset_asic -// Description: This function will call the Card Service function to reset the -// ASIC. -// Input: -// dev - device structure -// Output: -// none -// -//--------------------------------------------------------------------------- +/* call the Card Service function to reset the ASIC. 
*/ static void ft1000_reset_asic(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); @@ -607,18 +439,6 @@ static void ft1000_reset_asic(struct net_device *dev) DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword); } - -//--------------------------------------------------------------------------- -// -// Function: ft1000_reset_card -// Description: This function will reset the card -// Input: -// dev - device structure -// Output: -// status - FALSE (card reset fail) -// TRUE (card reset successful) -// -//--------------------------------------------------------------------------- static int ft1000_reset_card(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); @@ -666,19 +486,7 @@ static int ft1000_reset_card(struct net_device *dev) return TRUE; } -//--------------------------------------------------------------------------- -// Function: ft1000_usb_transmit_complete -// -// Parameters: urb - transmitted usb urb -// -// -// Returns: none -// -// Description: This is the callback function when a urb is transmitted -// -// Notes: -// -//--------------------------------------------------------------------------- +/* callback function when a urb is transmitted */ static void ft1000_usb_transmit_complete(struct urb *urb) { @@ -690,22 +498,10 @@ static void ft1000_usb_transmit_complete(struct urb *urb) netif_wake_queue(ft1000dev->net); } -//--------------------------------------------------------------------------- -// -// Function: ft1000_copy_down_pkt -// Description: This function will take an ethernet packet and convert it to -// a Flarion packet prior to sending it to the ASIC Downlink -// FIFO. -// Input: -// dev - device structure -// packet - address of ethernet packet -// len - length of IP packet -// Output: -// status - FAILURE -// SUCCESS -// -//--------------------------------------------------------------------------- -static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len) +/* take an ethernet packet and convert it to a Flarion +* packet prior to sending it to the ASIC Downlink FIFO. 
+*/ +static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len) { struct ft1000_info *pInfo = netdev_priv(netdev); struct ft1000_usb *pFt1000Dev = pInfo->priv; @@ -769,20 +565,10 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len) return 0; } -//--------------------------------------------------------------------------- -// Function: ft1000_start_xmit -// -// Parameters: skb - socket buffer to be sent -// dev - network device -// -// -// Returns: none -// -// Description: transmit a ethernet packet -// -// Notes: -// -//--------------------------------------------------------------------------- +/* transmit an ethernet packet +* Parameters: skb - socket buffer to be sent +* dev - network device +*/ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ft1000_info *pInfo = netdev_priv(dev); @@ -827,20 +613,7 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -//--------------------------------------------------------------------------- -// Function: ft1000_open -// -// Parameters: -// dev - network device -// -// -// Returns: none -// -// Description: open the network driver -// -// Notes: -// -//--------------------------------------------------------------------------- +/* open the network driver */ static int ft1000_open(struct net_device *dev) { struct ft1000_info *pInfo = netdev_priv(dev); @@ -871,29 +644,14 @@ static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev) return &(info->stats); } -static const struct net_device_ops ftnet_ops = -{ +static const struct net_device_ops ftnet_ops = { .ndo_open = &ft1000_open, .ndo_stop = &ft1000_close, .ndo_start_xmit = &ft1000_start_xmit, .ndo_get_stats = &ft1000_netdev_stats, }; -//--------------------------------------------------------------------------- -// Function: init_ft1000_netdev -// -// Parameters: ft1000dev - device structure -// -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function initialize the network device -// -// Notes: -// -//--------------------------------------------------------------------------- - +/* initialize the network device */ static int ft1000_reset(void *dev) { ft1000_reset_card(dev); @@ -931,14 +689,14 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev) card_nr[1] = '\0'; ret_val = kstrtou8(card_nr, 10, &gCardIndex); if (ret_val) { - printk(KERN_ERR "Can't parse netdev\n"); + netdev_err(ft1000dev->net, "Can't parse netdev\n"); goto err_net; } ft1000dev->CardNumber = gCardIndex; DEBUG("card number = %d\n", ft1000dev->CardNumber); } else { - printk(KERN_ERR "ft1000: Invalid device name\n"); + netdev_err(ft1000dev->net, "ft1000: Invalid device name\n"); ret_val = -ENXIO; goto err_net; } @@ -1014,20 +772,7 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev) return ret_val; } -//--------------------------------------------------------------------------- -// Function: reg_ft1000_netdev -// -// Parameters: ft1000dev - device structure -// -// -// Returns: STATUS_SUCCESS - success -// STATUS_FAILURE - failure -// -// Description: This function register the network driver -// -// Notes: -// -//--------------------------------------------------------------------------- +/* register the network driver */ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev, struct usb_interface *intf) { @@ -1060,19 +805,9 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev, return 0; } 
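init_ft1000_netdev() above extracts the card number from the interface name with kstrtou8(). A standalone sketch of that pattern (the `name` buffer is hypothetical; the patch itself works on the net_device name and gCardIndex):

	char card_nr[2];
	u8 index;
	int err;

	card_nr[0] = name[strlen(name) - 1];	/* trailing digit, e.g. '0' */
	card_nr[1] = '\0';
	err = kstrtou8(card_nr, 10, &index);	/* base 10; returns 0 on success */
	if (err)
		netdev_err(ft1000dev->net, "Can't parse netdev\n");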
-//--------------------------------------------------------------------------- -// -// Function: ft1000_copy_up_pkt -// Description: This function will take a packet from the FIFO up link and -// convert it into an ethernet packet and deliver it to the IP stack -// Input: -// urb - the receiving usb urb -// -// Output: -// status - FAILURE -// SUCCESS -// -//--------------------------------------------------------------------------- +/* take a packet from the FIFO up link and +* convert it into an ethernet packet and deliver it to the IP stack +*/ static int ft1000_copy_up_pkt(struct urb *urb) { struct ft1000_info *info = urb->context; @@ -1090,9 +825,9 @@ static int ft1000_copy_up_pkt(struct urb *urb) if (ft1000dev->status & FT1000_STATUS_CLOSING) { DEBUG("network driver is closed, return\n"); - return STATUS_SUCCESS; + return 0; } - // Read length + /* Read length */ len = urb->transfer_buffer_length; lena = urb->actual_length; @@ -1105,7 +840,7 @@ static int ft1000_copy_up_pkt(struct urb *urb) if (tempword != *chksum) { info->stats.rx_errors++; ft1000_submit_rx_urb(info); - return STATUS_FAILURE; + return -1; } skb = dev_alloc_skb(len + 12 + 2); @@ -1114,7 +849,7 @@ static int ft1000_copy_up_pkt(struct urb *urb) DEBUG("ft1000_copy_up_pkt: No Network buffers available\n"); info->stats.rx_errors++; ft1000_submit_rx_urb(info); - return STATUS_FAILURE; + return -1; } pbuffer = (u8 *) skb_put(skb, len + 12); @@ -1151,23 +886,11 @@ static int ft1000_copy_up_pkt(struct urb *urb) ft1000_submit_rx_urb(info); - return SUCCESS; + return 0; } -//--------------------------------------------------------------------------- -// -// Function: ft1000_submit_rx_urb -// Description: the receiving function of the network driver -// -// Input: -// info - a private structure contains the device information -// -// Output: -// status - FAILURE -// SUCCESS -// -//--------------------------------------------------------------------------- +/* the receiving function of the network driver */ static int ft1000_submit_rx_urb(struct ft1000_info *info) { int result; @@ -1196,20 +919,7 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info) return 0; } -//--------------------------------------------------------------------------- -// Function: ft1000_close -// -// Parameters: -// net - network device -// -// -// Returns: none -// -// Description: close the network driver -// -// Notes: -// -//--------------------------------------------------------------------------- +/* close the network driver */ int ft1000_close(struct net_device *net) { struct ft1000_info *pInfo = netdev_priv(net); @@ -1227,26 +937,14 @@ int ft1000_close(struct net_device *net) return 0; } -//--------------------------------------------------------------------------- -// -// Function: ft1000_chkcard -// Description: This function will check if the device is presently available on -// the system. -// Input: -// dev - device structure -// Output: -// status - FALSE (device is not present) -// TRUE (device is present) -// -//--------------------------------------------------------------------------- +/* check if the device is presently available on the system. 
*/ static int ft1000_chkcard(struct ft1000_usb *dev) { u16 tempword; - u16 status; + int status; if (dev->fCondResetPend) { - DEBUG - ("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n"); + DEBUG("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n"); return TRUE; } /* Mask register is used to check for device presence since it is never @@ -1254,8 +952,7 @@ static int ft1000_chkcard(struct ft1000_usb *dev) */ status = ft1000_read_register(dev, &tempword, FT1000_REG_SUP_IMASK); if (tempword == 0) { - DEBUG - ("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n"); + DEBUG("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n"); return FALSE; } /* The system will return the value of 0xffff for the version register @@ -1264,30 +961,22 @@ static int ft1000_chkcard(struct ft1000_usb *dev) status = ft1000_read_register(dev, &tempword, FT1000_REG_ASIC_ID); if (tempword != 0x1b01) { dev->status |= FT1000_STATUS_CLOSING; - DEBUG - ("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n"); + DEBUG("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n"); return FALSE; } return TRUE; } -//--------------------------------------------------------------------------- -// -// Function: ft1000_receive_cmd -// Description: This function will read a message from the dpram area. -// Input: -// dev - network device structure -// pbuffer - caller supply address to buffer -// pnxtph - pointer to next pseudo header -// Output: -// Status = 0 (unsuccessful) -// = 1 (successful) -// -//--------------------------------------------------------------------------- +/* read a message from the dpram area. +* Input: +* dev - network device structure +* pbuffer - caller supply address to buffer +*/ static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer, - int maxsz, u16 *pnxtph) + int maxsz) { - u16 size, ret; + u16 size; + int ret; u16 *ppseudohdr; int i; u16 tempword; @@ -1359,7 +1048,7 @@ static int ft1000_dsp_prov(void *arg) struct prov_record *ptr; struct pseudo_hdr *ppseudo_hdr; u16 *pmsg; - u16 status; + int status; u16 TempShortBuf[256]; DEBUG("*** DspProv Entered\n"); @@ -1381,7 +1070,7 @@ static int ft1000_dsp_prov(void *arg) i++; if (i == 10) { DEBUG("FT1000:ft1000_dsp_prov:message drop\n"); - return STATUS_FAILURE; + return -1; } ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL); @@ -1405,9 +1094,8 @@ static int ft1000_dsp_prov(void *arg) ppseudo_hdr->portsrc = 0; /* Calculate new checksum */ ppseudo_hdr->checksum = *pmsg++; - for (i = 1; i < 7; i++) { + for (i = 1; i < 7; i++) ppseudo_hdr->checksum ^= *pmsg++; - } TempShortBuf[0] = 0; TempShortBuf[1] = htons(len); @@ -1425,7 +1113,7 @@ static int ft1000_dsp_prov(void *arg) kfree(ptr->pprov_data); kfree(ptr); } - msleep(10); + usleep_range(9000, 11000); } DEBUG("DSP Provisioning List Entry finished\n"); @@ -1435,7 +1123,7 @@ static int ft1000_dsp_prov(void *arg) dev->fProvComplete = true; info->CardReady = 1; - return STATUS_SUCCESS; + return 0; } static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size) @@ -1449,7 +1137,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size) u16 i; struct pseudo_hdr *ppseudo_hdr; u16 *pmsg; - u16 status; + int status; union { u8 byte[2]; u16 wrd; @@ -1457,7 +1145,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size) char *cmdbuffer = kmalloc(1600, GFP_KERNEL); if (!cmdbuffer) - return STATUS_FAILURE; + return -1; status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size); @@ -1481,154 +1169,179 @@ static int 
ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size) DEBUG("ft1000_proc_drvmsg:Command message type = 0x%x\n", msgtype); switch (msgtype) { case MEDIA_STATE:{ - DEBUG - ("ft1000_proc_drvmsg:Command message type = MEDIA_STATE"); - - pmediamsg = (struct media_msg *)&cmdbuffer[0]; - if (info->ProgConStat != 0xFF) { - if (pmediamsg->state) { - DEBUG("Media is up\n"); - if (info->mediastate == 0) { - if (dev->NetDevRegDone) { - netif_wake_queue(dev-> - net); - } - info->mediastate = 1; - } - } else { - DEBUG("Media is down\n"); - if (info->mediastate == 1) { - info->mediastate = 0; - if (dev->NetDevRegDone) { - } - info->ConTm = 0; - } + DEBUG("ft1000_proc_drvmsg:Command message type = MEDIA_STATE"); + pmediamsg = (struct media_msg *)&cmdbuffer[0]; + if (info->ProgConStat != 0xFF) { + if (pmediamsg->state) { + DEBUG("Media is up\n"); + if (info->mediastate == 0) { + if (dev->NetDevRegDone) + netif_wake_queue(dev->net); + info->mediastate = 1; } } else { DEBUG("Media is down\n"); if (info->mediastate == 1) { info->mediastate = 0; - info->ConTm = 0; + if (dev->NetDevRegDone) + info->ConTm = 0; } } - break; + } else { + DEBUG("Media is down\n"); + if (info->mediastate == 1) { + info->mediastate = 0; + info->ConTm = 0; + } } + break; + } case DSP_INIT_MSG:{ - DEBUG - ("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG"); - - pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2]; - memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ); - DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n", - info->DspVer[0], info->DspVer[1], info->DspVer[2], - info->DspVer[3]); - memcpy(info->HwSerNum, pdspinitmsg->HwSerNum, - HWSERNUMSZ); - memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ); - memcpy(info->eui64, pdspinitmsg->eui64, EUISZ); - DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n", - info->eui64[0], info->eui64[1], info->eui64[2], - info->eui64[3], info->eui64[4], info->eui64[5], - info->eui64[6], info->eui64[7]); - dev->net->dev_addr[0] = info->eui64[0]; - dev->net->dev_addr[1] = info->eui64[1]; - dev->net->dev_addr[2] = info->eui64[2]; - dev->net->dev_addr[3] = info->eui64[5]; - dev->net->dev_addr[4] = info->eui64[6]; - dev->net->dev_addr[5] = info->eui64[7]; - - if (ntohs(pdspinitmsg->length) == - (sizeof(struct dsp_init_msg) - 20)) { - memcpy(info->ProductMode, - pdspinitmsg->ProductMode, MODESZ); - memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, - CALVERSZ); - memcpy(info->RfCalDate, pdspinitmsg->RfCalDate, - CALDATESZ); - DEBUG("RFCalVer = 0x%2x 0x%2x\n", - info->RfCalVer[0], info->RfCalVer[1]); - } - break; + DEBUG("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG"); + pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2]; + memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ); + DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n", + info->DspVer[0], info->DspVer[1], info->DspVer[2], + info->DspVer[3]); + memcpy(info->HwSerNum, pdspinitmsg->HwSerNum, + HWSERNUMSZ); + memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ); + memcpy(info->eui64, pdspinitmsg->eui64, EUISZ); + DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n", + info->eui64[0], info->eui64[1], info->eui64[2], + info->eui64[3], info->eui64[4], info->eui64[5], + info->eui64[6], info->eui64[7]); + dev->net->dev_addr[0] = info->eui64[0]; + dev->net->dev_addr[1] = info->eui64[1]; + dev->net->dev_addr[2] = info->eui64[2]; + dev->net->dev_addr[3] = info->eui64[5]; + dev->net->dev_addr[4] = info->eui64[6]; + dev->net->dev_addr[5] = info->eui64[7]; + + if (ntohs(pdspinitmsg->length) == + (sizeof(struct dsp_init_msg) - 20)) { + memcpy(info->ProductMode, pdspinitmsg->ProductMode, + 
MODESZ); + memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, CALVERSZ); + memcpy(info->RfCalDate, pdspinitmsg->RfCalDate, + CALDATESZ); + DEBUG("RFCalVer = 0x%2x 0x%2x\n", info->RfCalVer[0], + info->RfCalVer[1]); } + break; + } case DSP_PROVISION:{ - DEBUG - ("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n"); + DEBUG("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n"); - /* kick off dspprov routine to start provisioning - * Send provisioning data to DSP - */ - if (list_empty(&info->prov_list) == 0) { - dev->fProvComplete = false; - status = ft1000_dsp_prov(dev); - if (status != STATUS_SUCCESS) - goto out; - } else { - dev->fProvComplete = true; - status = - ft1000_write_register(dev, FT1000_DB_HB, - FT1000_REG_DOORBELL); - DEBUG - ("FT1000:drivermsg:No more DSP provisioning data in dsp image\n"); - } - DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n"); - break; + /* kick off dspprov routine to start provisioning + * Send provisioning data to DSP + */ + if (list_empty(&info->prov_list) == 0) { + dev->fProvComplete = false; + status = ft1000_dsp_prov(dev); + if (status != 0) + goto out; + } else { + dev->fProvComplete = true; + status = ft1000_write_register(dev, FT1000_DB_HB, + FT1000_REG_DOORBELL); + DEBUG("FT1000:drivermsg:No more DSP provisioning data in dsp image\n"); } + DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n"); + break; + } case DSP_STORE_INFO:{ - DEBUG - ("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO"); - - DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n"); - tempword = ntohs(pdrvmsg->length); - info->DSPInfoBlklen = tempword; - if (tempword < (MAX_DSP_SESS_REC - 4)) { - pmsg = (u16 *) &pdrvmsg->data[0]; - for (i = 0; i < ((tempword + 1) / 2); i++) { - DEBUG - ("FT1000:drivermsg:dsp info data = 0x%x\n", - *pmsg); - info->DSPInfoBlk[i + 10] = *pmsg++; - } - } else { - info->DSPInfoBlklen = 0; + DEBUG("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO"); + DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n"); + tempword = ntohs(pdrvmsg->length); + info->DSPInfoBlklen = tempword; + if (tempword < (MAX_DSP_SESS_REC - 4)) { + pmsg = (u16 *) &pdrvmsg->data[0]; + for (i = 0; i < ((tempword + 1) / 2); i++) { + DEBUG("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg); + info->DSPInfoBlk[i + 10] = *pmsg++; } - break; + } else { + info->DSPInfoBlklen = 0; } + break; + } case DSP_GET_INFO:{ - DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n"); - /* copy dsp info block to dsp */ - dev->DrvMsgPend = 1; - /* allow any outstanding ioctl to finish */ + DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n"); + /* copy dsp info block to dsp */ + dev->DrvMsgPend = 1; + /* allow any outstanding ioctl to finish */ + mdelay(10); + status = ft1000_read_register(dev, &tempword, + FT1000_REG_DOORBELL); + if (tempword & FT1000_DB_DPRAM_TX) { mdelay(10); - status = - ft1000_read_register(dev, &tempword, - FT1000_REG_DOORBELL); + status = ft1000_read_register(dev, &tempword, + FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) { mdelay(10); - status = - ft1000_read_register(dev, &tempword, - FT1000_REG_DOORBELL); - if (tempword & FT1000_DB_DPRAM_TX) { - mdelay(10); - status = - ft1000_read_register(dev, &tempword, - FT1000_REG_DOORBELL); - if (tempword & FT1000_DB_DPRAM_TX) - break; - } + status = ft1000_read_register(dev, &tempword, + FT1000_REG_DOORBELL); + if (tempword & FT1000_DB_DPRAM_TX) + break; } - /* Put message into Slow Queue - * Form Pseudo header - */ - pmsg = (u16 *) info->DSPInfoBlk; - *pmsg++ = 0; - *pmsg++ = - htons(info->DSPInfoBlklen + 20 + - info->DSPInfoBlklen); - 
ppseudo_hdr = - (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2]; - ppseudo_hdr->length = - htons(info->DSPInfoBlklen + 4 + - info->DSPInfoBlklen); + } + /* Put message into Slow Queue Form Pseudo header */ + pmsg = (u16 *) info->DSPInfoBlk; + *pmsg++ = 0; + *pmsg++ = htons(info->DSPInfoBlklen + 20 + info->DSPInfoBlklen); + ppseudo_hdr = + (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2]; + ppseudo_hdr->length = htons(info->DSPInfoBlklen + 4 + + info->DSPInfoBlklen); + ppseudo_hdr->source = 0x10; + ppseudo_hdr->destination = 0x20; + ppseudo_hdr->portdest = 0; + ppseudo_hdr->portsrc = 0; + ppseudo_hdr->sh_str_id = 0; + ppseudo_hdr->control = 0; + ppseudo_hdr->rsvd1 = 0; + ppseudo_hdr->rsvd2 = 0; + ppseudo_hdr->qos_class = 0; + /* Insert slow queue sequence number */ + ppseudo_hdr->seq_num = info->squeseqnum++; + /* Insert application id */ + ppseudo_hdr->portsrc = 0; + /* Calculate new checksum */ + ppseudo_hdr->checksum = *pmsg++; + for (i = 1; i < 7; i++) + ppseudo_hdr->checksum ^= *pmsg++; + + info->DSPInfoBlk[10] = 0x7200; + info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen); + status = ft1000_write_dpram32(dev, 0, + (u8 *)&info->DSPInfoBlk[0], + (unsigned short)(info->DSPInfoBlklen + 22)); + status = ft1000_write_register(dev, FT1000_DB_DPRAM_TX, + FT1000_REG_DOORBELL); + dev->DrvMsgPend = 0; + break; + } + case GET_DRV_ERR_RPT_MSG:{ + DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n"); + /* copy driver error message to dsp */ + dev->DrvMsgPend = 1; + /* allow any outstanding ioctl to finish */ + mdelay(10); + status = ft1000_read_register(dev, &tempword, + FT1000_REG_DOORBELL); + if (tempword & FT1000_DB_DPRAM_TX) { + mdelay(10); + status = ft1000_read_register(dev, &tempword, + FT1000_REG_DOORBELL); + if (tempword & FT1000_DB_DPRAM_TX) + mdelay(10); + } + if ((tempword & FT1000_DB_DPRAM_TX) == 0) { + /* Put message into Slow Queue Form Pseudo header */ + pmsg = (u16 *) &tempbuffer[0]; + ppseudo_hdr = (struct pseudo_hdr *)pmsg; + ppseudo_hdr->length = htons(0x0012); ppseudo_hdr->source = 0x10; ppseudo_hdr->destination = 0x20; ppseudo_hdr->portdest = 0; @@ -1647,293 +1360,245 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size) for (i = 1; i < 7; i++) ppseudo_hdr->checksum ^= *pmsg++; - info->DSPInfoBlk[10] = 0x7200; - info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen); - status = - ft1000_write_dpram32(dev, 0, - (u8 *) &info->DSPInfoBlk[0], - (unsigned short)(info-> - DSPInfoBlklen - + 22)); - status = - ft1000_write_register(dev, FT1000_DB_DPRAM_TX, - FT1000_REG_DOORBELL); - dev->DrvMsgPend = 0; - - break; + pmsg = (u16 *) &tempbuffer[16]; + *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG); + *pmsg++ = htons(0x000e); + *pmsg++ = htons(info->DSP_TIME[0]); + *pmsg++ = htons(info->DSP_TIME[1]); + *pmsg++ = htons(info->DSP_TIME[2]); + *pmsg++ = htons(info->DSP_TIME[3]); + convert.byte[0] = info->DspVer[0]; + convert.byte[1] = info->DspVer[1]; + *pmsg++ = convert.wrd; + convert.byte[0] = info->DspVer[2]; + convert.byte[1] = info->DspVer[3]; + *pmsg++ = convert.wrd; + *pmsg++ = htons(info->DrvErrNum); + + card_send_command(dev, (unsigned char *)&tempbuffer[0], + (u16)(0x0012 + PSEUDOSZ)); + info->DrvErrNum = 0; } - - case GET_DRV_ERR_RPT_MSG:{ - DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n"); - /* copy driver error message to dsp */ - dev->DrvMsgPend = 1; - /* allow any outstanding ioctl to finish */ - mdelay(10); - status = - ft1000_read_register(dev, &tempword, - FT1000_REG_DOORBELL); - if (tempword & FT1000_DB_DPRAM_TX) { - mdelay(10); - status = - ft1000_read_register(dev, 
&tempword, - FT1000_REG_DOORBELL); - if (tempword & FT1000_DB_DPRAM_TX) - mdelay(10); - } - - if ((tempword & FT1000_DB_DPRAM_TX) == 0) { - /* Put message into Slow Queue - * Form Pseudo header - */ - pmsg = (u16 *) &tempbuffer[0]; - ppseudo_hdr = (struct pseudo_hdr *)pmsg; - ppseudo_hdr->length = htons(0x0012); - ppseudo_hdr->source = 0x10; - ppseudo_hdr->destination = 0x20; - ppseudo_hdr->portdest = 0; - ppseudo_hdr->portsrc = 0; - ppseudo_hdr->sh_str_id = 0; - ppseudo_hdr->control = 0; - ppseudo_hdr->rsvd1 = 0; - ppseudo_hdr->rsvd2 = 0; - ppseudo_hdr->qos_class = 0; - /* Insert slow queue sequence number */ - ppseudo_hdr->seq_num = info->squeseqnum++; - /* Insert application id */ - ppseudo_hdr->portsrc = 0; - /* Calculate new checksum */ - ppseudo_hdr->checksum = *pmsg++; - for (i = 1; i < 7; i++) - ppseudo_hdr->checksum ^= *pmsg++; - - pmsg = (u16 *) &tempbuffer[16]; - *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG); - *pmsg++ = htons(0x000e); - *pmsg++ = htons(info->DSP_TIME[0]); - *pmsg++ = htons(info->DSP_TIME[1]); - *pmsg++ = htons(info->DSP_TIME[2]); - *pmsg++ = htons(info->DSP_TIME[3]); - convert.byte[0] = info->DspVer[0]; - convert.byte[1] = info->DspVer[1]; - *pmsg++ = convert.wrd; - convert.byte[0] = info->DspVer[2]; - convert.byte[1] = info->DspVer[3]; - *pmsg++ = convert.wrd; - *pmsg++ = htons(info->DrvErrNum); - - card_send_command(dev, - (unsigned char *)&tempbuffer[0], - (u16) (0x0012 + PSEUDOSZ)); - info->DrvErrNum = 0; - } - dev->DrvMsgPend = 0; - - break; - } - + dev->DrvMsgPend = 0; + break; + } default: break; } - status = STATUS_SUCCESS; + status = 0; out: kfree(cmdbuffer); DEBUG("return from ft1000_proc_drvmsg\n"); return status; } -int ft1000_poll(void* dev_id) +/* Check which application has registered for dsp broadcast messages */ +static int dsp_broadcast_msg_id(struct ft1000_usb *dev) { - struct ft1000_usb *dev = (struct ft1000_usb *)dev_id; - struct ft1000_info *info = netdev_priv(dev->net); + struct dpram_blk *pdpram_blk; + unsigned long flags; + int i; - u16 tempword; - u16 status; - u16 size; - int i; - u16 data; - u16 modulo; - u16 portid; - u16 nxtph; + for (i = 0; i < MAX_NUM_APP; i++) { + if ((dev->app_info[i].DspBCMsgFlag) + && (dev->app_info[i].fileobject) + && (dev->app_info[i].NumOfMsg + < MAX_MSG_LIMIT)) { + pdpram_blk = ft1000_get_buffer(&freercvpool); + if (pdpram_blk == NULL) { + DEBUG("Out of memory in free receive command pool\n"); + dev->app_info[i].nRxMsgMiss++; + return -1; + } + if (ft1000_receive_cmd(dev, pdpram_blk->pbuffer, + MAX_CMD_SQSIZE)) { + /* Put message into the + * appropriate application block + */ + dev->app_info[i].nRxMsg++; + spin_lock_irqsave(&free_buff_lock, flags); + list_add_tail(&pdpram_blk->list, + &dev->app_info[i] .app_sqlist); + dev->app_info[i].NumOfMsg++; + spin_unlock_irqrestore(&free_buff_lock, flags); + wake_up_interruptible(&dev->app_info[i] + .wait_dpram_msg); + } else { + dev->app_info[i].nRxMsgMiss++; + ft1000_free_buffer(pdpram_blk, &freercvpool); + DEBUG("pdpram_blk::ft1000_get_buffer NULL\n"); + return -1; + } + } + } + return 0; +} + +static int handle_misc_portid(struct ft1000_usb *dev) +{ struct dpram_blk *pdpram_blk; - struct pseudo_hdr *ppseudo_hdr; - unsigned long flags; - - if (ft1000_chkcard(dev) == FALSE) { - DEBUG("ft1000_poll::ft1000_chkcard: failed\n"); - return STATUS_FAILURE; - } - - status = ft1000_read_register (dev, &tempword, FT1000_REG_DOORBELL); - - if ( !status ) - { - - if (tempword & FT1000_DB_DPRAM_RX) { - - status = ft1000_read_dpram16(dev, 0x200, (u8 *)&data, 0); - size = ntohs(data) + 
16 + 2; - if (size % 4) { - modulo = 4 - (size % 4); - size = size + modulo; - } - status = ft1000_read_dpram16(dev, 0x201, (u8 *)&portid, 1); - portid &= 0xff; - - if (size < MAX_CMD_SQSIZE) { - switch (portid) - { - case DRIVERID: - DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n"); - - status = ft1000_proc_drvmsg (dev, size); - if (status != STATUS_SUCCESS ) - return status; - break; - case DSPBCMSGID: - // This is a dsp broadcast message - // Check which application has registered for dsp broadcast messages - - for (i=0; i<MAX_NUM_APP; i++) { - if ( (dev->app_info[i].DspBCMsgFlag) && (dev->app_info[i].fileobject) && - (dev->app_info[i].NumOfMsg < MAX_MSG_LIMIT) ) - { - nxtph = FT1000_DPRAM_RX_BASE + 2; - pdpram_blk = ft1000_get_buffer (&freercvpool); - if (pdpram_blk != NULL) { - if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) { - ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer; - // Put message into the appropriate application block - dev->app_info[i].nRxMsg++; - spin_lock_irqsave(&free_buff_lock, flags); - list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist); - dev->app_info[i].NumOfMsg++; - spin_unlock_irqrestore(&free_buff_lock, flags); - wake_up_interruptible(&dev->app_info[i].wait_dpram_msg); - } - else { - dev->app_info[i].nRxMsgMiss++; - // Put memory back to free pool - ft1000_free_buffer(pdpram_blk, &freercvpool); - DEBUG("pdpram_blk::ft1000_get_buffer NULL\n"); - } - } - else { - DEBUG("Out of memory in free receive command pool\n"); - dev->app_info[i].nRxMsgMiss++; - } - } - } - break; - default: - pdpram_blk = ft1000_get_buffer (&freercvpool); - - if (pdpram_blk != NULL) { - if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) { - ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer; - // Search for correct application block - for (i=0; i<MAX_NUM_APP; i++) { - if (dev->app_info[i].app_id == ppseudo_hdr->portdest) { - break; - } - } - - if (i == MAX_NUM_APP) { - DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ppseudo_hdr->portdest); - // Put memory back to free pool - ft1000_free_buffer(pdpram_blk, &freercvpool); - } - else { - if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) { - // Put memory back to free pool - ft1000_free_buffer(pdpram_blk, &freercvpool); - } - else { - dev->app_info[i].nRxMsg++; - // Put message into the appropriate application block - list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist); - dev->app_info[i].NumOfMsg++; - } - } - } - else { - // Put memory back to free pool - ft1000_free_buffer(pdpram_blk, &freercvpool); - } - } - else { - DEBUG("Out of memory in free receive command pool\n"); - } - break; - } - } - else { - DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size); - } - status = ft1000_write_register (dev, FT1000_DB_DPRAM_RX, FT1000_REG_DOORBELL); - } - else if (tempword & FT1000_DSP_ASIC_RESET) { - - // Let's reset the ASIC from the Host side as well - status = ft1000_write_register (dev, ASIC_RESET_BIT, FT1000_REG_RESET); - status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET); - i = 0; - while (tempword & ASIC_RESET_BIT) { - status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET); - msleep(10); - i++; - if (i==100) - break; - } - if (i==100) { - DEBUG("Unable to reset ASIC\n"); - return STATUS_SUCCESS; - } - msleep(10); - // Program WMARK register - status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK); - // clear ASIC reset doorbell - status = ft1000_write_register (dev, FT1000_DSP_ASIC_RESET,
FT1000_REG_DOORBELL); - msleep(10); - } - else if (tempword & FT1000_ASIC_RESET_REQ) { - DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n"); - - // clear ASIC reset request from DSP - status = ft1000_write_register (dev, FT1000_ASIC_RESET_REQ, FT1000_REG_DOORBELL); - status = ft1000_write_register (dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL); - // copy dsp session record from Adapter block - status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPSess.Rec[0], 1024); - // Program WMARK register - status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK); - // ring doorbell to tell DSP that ASIC is out of reset - status = ft1000_write_register (dev, FT1000_ASIC_RESET_DSP, FT1000_REG_DOORBELL); - } - else if (tempword & FT1000_DB_COND_RESET) { - DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n"); - - if (!dev->fAppMsgPend) { - // Reset ASIC and DSP - - status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX); - status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (u8 *)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX); - status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER2, (u8 *)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX); - status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (u8 *)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX); - info->CardReady = 0; - info->DrvErrNum = DSP_CONDRESET_INFO; - DEBUG("ft1000_hw:DSP conditional reset requested\n"); - info->ft1000_reset(dev->net); - } - else { - dev->fProvComplete = false; - dev->fCondResetPend = true; - } - - ft1000_write_register(dev, FT1000_DB_COND_RESET, FT1000_REG_DOORBELL); - } - - } - - return STATUS_SUCCESS; + int i; + + pdpram_blk = ft1000_get_buffer(&freercvpool); + if (pdpram_blk == NULL) { + DEBUG("Out of memory in free receive command pool\n"); + return -1; + } + if (!ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE)) + goto exit_failure; + /* Search for correct application block */ + for (i = 0; i < MAX_NUM_APP; i++) { + if (dev->app_info[i].app_id == ((struct pseudo_hdr *) + pdpram_blk->pbuffer)->portdest) + break; + } + if (i == MAX_NUM_APP) { + DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest); + goto exit_failure; + } else if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) { + goto exit_failure; + } else { + dev->app_info[i].nRxMsg++; + /* Put message into the appropriate application block */ + list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist); + dev->app_info[i].NumOfMsg++; + } + return 0; + +exit_failure: + ft1000_free_buffer(pdpram_blk, &freercvpool); + return -1; +} + +int ft1000_poll(void *dev_id) +{ + struct ft1000_usb *dev = (struct ft1000_usb *)dev_id; + struct ft1000_info *info = netdev_priv(dev->net); + u16 tempword; + int status; + u16 size; + int i; + u16 data; + u16 modulo; + u16 portid; + + if (ft1000_chkcard(dev) == FALSE) { + DEBUG("ft1000_poll::ft1000_chkcard: failed\n"); + return -1; + } + status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL); + if (!status) { + if (tempword & FT1000_DB_DPRAM_RX) { + status = ft1000_read_dpram16(dev, + 0x200, (u8 *)&data, 0); + size = ntohs(data) + 16 + 2; + if (size % 4) { + modulo = 4 - (size % 4); + size = size + modulo; + } + status = ft1000_read_dpram16(dev, 0x201, + (u8 *)&portid, 1); + portid &= 0xff; + if (size < MAX_CMD_SQSIZE) { + switch (portid) { + case DRIVERID: + DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: 
FT1000_DB_DPRAM_RX : portid DRIVERID\n"); + status = ft1000_proc_drvmsg(dev, size); + if (status != 0) + return status; + break; + case DSPBCMSGID: + status = dsp_broadcast_msg_id(dev); + break; + default: + status = handle_misc_portid(dev); + break; + } + } else + DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size); + status = ft1000_write_register(dev, + FT1000_DB_DPRAM_RX, + FT1000_REG_DOORBELL); + } else if (tempword & FT1000_DSP_ASIC_RESET) { + /* Let's reset the ASIC from the Host side as well */ + status = ft1000_write_register(dev, ASIC_RESET_BIT, + FT1000_REG_RESET); + status = ft1000_read_register(dev, &tempword, + FT1000_REG_RESET); + i = 0; + while (tempword & ASIC_RESET_BIT) { + status = ft1000_read_register(dev, &tempword, + FT1000_REG_RESET); + usleep_range(9000, 11000); + i++; + if (i == 100) + break; + } + if (i == 100) { + DEBUG("Unable to reset ASIC\n"); + return 0; + } + usleep_range(9000, 11000); + /* Program WMARK register */ + status = ft1000_write_register(dev, 0x600, + FT1000_REG_MAG_WATERMARK); + /* clear ASIC reset doorbell */ + status = ft1000_write_register(dev, + FT1000_DSP_ASIC_RESET, + FT1000_REG_DOORBELL); + usleep_range(9000, 11000); + } else if (tempword & FT1000_ASIC_RESET_REQ) { + DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n"); + /* clear ASIC reset request from DSP */ + status = ft1000_write_register(dev, + FT1000_ASIC_RESET_REQ, + FT1000_REG_DOORBELL); + status = ft1000_write_register(dev, HOST_INTF_BE, + FT1000_REG_SUP_CTRL); + /* copy dsp session record from Adapter block */ + status = ft1000_write_dpram32(dev, 0, + (u8 *)&info->DSPSess.Rec[0], 1024); + status = ft1000_write_register(dev, 0x600, + FT1000_REG_MAG_WATERMARK); + /* ring doorbell to tell DSP that + * ASIC is out of reset + * */ + status = ft1000_write_register(dev, + FT1000_ASIC_RESET_DSP, + FT1000_REG_DOORBELL); + } else if (tempword & FT1000_DB_COND_RESET) { + DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n"); + if (!dev->fAppMsgPend) { + /* Reset ASIC and DSP */ + status = ft1000_read_dpram16(dev, + FT1000_MAG_DSP_TIMER0, + (u8 *)&(info->DSP_TIME[0]), + FT1000_MAG_DSP_TIMER0_INDX); + status = ft1000_read_dpram16(dev, + FT1000_MAG_DSP_TIMER1, + (u8 *)&(info->DSP_TIME[1]), + FT1000_MAG_DSP_TIMER1_INDX); + status = ft1000_read_dpram16(dev, + FT1000_MAG_DSP_TIMER2, + (u8 *)&(info->DSP_TIME[2]), + FT1000_MAG_DSP_TIMER2_INDX); + status = ft1000_read_dpram16(dev, + FT1000_MAG_DSP_TIMER3, + (u8 *)&(info->DSP_TIME[3]), + FT1000_MAG_DSP_TIMER3_INDX); + info->CardReady = 0; + info->DrvErrNum = DSP_CONDRESET_INFO; + DEBUG("ft1000_hw:DSP conditional reset requested\n"); + info->ft1000_reset(dev->net); + } else { + dev->fProvComplete = false; + dev->fCondResetPend = true; + } + ft1000_write_register(dev, FT1000_DB_COND_RESET, + FT1000_REG_DOORBELL); + } + } + return 0; } diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c index 5ead942be680c3..2575d0d6bff3b1 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c @@ -33,13 +33,13 @@ #define seq_putx(m, message, size, var) \ seq_printf(m, message); \ - for(i = 0; i < (size - 1); i++) \ + for (i = 0; i < (size - 1); i++) \ seq_printf(m, "%02x:", var[i]); \ seq_printf(m, "%02x\n", var[i]) #define seq_putd(m, message, size, var) \ seq_printf(m, message); \ - for(i = 0; i < (size - 1); i++) \ + for (i = 0; i < (size - 1); i++) \ seq_printf(m, "%d.", var[i]); \ 
seq_printf(m, "%d\n", var[i]) @@ -47,14 +47,14 @@ #define FTNET_PROC init_net.proc_net -int ft1000_read_dpram16 (struct ft1000_usb *ft1000dev, u16 indx, +int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer, u8 highlow); static int ft1000ReadProc(struct seq_file *m, void *v) { - static const char *status[] = { - "Idle (Disconnect)", + static const char *status[] = { + "Idle (Disconnect)", "Searching", "Active (Connected)", "Waiting for L2", @@ -127,10 +127,10 @@ static int ft1000ReadProc(struct seq_file *m, void *v) } seq_printf(m, "Connection Time: %02ld:%02ld:%02ld\n", - ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60)); + ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60)); seq_printf(m, "Connection Time[s]: %ld\n", delta); seq_printf(m, "Asic ID: %s\n", - (info->AsicID) == ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC"); + (info->AsicID) == ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC"); seq_putx(m, "SKU: ", SKUSZ, info->Sku); seq_putx(m, "EUI64: ", EUISZ, info->eui64); seq_putd(m, "DSP version number: ", DSPVERSZ, info->DspVer); diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c index a8dd1e54878cb7..e40763e60dbde9 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c @@ -7,7 +7,6 @@ * $Id: *==================================================== */ -#include #include #include #include @@ -45,13 +44,13 @@ static int ft1000_poll_thread(void *arg) msleep(10); if (!gPollingfailed) { ret = ft1000_poll(arg); - if (ret != STATUS_SUCCESS) { + if (ret != 0) { DEBUG("ft1000_poll_thread: polling failed\n"); gPollingfailed = true; } } } - return STATUS_SUCCESS; + return 0; } static int ft1000_probe(struct usb_interface *interface, diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h index e8d00a930dc64b..a6fdd524ee6f10 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h @@ -11,8 +11,6 @@ #define PSEUDOSZ 16 -#define SUCCESS 0x00 - struct app_info_block { u32 nTxMsg; /* DPRAM msg sent to DSP with app_id */ u32 nRxMsg; /* DPRAM msg rcv from dsp with app_id */ @@ -31,9 +29,6 @@ struct app_info_block { #define FALSE 0 #define TRUE 1 -#define STATUS_SUCCESS 0 -#define STATUS_FAILURE 0x1001 - #define FT1000_STATUS_CLOSING 0x01 #define DSPBCMSGID 0x10 diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig index a0812d99136f5e..9c7c9267d52cb3 100644 --- a/drivers/staging/fwserial/Kconfig +++ b/drivers/staging/fwserial/Kconfig @@ -9,3 +9,23 @@ config FIREWIRE_SERIAL To compile this driver as a module, say M here: the module will be called firewire-serial. + +if FIREWIRE_SERIAL + +config FWTTY_MAX_TOTAL_PORTS + int "Maximum number of serial ports supported" + default "64" + help + Set this to the maximum number of serial ports you want the + firewire-serial driver to support. + +config FWTTY_MAX_CARD_PORTS + int "Maximum number of serial ports supported per adapter" + range 0 FWTTY_MAX_TOTAL_PORTS + default "32" + help + Set this to the maximum number of serial ports each firewire + adapter supports. The actual number of serial ports registered + is set with the module parameter "ttys". 
+ +endif diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c index 62df009e5ac7c8..8af136e9c9dc16 100644 --- a/drivers/staging/fwserial/fwserial.c +++ b/drivers/staging/fwserial/fwserial.c @@ -136,14 +136,14 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card, #ifdef FWTTY_PROFILING -static void profile_fifo_avail(struct fwtty_port *port, unsigned *stat) +static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat) { spin_lock_bh(&port->lock); - profile_size_distrib(stat, dma_fifo_avail(&port->tx_fifo)); + fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo)); spin_unlock_bh(&port->lock); } -static void dump_profile(struct seq_file *m, struct stats *stats) +static void fwtty_dump_profile(struct seq_file *m, struct stats *stats) { /* for each stat, print sum of 0 to 2^k, then individually */ int k = 4; @@ -183,8 +183,8 @@ static void dump_profile(struct seq_file *m, struct stats *stats) } #else -#define profile_fifo_avail(port, stat) -#define dump_profile(m, stats) +#define fwtty_profile_fifo(port, stat) +#define fwtty_dump_profile(m, stats) #endif /* @@ -456,16 +456,27 @@ static int fwtty_write_port_status(struct fwtty_port *port) return err; } -static void __fwtty_throttle(struct fwtty_port *port, struct tty_struct *tty) +static void fwtty_throttle_port(struct fwtty_port *port) { + struct tty_struct *tty; unsigned old; + tty = tty_port_tty_get(&port->port); + if (!tty) + return; + + spin_lock_bh(&port->lock); + old = port->mctrl; port->mctrl |= OOB_RX_THROTTLE; if (C_CRTSCTS(tty)) port->mctrl &= ~TIOCM_RTS; if (~old & OOB_RX_THROTTLE) __fwtty_write_port_status(port); + + spin_unlock_bh(&port->lock); + + tty_kref_put(tty); } /** @@ -532,80 +543,14 @@ static void fwtty_emit_breaks(struct work_struct *work) port->icount.brk += brk; } -static void fwtty_pushrx(struct work_struct *work) -{ - struct fwtty_port *port = to_port(work, push); - struct tty_struct *tty; - struct buffered_rx *buf, *next; - int n, c = 0; - - spin_lock_bh(&port->lock); - list_for_each_entry_safe(buf, next, &port->buf_list, list) { - n = tty_insert_flip_string_fixed_flag(&port->port, buf->data, - TTY_NORMAL, buf->n); - c += n; - port->buffered -= n; - if (n < buf->n) { - if (n > 0) { - memmove(buf->data, buf->data + n, buf->n - n); - buf->n -= n; - } - tty = tty_port_tty_get(&port->port); - if (tty) { - __fwtty_throttle(port, tty); - tty_kref_put(tty); - } - break; - } else { - list_del(&buf->list); - kfree(buf); - } - } - if (c > 0) - tty_flip_buffer_push(&port->port); - - if (list_empty(&port->buf_list)) - clear_bit(BUFFERING_RX, &port->flags); - spin_unlock_bh(&port->lock); -} - -static int fwtty_buffer_rx(struct fwtty_port *port, unsigned char *d, size_t n) -{ - struct buffered_rx *buf; - size_t size = (n + sizeof(struct buffered_rx) + 0xFF) & ~0xFF; - - if (port->buffered + n > HIGH_WATERMARK) { - fwtty_err_ratelimited(port, "overflowed rx buffer: buffered: %d new: %zu wtrmk: %d\n", - port->buffered, n, HIGH_WATERMARK); - return 0; - } - buf = kmalloc(size, GFP_ATOMIC); - if (!buf) - return 0; - INIT_LIST_HEAD(&buf->list); - buf->n = n; - memcpy(buf->data, d, n); - - spin_lock_bh(&port->lock); - list_add_tail(&buf->list, &port->buf_list); - port->buffered += n; - if (port->buffered > port->stats.watermark) - port->stats.watermark = port->buffered; - set_bit(BUFFERING_RX, &port->flags); - spin_unlock_bh(&port->lock); - - return n; -} - static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len) { - struct tty_struct *tty; int 
c, n = len; unsigned lsr; int err = 0; fwtty_dbg(port, "%d\n", n); - profile_size_distrib(port->stats.reads, n); + fwtty_profile_data(port->stats.reads, n); if (port->write_only) { n = 0; @@ -636,31 +581,24 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len) goto out; } - if (!test_bit(BUFFERING_RX, &port->flags)) { - c = tty_insert_flip_string_fixed_flag(&port->port, data, - TTY_NORMAL, n); - if (c > 0) - tty_flip_buffer_push(&port->port); - n -= c; - - if (n) { - /* start buffering and throttling */ - n -= fwtty_buffer_rx(port, &data[c], n); - - tty = tty_port_tty_get(&port->port); - if (tty) { - spin_lock_bh(&port->lock); - __fwtty_throttle(port, tty); - spin_unlock_bh(&port->lock); - tty_kref_put(tty); - } - } - } else - n -= fwtty_buffer_rx(port, data, n); + c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n); + if (c > 0) + tty_flip_buffer_push(&port->port); + n -= c; if (n) { port->overrun = true; err = -EIO; + fwtty_err_ratelimited(port, "flip buffer overrun\n"); + + } else { + /* throttle the sender if remaining flip buffer space has + * reached high watermark to avoid losing data which may be + * in-flight. Since the AR request context is 32k, that much + * data may have _already_ been acked. + */ + if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK) + fwtty_throttle_port(port); } out: @@ -821,7 +759,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain) if (n == -EAGAIN) ++port->stats.tx_stall; else if (n == -ENODATA) - profile_size_distrib(port->stats.txns, 0); + fwtty_profile_data(port->stats.txns, 0); else { ++port->stats.fifo_errs; fwtty_err_ratelimited(port, "fifo err: %d\n", @@ -830,7 +768,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain) break; } - profile_size_distrib(port->stats.txns, txn->dma_pended.len); + fwtty_profile_data(port->stats.txns, txn->dma_pended.len); fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST, peer->fifo_addr, txn->dma_pended.data, @@ -1101,20 +1039,13 @@ static int fwtty_port_activate(struct tty_port *tty_port, static void fwtty_port_shutdown(struct tty_port *tty_port) { struct fwtty_port *port = to_port(tty_port, port); - struct buffered_rx *buf, *next; /* TODO: cancel outstanding transactions */ cancel_delayed_work_sync(&port->emit_breaks); cancel_delayed_work_sync(&port->drain); - cancel_work_sync(&port->push); spin_lock_bh(&port->lock); - list_for_each_entry_safe(buf, next, &port->buf_list, list) { - list_del(&buf->list); - kfree(buf); - } - port->buffered = 0; port->flags = 0; port->break_ctl = 0; port->overrun = 0; @@ -1184,7 +1115,7 @@ static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c) int n, len; fwtty_dbg(port, "%d\n", c); - profile_size_distrib(port->stats.writes, c); + fwtty_profile_data(port->stats.writes, c); spin_lock_bh(&port->lock); n = dma_fifo_in(&port->tx_fifo, buf, c); @@ -1262,9 +1193,7 @@ static void fwtty_unthrottle(struct tty_struct *tty) fwtty_dbg(port, "CRTSCTS: %d\n", (C_CRTSCTS(tty) != 0)); - profile_fifo_avail(port, port->stats.unthrottle); - - schedule_work(&port->push); + fwtty_profile_fifo(port, port->stats.unthrottle); spin_lock_bh(&port->lock); port->mctrl &= ~OOB_RX_THROTTLE; @@ -1523,15 +1452,14 @@ static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port) seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped, stats.tx_stall, stats.fifo_errs, stats.lost); - seq_printf(m, " pkts:%d thr:%d wtrmk:%d", stats.sent, stats.throttled, - stats.watermark); + seq_printf(m, " pkts:%d 
thr:%d", stats.sent, stats.throttled); if (port->port.console) { seq_puts(m, "\n "); (*port->fwcon_ops->proc_show)(m, port->con_data); } - dump_profile(m, &port->stats); + fwtty_dump_profile(m, &port->stats); } static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer) @@ -2297,13 +2225,12 @@ static int fwserial_create(struct fw_unit *unit) port->index = FWTTY_INVALID_INDEX; port->port.ops = &fwtty_port_ops; port->serial = serial; + tty_buffer_set_limit(&port->port, 128 * 1024); spin_lock_init(&port->lock); INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx); INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks); INIT_WORK(&port->hangup, fwtty_do_hangup); - INIT_WORK(&port->push, fwtty_pushrx); - INIT_LIST_HEAD(&port->buf_list); init_waitqueue_head(&port->wait_tx); port->max_payload = link_speed_to_max_payload(SCODE_100); dma_fifo_init(&port->tx_fifo); diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h index 24635014a2acda..54f7f9b9b2123a 100644 --- a/drivers/staging/fwserial/fwserial.h +++ b/drivers/staging/fwserial/fwserial.h @@ -22,14 +22,14 @@ #ifdef FWTTY_PROFILING #define DISTRIBUTION_MAX_SIZE 8192 #define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1) -static inline void profile_size_distrib(unsigned stat[], unsigned val) +static inline void fwtty_profile_data(unsigned stat[], unsigned val) { int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0; ++stat[n]; } #else #define DISTRIBUTION_MAX_INDEX 0 -#define profile_size_distrib(st, n) +#define fwtty_profile_data(st, n) #endif /* Parameters for both VIRT_CABLE_PLUG & VIRT_CABLE_PLUG_RSP mgmt codes */ @@ -166,7 +166,6 @@ struct stats { unsigned sent; unsigned lost; unsigned throttled; - unsigned watermark; unsigned reads[DISTRIBUTION_MAX_INDEX + 1]; unsigned writes[DISTRIBUTION_MAX_INDEX + 1]; unsigned txns[DISTRIBUTION_MAX_INDEX + 1]; @@ -183,12 +182,6 @@ struct fwconsole_ops { #define FWCON_NOTIFY_ATTACH 1 #define FWCON_NOTIFY_DETACH 2 -struct buffered_rx { - struct list_head list; - size_t n; - unsigned char data[0]; -}; - /** * fwtty_port: structure used to track/represent underlying tty_port * @port: underlying tty_port @@ -223,11 +216,6 @@ struct buffered_rx { * The work can race with the writer but concurrent sending is * prevented with the IN_TX flag. Scheduled under lock to * limit scheduling when fifo has just been drained. - * @push: work responsible for pushing buffered rx to the ldisc. - * rx can become buffered if the tty buffer is filled before the - * ldisc throttles the sender. 
- * @buf_list: list of buffered rx yet to be sent to ldisc - * @buffered: byte count of buffered rx * @tx_fifo: fifo used to store & block-up writes for dma to remote * @max_payload: max bytes transmissable per dma (based on peer's max_payload) * @status_mask: UART_LSR_* bitmask significant to rx (based on termios) @@ -267,9 +255,6 @@ struct fwtty_port { spinlock_t lock; unsigned mctrl; struct delayed_work drain; - struct work_struct push; - struct list_head buf_list; - int buffered; struct dma_fifo tx_fifo; int max_payload; unsigned status_mask; @@ -291,7 +276,6 @@ struct fwtty_port { /* bit #s for flags field */ #define IN_TX 0 #define STOP_TX 1 -#define BUFFERING_RX 2 /* bitmasks for special mctrl/mstatus bits */ #define OOB_RX_THROTTLE 0x00010000 @@ -307,8 +291,8 @@ struct fwtty_port { #define FREQ_BREAKS (HZ / 50) /* Ports are allocated in blocks of num_ports for each fw_card */ -#define MAX_CARD_PORTS 32 /* max # of ports per card */ -#define MAX_TOTAL_PORTS 64 /* max # of ports total */ +#define MAX_CARD_PORTS CONFIG_FWTTY_MAX_CARD_PORTS +#define MAX_TOTAL_PORTS CONFIG_FWTTY_MAX_TOTAL_PORTS /* tuning parameters */ #define FWTTY_PORT_TXFIFO_LEN 4096 diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index c57a6ba5d010e8..74a03608b2ddc9 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -44,18 +44,6 @@ */ #define DEFAULT_MTU_SIZE 1500 -#define gdm_dev_endian(n) (\ - n->phy_dev->get_endian(n->phy_dev->priv_dev)) - -#define gdm_lte_hci_send(n, d, l) (\ - n->phy_dev->send_hci_func(n->phy_dev->priv_dev, d, l, NULL, NULL)) - -#define gdm_lte_sdu_send(n, d, l, c, b, i, t) (\ - n->phy_dev->send_sdu_func(n->phy_dev->priv_dev, d, l, n->pdn_table.dft_eps_id, 0, c, b, i, t)) - -#define gdm_lte_rcv_with_cb(n, c, b, e) (\ - n->rcv_func(n->priv_dev, c, b, e)) - #define IP_VERSION_4 4 #define IP_VERSION_6 6 @@ -458,13 +446,11 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev) sscanf(dev->name, "lte%d", &idx); - ret = gdm_lte_sdu_send(nic, - data_buf, - data_len, - tx_complete, - nic, - idx, - nic_type); + ret = nic->phy_dev->send_sdu_func(nic->phy_dev->priv_dev, + data_buf, data_len, + nic->pdn_table.dft_eps_id, 0, + tx_complete, nic, idx, + nic_type); if (ret == TX_NO_BUFFER || ret == TX_NO_SPC) { netif_stop_queue(dev); @@ -503,14 +489,18 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len) sscanf(dev->name, "lte%d", &idx); return netlink_send(lte_event.sock, idx, 0, buf, - gdm_dev16_to_cpu(gdm_dev_endian(nic), hci->len) + HCI_HEADER_SIZE); + gdm_dev16_to_cpu( + nic->phy_dev->get_endian( + nic->phy_dev->priv_dev), hci->len) + + HCI_HEADER_SIZE); } static void gdm_lte_event_rcv(struct net_device *dev, u16 type, void *msg, int len) { struct nic *nic = netdev_priv(dev); - gdm_lte_hci_send(nic, msg, len); + nic->phy_dev->send_hci_func(nic->phy_dev->priv_dev, msg, len, NULL, + NULL); } int gdm_lte_event_init(void) @@ -688,8 +678,14 @@ static void gdm_lte_pdn_table(struct net_device *dev, char *buf, int len) if (pdn_table->activate) { nic->pdn_table.activate = pdn_table->activate; - nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->dft_eps_id); - nic->pdn_table.nic_type = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->nic_type); + nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu( + nic->phy_dev->get_endian( + nic->phy_dev->priv_dev), + pdn_table->dft_eps_id); + nic->pdn_table.nic_type = gdm_dev32_to_cpu( + nic->phy_dev->get_endian( + nic->phy_dev->priv_dev), + 
pdn_table->nic_type); netdev_info(dev, "pdn activated, nic_type=0x%x\n", nic->pdn_table.nic_type); @@ -762,7 +758,7 @@ void start_rx_proc(struct phy_dev *phy_dev) int i; for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++) - gdm_lte_rcv_with_cb(phy_dev, rx_complete, phy_dev, USB_COMPLETE); + phy_dev->rcv_func(phy_dev->priv_dev, rx_complete, phy_dev, USB_COMPLETE); } static struct net_device_ops gdm_netdev_ops = { diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c index 62163673976c30..2fa3a5a6580fa5 100644 --- a/drivers/staging/gdm724x/gdm_mux.c +++ b/drivers/staging/gdm724x/gdm_mux.c @@ -158,7 +158,6 @@ static int up_to_host(struct mux_rx *r) unsigned int start_flag; unsigned int payload_size; unsigned short packet_type; - int remain; int dummy_cnt; u32 packet_size_sum = r->offset; int index; @@ -176,8 +175,7 @@ static int up_to_host(struct mux_rx *r) break; } - remain = (MUX_HEADER_SIZE + payload_size) % 4; - dummy_cnt = remain ? (4-remain) : 0; + dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4); if (len - packet_size_sum < MUX_HEADER_SIZE + payload_size + dummy_cnt) { @@ -361,7 +359,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, struct mux_pkt_header *mux_header; struct mux_tx *t = NULL; static u32 seq_num = 1; - int remain; int dummy_cnt; int total_len; int ret; @@ -375,8 +372,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, spin_lock_irqsave(&mux_dev->write_lock, flags); - remain = (MUX_HEADER_SIZE + len) % 4; - dummy_cnt = remain ? (4 - remain) : 0; + dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4); total_len = len + MUX_HEADER_SIZE + dummy_cnt; diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c index c0f7cd75116b1f..fe47cd3eb2ed64 100644 --- a/drivers/staging/gdm724x/gdm_tty.c +++ b/drivers/staging/gdm724x/gdm_tty.c @@ -15,7 +15,6 @@ #include #include -#include #include #include #include diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c index 781134af69d13f..33458a583142db 100644 --- a/drivers/staging/gdm724x/gdm_usb.c +++ b/drivers/staging/gdm724x/gdm_usb.c @@ -830,24 +830,19 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id if (bInterfaceNumber > NETWORK_INTERFACE) { pr_info("not a network device\n"); - return -1; + return -ENODEV; } - phy_dev = kmalloc(sizeof(struct phy_dev), GFP_ATOMIC); - if (!phy_dev) { - ret = -ENOMEM; - goto out; - } + phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL); + if (!phy_dev) + return -ENOMEM; - udev = kmalloc(sizeof(struct lte_udev), GFP_ATOMIC); + udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL); if (!udev) { ret = -ENOMEM; - goto out; + goto err_udev; } - memset(phy_dev, 0, sizeof(struct phy_dev)); - memset(udev, 0, sizeof(struct lte_udev)); - phy_dev->priv_dev = (void *)udev; phy_dev->send_hci_func = gdm_usb_hci_send; phy_dev->send_sdu_func = gdm_usb_sdu_send; @@ -858,7 +853,7 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id ret = init_usb(udev); if (ret < 0) { pr_err("init_usb func failed\n"); - goto out; + goto err_init_usb; } udev->intf = intf; @@ -875,23 +870,22 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id ret = request_mac_address(udev); if (ret < 0) { pr_err("request Mac address failed\n"); - goto out; + goto err_mac_address; } start_rx_proc(phy_dev); -out: - - if (ret < 0) { - kfree(phy_dev); - if (udev) { - release_usb(udev); - kfree(udev); - } - } - usb_get_dev(usbdev); 
usb_set_intfdata(intf, phy_dev); + return 0; + +err_mac_address: + release_usb(udev); +err_init_usb: + kfree(udev); +err_udev: + kfree(phy_dev); + return ret; } diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c index cc3692439a5c95..50d43ada093637 100644 --- a/drivers/staging/gdm72xx/gdm_qos.c +++ b/drivers/staging/gdm72xx/gdm_qos.c @@ -97,7 +97,7 @@ void gdm_qos_init(void *nic_ptr) struct qos_cb_s *qcb = &nic->qos; int i; - for (i = 0 ; i < QOS_MAX; i++) { + for (i = 0; i < QOS_MAX; i++) { INIT_LIST_HEAD(&qcb->qos_list[i]); qcb->csr[i].qos_buf_count = 0; qcb->csr[i].enabled = 0; diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c index e0cb2ffb41beac..f8788bf0a7d39e 100644 --- a/drivers/staging/gdm72xx/gdm_usb.c +++ b/drivers/staging/gdm72xx/gdm_usb.c @@ -780,9 +780,10 @@ static int k_mode_thread(void *arg) spin_lock_irqsave(&k_lock, flags2); } + wait_event_interruptible_lock_irq(k_wait, + !list_empty(&k_list) || k_mode_stop, + k_lock); spin_unlock_irqrestore(&k_lock, flags2); - - interruptible_sleep_on(&k_wait); } return 0; } diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c index 4302fcbdfdc3ce..cbe5dcfc2ac9f3 100644 --- a/drivers/staging/gdm72xx/sdio_boot.c +++ b/drivers/staging/gdm72xx/sdio_boot.c @@ -13,7 +13,6 @@ #include #include -#include #include #include #include diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c index 81e2ad4038feea..eca0873098cd5f 100644 --- a/drivers/staging/goldfish/goldfish_nand.c +++ b/drivers/staging/goldfish/goldfish_nand.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c index 4c9364b63c77d6..6f38ca95f9bb39 100644 --- a/drivers/staging/iio/accel/adis16220_core.c +++ b/drivers/staging/iio/accel/adis16220_core.c @@ -439,13 +439,13 @@ static int adis16220_probe(struct spi_device *spi) indio_dev->channels = adis16220_channels; indio_dev->num_channels = ARRAY_SIZE(adis16220_channels); - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(&spi->dev, indio_dev); if (ret) return ret; ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin); if (ret) - goto error_unregister_dev; + return ret; ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin); if (ret) @@ -470,8 +470,6 @@ static int adis16220_probe(struct spi_device *spi) sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin); error_rm_accel_bin: sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin); -error_unregister_dev: - iio_device_unregister(indio_dev); return ret; } @@ -482,7 +480,6 @@ static int adis16220_remove(struct spi_device *spi) sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin); sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin); sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin); - iio_device_unregister(indio_dev); return 0; } diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c index 735c0a34fa93a1..898653c092794c 100644 --- a/drivers/staging/iio/accel/lis3l02dq_core.c +++ b/drivers/staging/iio/accel/lis3l02dq_core.c @@ -676,10 +676,10 @@ static const struct attribute_group lis3l02dq_attribute_group = { static const struct iio_info lis3l02dq_info = { .read_raw = &lis3l02dq_read_raw, .write_raw = &lis3l02dq_write_raw, - .read_event_value_new = &lis3l02dq_read_thresh, - .write_event_value_new = &lis3l02dq_write_thresh, - 
.write_event_config_new = &lis3l02dq_write_event_config, - .read_event_config_new = &lis3l02dq_read_event_config, + .read_event_value = &lis3l02dq_read_thresh, + .write_event_value = &lis3l02dq_write_thresh, + .write_event_config = &lis3l02dq_write_event_config, + .read_event_config = &lis3l02dq_read_event_config, .driver_module = THIS_MODULE, .attrs = &lis3l02dq_attribute_group, }; diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c index c49e6ef9d05f86..7f6ccdfaf16830 100644 --- a/drivers/staging/iio/accel/sca3000_core.c +++ b/drivers/staging/iio/accel/sca3000_core.c @@ -1126,20 +1126,20 @@ static const struct iio_info sca3000_info = { .attrs = &sca3000_attribute_group, .read_raw = &sca3000_read_raw, .event_attrs = &sca3000_event_attribute_group, - .read_event_value_new = &sca3000_read_thresh, - .write_event_value_new = &sca3000_write_thresh, - .read_event_config_new = &sca3000_read_event_config, - .write_event_config_new = &sca3000_write_event_config, + .read_event_value = &sca3000_read_thresh, + .write_event_value = &sca3000_write_thresh, + .read_event_config = &sca3000_read_event_config, + .write_event_config = &sca3000_write_event_config, .driver_module = THIS_MODULE, }; static const struct iio_info sca3000_info_with_temp = { .attrs = &sca3000_attribute_group_with_temp, .read_raw = &sca3000_read_raw, - .read_event_value_new = &sca3000_read_thresh, - .write_event_value_new = &sca3000_write_thresh, - .read_event_config_new = &sca3000_read_event_config, - .write_event_config_new = &sca3000_write_event_config, + .read_event_value = &sca3000_read_thresh, + .write_event_value = &sca3000_write_thresh, + .read_event_config = &sca3000_read_event_config, + .write_event_config = &sca3000_write_event_config, .driver_module = THIS_MODULE, }; diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index 8209fa542a8a00..1ac11f64827c73 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -134,6 +134,8 @@ struct ad7280_state { unsigned char aux_threshhigh; unsigned char aux_threshlow; unsigned char cb_mask[AD7280A_MAX_CHAIN]; + + __be32 buf[2] ____cacheline_aligned; }; static void ad7280_crc8_build_table(unsigned char *crc_tab) @@ -189,22 +191,22 @@ static void ad7280_delay(struct ad7280_state *st) msleep(1); } -static int __ad7280_read32(struct spi_device *spi, unsigned *val) +static int __ad7280_read32(struct ad7280_state *st, unsigned *val) { - unsigned rx_buf, tx_buf = cpu_to_be32(AD7280A_READ_TXVAL); int ret; - struct spi_transfer t = { - .tx_buf = &tx_buf, - .rx_buf = &rx_buf, + .tx_buf = &st->buf[0], + .rx_buf = &st->buf[1], .len = 4, }; - ret = spi_sync_transfer(spi, &t, 1); + st->buf[0] = cpu_to_be32(AD7280A_READ_TXVAL); + + ret = spi_sync_transfer(st->spi, &t, 1); if (ret) return ret; - *val = be32_to_cpu(rx_buf); + *val = be32_to_cpu(st->buf[1]); return 0; } @@ -216,9 +218,9 @@ static int ad7280_write(struct ad7280_state *st, unsigned devaddr, (val & 0xFF) << 13 | all << 12); reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2; - reg = cpu_to_be32(reg); + st->buf[0] = cpu_to_be32(reg); - return spi_write(st->spi, ®, 4); + return spi_write(st->spi, &st->buf[0], 4); } static int ad7280_read(struct ad7280_state *st, unsigned devaddr, @@ -248,7 +250,7 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr, if (ret) return ret; - __ad7280_read32(st->spi, &tmp); + __ad7280_read32(st, &tmp); if (ad7280_check_crc(st, tmp)) return -EIO; @@ -286,7 +288,7 @@ static int 
ad7280_read_channel(struct ad7280_state *st, unsigned devaddr, ad7280_delay(st); - __ad7280_read32(st->spi, &tmp); + __ad7280_read32(st, &tmp); if (ad7280_check_crc(st, tmp)) return -EIO; @@ -319,7 +321,7 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt, ad7280_delay(st); for (i = 0; i < cnt; i++) { - __ad7280_read32(st->spi, &tmp); + __ad7280_read32(st, &tmp); if (ad7280_check_crc(st, tmp)) return -EIO; @@ -362,7 +364,7 @@ static int ad7280_chain_setup(struct ad7280_state *st) return ret; for (n = 0; n <= AD7280A_MAX_CHAIN; n++) { - __ad7280_read32(st->spi, &val); + __ad7280_read32(st, &val); if (val == 0) return n - 1; diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c index d13f8aeeb62fef..357cef2a6f4c5c 100644 --- a/drivers/staging/iio/adc/ad7291.c +++ b/drivers/staging/iio/adc/ad7291.c @@ -452,10 +452,10 @@ static const struct iio_chan_spec ad7291_channels[] = { static const struct iio_info ad7291_info = { .read_raw = &ad7291_read_raw, - .read_event_config_new = &ad7291_read_event_config, - .write_event_config_new = &ad7291_write_event_config, - .read_event_value_new = &ad7291_read_event_value, - .write_event_value_new = &ad7291_write_event_value, + .read_event_config = &ad7291_read_event_config, + .write_event_config = &ad7291_write_event_config, + .read_event_value = &ad7291_read_event_value, + .write_event_value = &ad7291_write_event_value, .driver_module = THIS_MODULE, }; diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c index 2083673a79ca67..f0f05f195d2c5b 100644 --- a/drivers/staging/iio/adc/ad7606_core.c +++ b/drivers/staging/iio/adc/ad7606_core.c @@ -239,7 +239,12 @@ static const struct attribute_group ad7606_attribute_group_range = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\ .scan_index = num, \ - .scan_type = IIO_ST('s', 16, 16, 0), \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_CPU, \ + }, \ } static const struct iio_chan_spec ad7606_8_channels[] = { diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c index 9f48e5c74eed85..2369cf28412ed6 100644 --- a/drivers/staging/iio/adc/ad7816.c +++ b/drivers/staging/iio/adc/ad7816.c @@ -412,7 +412,7 @@ static int ad7816_probe(struct spi_device *spi_dev) return ret; } - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(&spi_dev->dev, indio_dev); if (ret) return ret; @@ -422,15 +422,6 @@ static int ad7816_probe(struct spi_device *spi_dev) return 0; } -static int ad7816_remove(struct spi_device *spi_dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev); - - iio_device_unregister(indio_dev); - - return 0; -} - static const struct spi_device_id ad7816_id[] = { { "ad7816", 0 }, { "ad7817", 0 }, @@ -446,7 +437,6 @@ static struct spi_driver ad7816_driver = { .owner = THIS_MODULE, }, .probe = ad7816_probe, - .remove = ad7816_remove, .id_table = ad7816_id, }; module_spi_driver(ad7816_driver); diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c index 9428be82b655f0..5ea36410f716e7 100644 --- a/drivers/staging/iio/adc/ad799x_core.c +++ b/drivers/staging/iio/adc/ad799x_core.c @@ -377,9 +377,9 @@ static const struct iio_info ad7991_info = { static const struct iio_info ad7993_4_7_8_info = { .read_raw = &ad799x_read_raw, .event_attrs = &ad799x_event_attrs_group, - .read_event_config_new = &ad799x_read_event_config, - .read_event_value_new 
= &ad799x_read_event_value, - .write_event_value_new = &ad799x_write_event_value, + .read_event_config = &ad799x_read_event_config, + .read_event_value = &ad799x_read_event_value, + .write_event_value = &ad799x_write_event_value, .driver_module = THIS_MODULE, .update_scan_mode = ad7997_8_update_scan_mode, }; diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c index ef0a21d8ce15ce..a876ce75535175 100644 --- a/drivers/staging/iio/adc/lpc32xx_adc.c +++ b/drivers/staging/iio/adc/lpc32xx_adc.c @@ -183,7 +183,7 @@ static int lpc32xx_adc_probe(struct platform_device *pdev) iodev->channels = lpc32xx_adc_iio_channels; iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels); - retval = iio_device_register(iodev); + retval = devm_iio_device_register(&pdev->dev, iodev); if (retval) return retval; @@ -192,15 +192,6 @@ static int lpc32xx_adc_probe(struct platform_device *pdev) return 0; } -static int lpc32xx_adc_remove(struct platform_device *pdev) -{ - struct iio_dev *iodev = platform_get_drvdata(pdev); - - iio_device_unregister(iodev); - - return 0; -} - #ifdef CONFIG_OF static const struct of_device_id lpc32xx_adc_match[] = { { .compatible = "nxp,lpc3220-adc" }, @@ -211,7 +202,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_adc_match); static struct platform_driver lpc32xx_adc_driver = { .probe = lpc32xx_adc_probe, - .remove = lpc32xx_adc_remove, .driver = { .name = MOD_NAME, .owner = THIS_MODULE, diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index e2dd7830b3204a..df71669bb60ea5 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -111,16 +112,59 @@ static const char * const mx28_lradc_irq_names[] = { struct mxs_lradc_of_config { const int irq_count; const char * const *irq_name; + const uint32_t *vref_mv; +}; + +#define VREF_MV_BASE 1850 + +static const uint32_t mx23_vref_mv[LRADC_MAX_TOTAL_CHANS] = { + VREF_MV_BASE, /* CH0 */ + VREF_MV_BASE, /* CH1 */ + VREF_MV_BASE, /* CH2 */ + VREF_MV_BASE, /* CH3 */ + VREF_MV_BASE, /* CH4 */ + VREF_MV_BASE, /* CH5 */ + VREF_MV_BASE * 2, /* CH6 VDDIO */ + VREF_MV_BASE * 4, /* CH7 VBATT */ + VREF_MV_BASE, /* CH8 Temp sense 0 */ + VREF_MV_BASE, /* CH9 Temp sense 1 */ + VREF_MV_BASE, /* CH10 */ + VREF_MV_BASE, /* CH11 */ + VREF_MV_BASE, /* CH12 USB_DP */ + VREF_MV_BASE, /* CH13 USB_DN */ + VREF_MV_BASE, /* CH14 VBG */ + VREF_MV_BASE * 4, /* CH15 VDD5V */ +}; + +static const uint32_t mx28_vref_mv[LRADC_MAX_TOTAL_CHANS] = { + VREF_MV_BASE, /* CH0 */ + VREF_MV_BASE, /* CH1 */ + VREF_MV_BASE, /* CH2 */ + VREF_MV_BASE, /* CH3 */ + VREF_MV_BASE, /* CH4 */ + VREF_MV_BASE, /* CH5 */ + VREF_MV_BASE, /* CH6 */ + VREF_MV_BASE * 4, /* CH7 VBATT */ + VREF_MV_BASE, /* CH8 Temp sense 0 */ + VREF_MV_BASE, /* CH9 Temp sense 1 */ + VREF_MV_BASE * 2, /* CH10 VDDIO */ + VREF_MV_BASE, /* CH11 VTH */ + VREF_MV_BASE * 2, /* CH12 VDDA */ + VREF_MV_BASE, /* CH13 VDDD */ + VREF_MV_BASE, /* CH14 VBG */ + VREF_MV_BASE * 4, /* CH15 VDD5V */ }; static const struct mxs_lradc_of_config mxs_lradc_of_config[] = { [IMX23_LRADC] = { .irq_count = ARRAY_SIZE(mx23_lradc_irq_names), .irq_name = mx23_lradc_irq_names, + .vref_mv = mx23_vref_mv, }, [IMX28_LRADC] = { .irq_count = ARRAY_SIZE(mx28_lradc_irq_names), .irq_name = mx28_lradc_irq_names, + .vref_mv = mx28_vref_mv, }, }; @@ -141,6 +185,16 @@ enum lradc_ts_plate { LRADC_SAMPLE_VALID, }; +enum mxs_lradc_divbytwo { + MXS_LRADC_DIV_DISABLED = 0, + MXS_LRADC_DIV_ENABLED, 
+}; + +struct mxs_lradc_scale { + unsigned int integer; + unsigned int nano; +}; + struct mxs_lradc { struct device *dev; void __iomem *base; @@ -155,6 +209,10 @@ struct mxs_lradc { struct completion completion; + const uint32_t *vref_mv; + struct mxs_lradc_scale scale_avail[LRADC_MAX_TOTAL_CHANS][2]; + unsigned long is_divided; + /* * Touchscreen LRADC channels receives a private slot in the CTRL4 * register, the slot #7. Therefore only 7 slots instead of 8 in the @@ -243,6 +301,7 @@ struct mxs_lradc { #define LRADC_CTRL1_LRADC_IRQ_OFFSET 0 #define LRADC_CTRL2 0x20 +#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24 #define LRADC_CTRL2_TEMPSENSE_PWD (1 << 15) #define LRADC_STATUS 0x40 @@ -759,20 +818,11 @@ static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) /* * Raw I/O operations */ -static int mxs_lradc_read_raw(struct iio_dev *iio_dev, - const struct iio_chan_spec *chan, - int *val, int *val2, long m) +static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val) { struct mxs_lradc *lradc = iio_priv(iio_dev); int ret; - if (m != IIO_CHAN_INFO_RAW) - return -EINVAL; - - /* Check for invalid channel */ - if (chan->channel > LRADC_MAX_TOTAL_CHANS) - return -EINVAL; - /* * See if there is no buffered operation in progess. If there is, simply * bail out. This can be improved to support both buffered and raw IO at @@ -797,7 +847,7 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev, /* Clean the slot's previous content, then set new one. */ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4); - mxs_lradc_reg_set(lradc, chan->channel, LRADC_CTRL4); + mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4); mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0)); @@ -824,9 +874,206 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev, return ret; } +static int mxs_lradc_read_temp(struct iio_dev *iio_dev, int *val) +{ + int ret, min, max; + + ret = mxs_lradc_read_single(iio_dev, 8, &min); + if (ret != IIO_VAL_INT) + return ret; + + ret = mxs_lradc_read_single(iio_dev, 9, &max); + if (ret != IIO_VAL_INT) + return ret; + + *val = max - min; + + return IIO_VAL_INT; +} + +static int mxs_lradc_read_raw(struct iio_dev *iio_dev, + const struct iio_chan_spec *chan, + int *val, int *val2, long m) +{ + struct mxs_lradc *lradc = iio_priv(iio_dev); + + /* Check for invalid channel */ + if (chan->channel > LRADC_MAX_TOTAL_CHANS) + return -EINVAL; + + switch (m) { + case IIO_CHAN_INFO_RAW: + if (chan->type == IIO_TEMP) + return mxs_lradc_read_temp(iio_dev, val); + + return mxs_lradc_read_single(iio_dev, chan->channel, val); + + case IIO_CHAN_INFO_SCALE: + if (chan->type == IIO_TEMP) { + /* From the datasheet, we have to multiply by 1.012 and + * divide by 4 + */ + *val = 0; + *val2 = 253000; + return IIO_VAL_INT_PLUS_MICRO; + } + + *val = lradc->vref_mv[chan->channel]; + *val2 = chan->scan_type.realbits - + test_bit(chan->channel, &lradc->is_divided); + return IIO_VAL_FRACTIONAL_LOG2; + + case IIO_CHAN_INFO_OFFSET: + if (chan->type == IIO_TEMP) { + /* The calculated value from the ADC is in Kelvin, we + * want Celsius for hwmon so the offset is + * -272.15 * scale + */ + *val = -1075; + *val2 = 691699; + + return IIO_VAL_INT_PLUS_MICRO; + } + + return -EINVAL; + + default: + break; + } + + return -EINVAL; +} + +static int mxs_lradc_write_raw(struct iio_dev *iio_dev, + const struct iio_chan_spec *chan, + int val, int val2, long m) +{ + struct mxs_lradc *lradc = iio_priv(iio_dev); + struct mxs_lradc_scale *scale_avail = + lradc->scale_avail[chan->channel]; + int ret; + + ret = 
mutex_trylock(&lradc->lock); + if (!ret) + return -EBUSY; + + switch (m) { + case IIO_CHAN_INFO_SCALE: + ret = -EINVAL; + if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer && + val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) { + /* divider by two disabled */ + writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, + lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR); + clear_bit(chan->channel, &lradc->is_divided); + ret = 0; + } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer && + val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) { + /* divider by two enabled */ + writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, + lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET); + set_bit(chan->channel, &lradc->is_divided); + ret = 0; + } + + break; + default: + ret = -EINVAL; + break; + } + + mutex_unlock(&lradc->lock); + + return ret; +} + +static int mxs_lradc_write_raw_get_fmt(struct iio_dev *iio_dev, + const struct iio_chan_spec *chan, + long m) +{ + return IIO_VAL_INT_PLUS_NANO; +} + +static ssize_t mxs_lradc_show_scale_available_ch(struct device *dev, + struct device_attribute *attr, + char *buf, + int ch) +{ + struct iio_dev *iio = dev_to_iio_dev(dev); + struct mxs_lradc *lradc = iio_priv(iio); + int i, len = 0; + + for (i = 0; i < ARRAY_SIZE(lradc->scale_avail[ch]); i++) + len += sprintf(buf + len, "%d.%09u ", + lradc->scale_avail[ch][i].integer, + lradc->scale_avail[ch][i].nano); + + len += sprintf(buf + len, "\n"); + + return len; +} + +static ssize_t mxs_lradc_show_scale_available(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev_attr *iio_attr = to_iio_dev_attr(attr); + + return mxs_lradc_show_scale_available_ch(dev, attr, buf, + iio_attr->address); +} + +#define SHOW_SCALE_AVAILABLE_ATTR(ch) \ +static IIO_DEVICE_ATTR(in_voltage##ch##_scale_available, S_IRUGO, \ + mxs_lradc_show_scale_available, NULL, ch) + +SHOW_SCALE_AVAILABLE_ATTR(0); +SHOW_SCALE_AVAILABLE_ATTR(1); +SHOW_SCALE_AVAILABLE_ATTR(2); +SHOW_SCALE_AVAILABLE_ATTR(3); +SHOW_SCALE_AVAILABLE_ATTR(4); +SHOW_SCALE_AVAILABLE_ATTR(5); +SHOW_SCALE_AVAILABLE_ATTR(6); +SHOW_SCALE_AVAILABLE_ATTR(7); +SHOW_SCALE_AVAILABLE_ATTR(8); +SHOW_SCALE_AVAILABLE_ATTR(9); +SHOW_SCALE_AVAILABLE_ATTR(10); +SHOW_SCALE_AVAILABLE_ATTR(11); +SHOW_SCALE_AVAILABLE_ATTR(12); +SHOW_SCALE_AVAILABLE_ATTR(13); +SHOW_SCALE_AVAILABLE_ATTR(14); +SHOW_SCALE_AVAILABLE_ATTR(15); + +static struct attribute *mxs_lradc_attributes[] = { + &iio_dev_attr_in_voltage0_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage1_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage2_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage3_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage4_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage13_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage14_scale_available.dev_attr.attr, + &iio_dev_attr_in_voltage15_scale_available.dev_attr.attr, + NULL +}; + +static const struct attribute_group mxs_lradc_attribute_group = { + .attrs = mxs_lradc_attributes, +}; + static const struct iio_info mxs_lradc_iio_info = { 
.driver_module = THIS_MODULE, .read_raw = mxs_lradc_read_raw, + .write_raw = mxs_lradc_write_raw, + .write_raw_get_fmt = mxs_lradc_write_raw_get_fmt, + .attrs = &mxs_lradc_attribute_group, }; static int mxs_lradc_ts_open(struct input_dev *dev) @@ -1133,8 +1380,10 @@ static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = { .type = (chan_type), \ .indexed = 1, \ .scan_index = (idx), \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE), \ .channel = (idx), \ + .address = (idx), \ .scan_type = { \ .sign = 'u', \ .realbits = LRADC_RESOLUTION, \ @@ -1151,8 +1400,17 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = { MXS_ADC_CHAN(5, IIO_VOLTAGE), MXS_ADC_CHAN(6, IIO_VOLTAGE), MXS_ADC_CHAN(7, IIO_VOLTAGE), /* VBATT */ - MXS_ADC_CHAN(8, IIO_TEMP), /* Temp sense 0 */ - MXS_ADC_CHAN(9, IIO_TEMP), /* Temp sense 1 */ + /* Combined Temperature sensors */ + { + .type = IIO_TEMP, + .indexed = 1, + .scan_index = 8, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SCALE), + .channel = 8, + .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, + }, MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ @@ -1271,7 +1529,8 @@ static int mxs_lradc_probe(struct platform_device *pdev) struct iio_dev *iio; struct resource *iores; int ret = 0, touch_ret; - int i; + int i, s; + unsigned int scale_uv; /* Allocate the IIO device. */ iio = devm_iio_device_alloc(dev, sizeof(*lradc)); @@ -1316,6 +1575,8 @@ static int mxs_lradc_probe(struct platform_device *pdev) return ret; } + lradc->vref_mv = of_cfg->vref_mv; + platform_set_drvdata(pdev, iio); init_completion(&lradc->completion); @@ -1339,6 +1600,26 @@ static int mxs_lradc_probe(struct platform_device *pdev) if (ret) goto err_trig; + /* Populate available ADC input ranges */ + for (i = 0; i < LRADC_MAX_TOTAL_CHANS; i++) { + for (s = 0; s < ARRAY_SIZE(lradc->scale_avail[i]); s++) { + /* + * [s=0] = optional divider by two disabled (default) + * [s=1] = optional divider by two enabled + * + * The scale is calculated by doing: + * Vref >> (realbits - s) + * which multiplies by two on the second component + * of the array. + */ + scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> + (iio->channels[i].scan_type.realbits - s); + lradc->scale_avail[i][s].nano = + do_div(scale_uv, 100000000) * 10; + lradc->scale_avail[i][s].integer = scale_uv; + } + } + /* Configure the hardware. 
*/ ret = mxs_lradc_hw_init(lradc); if (ret) diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c index 0feea5541d02d5..75ddd4f801a3da 100644 --- a/drivers/staging/iio/addac/adt7316-i2c.c +++ b/drivers/staging/iio/addac/adt7316-i2c.c @@ -108,11 +108,6 @@ static int adt7316_i2c_probe(struct i2c_client *client, return adt7316_probe(&client->dev, &bus, id->name); } -static int adt7316_i2c_remove(struct i2c_client *client) -{ - return adt7316_remove(&client->dev); -} - static const struct i2c_device_id adt7316_i2c_id[] = { { "adt7316", 0 }, { "adt7317", 0 }, @@ -132,7 +127,6 @@ static struct i2c_driver adt7316_driver = { .owner = THIS_MODULE, }, .probe = adt7316_i2c_probe, - .remove = adt7316_i2c_remove, .id_table = adt7316_i2c_id, }; module_i2c_driver(adt7316_driver); diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c index 7f4f0a8245b480..e480abb72e4a49 100644 --- a/drivers/staging/iio/addac/adt7316-spi.c +++ b/drivers/staging/iio/addac/adt7316-spi.c @@ -116,11 +116,6 @@ static int adt7316_spi_probe(struct spi_device *spi_dev) return adt7316_probe(&spi_dev->dev, &bus, spi_dev->modalias); } -static int adt7316_spi_remove(struct spi_device *spi_dev) -{ - return adt7316_remove(&spi_dev->dev); -} - static const struct spi_device_id adt7316_spi_id[] = { { "adt7316", 0 }, { "adt7317", 0 }, @@ -140,7 +135,6 @@ static struct spi_driver adt7316_driver = { .owner = THIS_MODULE, }, .probe = adt7316_spi_probe, - .remove = adt7316_spi_remove, .id_table = adt7316_spi_id, }; module_spi_driver(adt7316_driver); diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c index 80266e801d5673..16a8201228ffc1 100644 --- a/drivers/staging/iio/addac/adt7316.c +++ b/drivers/staging/iio/addac/adt7316.c @@ -2166,7 +2166,7 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus, if (ret) return -EIO; - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(dev, indio_dev); if (ret) return ret; @@ -2177,16 +2177,6 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus, } EXPORT_SYMBOL(adt7316_probe); -int adt7316_remove(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - - iio_device_unregister(indio_dev); - - return 0; -} -EXPORT_SYMBOL(adt7316_remove); - MODULE_AUTHOR("Sonic Zhang "); MODULE_DESCRIPTION("Analog Devices ADT7316/7/8 and ADT7516/7/9 digital" " temperature sensor, ADC and DAC driver"); diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h index 4d3efff46ae714..2dbfb499528d2b 100644 --- a/drivers/staging/iio/addac/adt7316.h +++ b/drivers/staging/iio/addac/adt7316.h @@ -31,6 +31,5 @@ extern const struct dev_pm_ops adt7316_pm_ops; #define ADT7316_PM_OPS NULL #endif int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name); -int adt7316_remove(struct device *dev); #endif diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c index 7e7f9890a642ad..047af23763004f 100644 --- a/drivers/staging/iio/cdc/ad7150.c +++ b/drivers/staging/iio/cdc/ad7150.c @@ -576,10 +576,10 @@ static const struct iio_info ad7150_info = { .event_attrs = &ad7150_event_attribute_group, .driver_module = THIS_MODULE, .read_raw = &ad7150_read_raw, - .read_event_config_new = &ad7150_read_event_config, - .write_event_config_new = &ad7150_write_event_config, - .read_event_value_new = &ad7150_read_event_value, - .write_event_value_new = &ad7150_write_event_value, + .read_event_config = 
&ad7150_read_event_config, + .write_event_config = &ad7150_write_event_config, + .read_event_value = &ad7150_read_event_value, + .write_event_value = &ad7150_write_event_value, }; /* diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c index 862d68d99630c7..cbb1588d591fdb 100644 --- a/drivers/staging/iio/cdc/ad7746.c +++ b/drivers/staging/iio/cdc/ad7746.c @@ -105,6 +105,11 @@ struct ad7746_chip_info { u8 vt_setup; u8 capdac[2][2]; s8 capdac_set; + + union { + __be32 d32; + u8 d8[4]; + } data ____cacheline_aligned; }; enum ad7746_chan { @@ -566,11 +571,6 @@ static int ad7746_read_raw(struct iio_dev *indio_dev, int ret, delay; u8 regval, reg; - union { - u32 d32; - u8 d8[4]; - } data; - mutex_lock(&indio_dev->mlock); switch (mask) { @@ -591,12 +591,12 @@ static int ad7746_read_raw(struct iio_dev *indio_dev, /* Now read the actual register */ ret = i2c_smbus_read_i2c_block_data(chip->client, - chan->address >> 8, 3, &data.d8[1]); + chan->address >> 8, 3, &chip->data.d8[1]); if (ret < 0) goto out; - *val = (be32_to_cpu(data.d32) & 0xFFFFFF) - 0x800000; + *val = (be32_to_cpu(chip->data.d32) & 0xFFFFFF) - 0x800000; switch (chan->type) { case IIO_TEMP: diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h index c5b701f8aabbf1..386f4dc8c9a11b 100644 --- a/drivers/staging/iio/frequency/ad9832.h +++ b/drivers/staging/iio/frequency/ad9832.h @@ -92,9 +92,9 @@ struct ad9832_state { * transfer buffers to live in their own cache lines. */ union { - unsigned short freq_data[4]____cacheline_aligned; - unsigned short phase_data[2]; - unsigned short data; + __be16 freq_data[4]____cacheline_aligned; + __be16 phase_data[2]; + __be16 data; }; }; diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h index ed5ed8d0007ff4..8ca6e52bae6ba6 100644 --- a/drivers/staging/iio/frequency/ad9834.h +++ b/drivers/staging/iio/frequency/ad9834.h @@ -65,8 +65,8 @@ struct ad9834_state { * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. 
*/ - unsigned short data ____cacheline_aligned; - unsigned short freq_data[2] ; + __be16 data ____cacheline_aligned; + __be16 freq_data[2]; }; diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c index 6d3d771154f3c3..d5d395c2e3e408 100644 --- a/drivers/staging/iio/gyro/adis16060_core.c +++ b/drivers/staging/iio/gyro/adis16060_core.c @@ -167,7 +167,7 @@ static int adis16060_r_probe(struct spi_device *spi) indio_dev->channels = adis16060_channels; indio_dev->num_channels = ARRAY_SIZE(adis16060_channels); - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(&spi->dev, indio_dev); if (ret) return ret; @@ -175,13 +175,6 @@ static int adis16060_r_probe(struct spi_device *spi) return 0; } -/* fixme, confirm ordering in this function */ -static int adis16060_r_remove(struct spi_device *spi) -{ - iio_device_unregister(spi_get_drvdata(spi)); - return 0; -} - static int adis16060_w_probe(struct spi_device *spi) { int ret; @@ -211,7 +204,6 @@ static struct spi_driver adis16060_r_driver = { .owner = THIS_MODULE, }, .probe = adis16060_r_probe, - .remove = adis16060_r_remove, }; static struct spi_driver adis16060_w_driver = { diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c index 1fac9894b18c2f..fd334a03a49a3e 100644 --- a/drivers/staging/iio/iio_simple_dummy.c +++ b/drivers/staging/iio/iio_simple_dummy.c @@ -370,10 +370,10 @@ static const struct iio_info iio_dummy_info = { .read_raw = &iio_dummy_read_raw, .write_raw = &iio_dummy_write_raw, #ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS - .read_event_config_new = &iio_simple_dummy_read_event_config, - .write_event_config_new = &iio_simple_dummy_write_event_config, - .read_event_value_new = &iio_simple_dummy_read_event_value, - .write_event_value_new = &iio_simple_dummy_write_event_value, + .read_event_config = &iio_simple_dummy_read_event_config, + .write_event_config = &iio_simple_dummy_write_event_config, + .read_event_value = &iio_simple_dummy_read_event_value, + .write_event_value = &iio_simple_dummy_write_event_value, #endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */ }; diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c index 488e690388c948..3660a43b5f0805 100644 --- a/drivers/staging/iio/light/isl29018.c +++ b/drivers/staging/iio/light/isl29018.c @@ -585,7 +585,7 @@ static int isl29018_probe(struct i2c_client *client, indio_dev->name = id->name; indio_dev->dev.parent = &client->dev; indio_dev->modes = INDIO_DIRECT_MODE; - err = iio_device_register(indio_dev); + err = devm_iio_device_register(&client->dev, indio_dev); if (err) { dev_err(&client->dev, "iio registration fails\n"); return err; @@ -594,16 +594,6 @@ static int isl29018_probe(struct i2c_client *client, return 0; } -static int isl29018_remove(struct i2c_client *client) -{ - struct iio_dev *indio_dev = i2c_get_clientdata(client); - - dev_dbg(&client->dev, "%s()\n", __func__); - iio_device_unregister(indio_dev); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int isl29018_suspend(struct device *dev) { @@ -664,7 +654,6 @@ static struct i2c_driver isl29018_driver = { .of_match_table = isl29018_of_match, }, .probe = isl29018_probe, - .remove = isl29018_remove, .id_table = isl29018_id, }; module_i2c_driver(isl29018_driver); diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c index 18805029d2a915..1e538086d48b97 100644 --- a/drivers/staging/iio/light/tsl2x7x_core.c +++ b/drivers/staging/iio/light/tsl2x7x_core.c @@ 
-1672,10 +1672,10 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value_new = &tsl2x7x_read_thresh, - .write_event_value_new = &tsl2x7x_write_thresh, - .read_event_config_new = &tsl2x7x_read_interrupt_config, - .write_event_config_new = &tsl2x7x_write_interrupt_config, + .read_event_value = &tsl2x7x_read_thresh, + .write_event_value = &tsl2x7x_write_thresh, + .read_event_config = &tsl2x7x_read_interrupt_config, + .write_event_config = &tsl2x7x_write_interrupt_config, }, [PRX] = { .attrs = &tsl2X7X_device_attr_group_tbl[PRX], @@ -1683,10 +1683,10 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value_new = &tsl2x7x_read_thresh, - .write_event_value_new = &tsl2x7x_write_thresh, - .read_event_config_new = &tsl2x7x_read_interrupt_config, - .write_event_config_new = &tsl2x7x_write_interrupt_config, + .read_event_value = &tsl2x7x_read_thresh, + .write_event_value = &tsl2x7x_write_thresh, + .read_event_config = &tsl2x7x_read_interrupt_config, + .write_event_config = &tsl2x7x_write_interrupt_config, }, [ALSPRX] = { .attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX], @@ -1694,10 +1694,10 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value_new = &tsl2x7x_read_thresh, - .write_event_value_new = &tsl2x7x_write_thresh, - .read_event_config_new = &tsl2x7x_read_interrupt_config, - .write_event_config_new = &tsl2x7x_write_interrupt_config, + .read_event_value = &tsl2x7x_read_thresh, + .write_event_value = &tsl2x7x_write_thresh, + .read_event_config = &tsl2x7x_read_interrupt_config, + .write_event_config = &tsl2x7x_write_interrupt_config, }, [PRX2] = { .attrs = &tsl2X7X_device_attr_group_tbl[PRX2], @@ -1705,10 +1705,10 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value_new = &tsl2x7x_read_thresh, - .write_event_value_new = &tsl2x7x_write_thresh, - .read_event_config_new = &tsl2x7x_read_interrupt_config, - .write_event_config_new = &tsl2x7x_write_interrupt_config, + .read_event_value = &tsl2x7x_read_thresh, + .write_event_value = &tsl2x7x_write_thresh, + .read_event_config = &tsl2x7x_read_interrupt_config, + .write_event_config = &tsl2x7x_write_interrupt_config, }, [ALSPRX2] = { .attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2], @@ -1716,10 +1716,10 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value_new = &tsl2x7x_read_thresh, - .write_event_value_new = &tsl2x7x_write_thresh, - .read_event_config_new = &tsl2x7x_read_interrupt_config, - .write_event_config_new = &tsl2x7x_write_interrupt_config, + .read_event_value = &tsl2x7x_read_thresh, + .write_event_value = &tsl2x7x_write_thresh, + .read_event_config = &tsl2x7x_read_interrupt_config, + .write_event_config = &tsl2x7x_write_interrupt_config, }, }; diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c index 0485d7f398672a..d4f4dd90c69908 100644 --- a/drivers/staging/iio/magnetometer/hmc5843.c +++ b/drivers/staging/iio/magnetometer/hmc5843.c @@ -629,10 +629,17 @@ static const struct i2c_device_id hmc5843_id[] = { }; 
MODULE_DEVICE_TABLE(i2c, hmc5843_id); +static const struct of_device_id hmc5843_of_match[] = { + { .compatible = "honeywell,hmc5843" }, + {} +}; +MODULE_DEVICE_TABLE(of, hmc5843_of_match); + static struct i2c_driver hmc5843_driver = { .driver = { .name = "hmc5843", .pm = HMC5843_PM_OPS, + .of_match_table = hmc5843_of_match, }, .id_table = hmc5843_id, .probe = hmc5843_probe, diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c index 62d30179301fa5..36eedd8a0ea981 100644 --- a/drivers/staging/iio/resolver/ad2s1200.c +++ b/drivers/staging/iio/resolver/ad2s1200.c @@ -131,7 +131,7 @@ static int ad2s1200_probe(struct spi_device *spi) indio_dev->num_channels = ARRAY_SIZE(ad2s1200_channels); indio_dev->name = spi_get_device_id(spi)->name; - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(&spi->dev, indio_dev); if (ret) return ret; @@ -142,13 +142,6 @@ static int ad2s1200_probe(struct spi_device *spi) return 0; } -static int ad2s1200_remove(struct spi_device *spi) -{ - iio_device_unregister(spi_get_drvdata(spi)); - - return 0; -} - static const struct spi_device_id ad2s1200_id[] = { { "ad2s1200" }, { "ad2s1205" }, @@ -162,7 +155,6 @@ static struct spi_driver ad2s1200_driver = { .owner = THIS_MODULE, }, .probe = ad2s1200_probe, - .remove = ad2s1200_remove, .id_table = ad2s1200_id, }; module_spi_driver(ad2s1200_driver); diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig index 5032ff7c2259a8..78319ad176cd2c 100644 --- a/drivers/staging/imx-drm/Kconfig +++ b/drivers/staging/imx-drm/Kconfig @@ -53,3 +53,9 @@ config DRM_IMX_IPUV3 depends on DRM_IMX_IPUV3_CORE help Choose this if you have a i.MX5 or i.MX6 processor. + +config DRM_IMX_HDMI + tristate "Freescale i.MX DRM HDMI" + depends on DRM_IMX + help + Choose this if you want to use HDMI on i.MX6. diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile index 8742432d7b0170..4677585b5ad512 100644 --- a/drivers/staging/imx-drm/Makefile +++ b/drivers/staging/imx-drm/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/ imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o +obj-$(CONFIG_DRM_IMX_HDMI) += imx-hdmi.o diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 96e4eee344ef60..09ef5fb8bae6f9 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -448,6 +448,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) goto err_vblank; } + platform_set_drvdata(drm->platformdev, drm); mutex_unlock(&imxdrm->mutex); return 0; @@ -848,7 +849,7 @@ static int imx_drm_platform_probe(struct platform_device *pdev) static int imx_drm_platform_remove(struct platform_device *pdev) { - drm_platform_exit(&imx_drm_driver, pdev); + drm_put_dev(platform_get_drvdata(pdev)); return 0; } diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c new file mode 100644 index 00000000000000..f3a1f5e2e492a4 --- /dev/null +++ b/drivers/staging/imx-drm/imx-hdmi.c @@ -0,0 +1,1916 @@ +/* + * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * SH-Mobile High-Definition Multimedia Interface (HDMI) driver + * for SLISHDMI13T and SLIPHDMIT IP cores + * + * Copyright (C) 2010, Guennadi Liakhovetski + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ipu-v3/imx-ipu-v3.h" +#include "imx-hdmi.h" +#include "imx-drm.h" + +#define HDMI_EDID_LEN 512 + +#define RGB 0 +#define YCBCR444 1 +#define YCBCR422_16BITS 2 +#define YCBCR422_8BITS 3 +#define XVYCC444 4 + +enum hdmi_datamap { + RGB444_8B = 0x01, + RGB444_10B = 0x03, + RGB444_12B = 0x05, + RGB444_16B = 0x07, + YCbCr444_8B = 0x09, + YCbCr444_10B = 0x0B, + YCbCr444_12B = 0x0D, + YCbCr444_16B = 0x0F, + YCbCr422_8B = 0x16, + YCbCr422_10B = 0x14, + YCbCr422_12B = 0x12, +}; + +enum hdmi_colorimetry { + ITU601, + ITU709, +}; + +enum imx_hdmi_devtype { + IMX6Q_HDMI, + IMX6DL_HDMI, +}; + +static const u16 csc_coeff_default[3][4] = { + { 0x2000, 0x0000, 0x0000, 0x0000 }, + { 0x0000, 0x2000, 0x0000, 0x0000 }, + { 0x0000, 0x0000, 0x2000, 0x0000 } +}; + +static const u16 csc_coeff_rgb_out_eitu601[3][4] = { + { 0x2000, 0x6926, 0x74fd, 0x010e }, + { 0x2000, 0x2cdd, 0x0000, 0x7e9a }, + { 0x2000, 0x0000, 0x38b4, 0x7e3b } +}; + +static const u16 csc_coeff_rgb_out_eitu709[3][4] = { + { 0x2000, 0x7106, 0x7a02, 0x00a7 }, + { 0x2000, 0x3264, 0x0000, 0x7e6d }, + { 0x2000, 0x0000, 0x3b61, 0x7e25 } +}; + +static const u16 csc_coeff_rgb_in_eitu601[3][4] = { + { 0x2591, 0x1322, 0x074b, 0x0000 }, + { 0x6535, 0x2000, 0x7acc, 0x0200 }, + { 0x6acd, 0x7534, 0x2000, 0x0200 } +}; + +static const u16 csc_coeff_rgb_in_eitu709[3][4] = { + { 0x2dc5, 0x0d9b, 0x049e, 0x0000 }, + { 0x62f0, 0x2000, 0x7d11, 0x0200 }, + { 0x6756, 0x78ab, 0x2000, 0x0200 } +}; + +struct hdmi_vmode { + bool mdvi; + bool mhsyncpolarity; + bool mvsyncpolarity; + bool minterlaced; + bool mdataenablepolarity; + + unsigned int mpixelclock; + unsigned int mpixelrepetitioninput; + unsigned int mpixelrepetitionoutput; +}; + +struct hdmi_data_info { + unsigned int enc_in_format; + unsigned int enc_out_format; + unsigned int enc_color_depth; + unsigned int colorimetry; + unsigned int pix_repet_factor; + unsigned int hdcp_enable; + struct hdmi_vmode video_mode; +}; + +struct imx_hdmi { + struct drm_connector connector; + struct imx_drm_connector *imx_drm_connector; + struct drm_encoder encoder; + struct imx_drm_encoder *imx_drm_encoder; + + enum imx_hdmi_devtype dev_type; + struct device *dev; + struct clk *isfr_clk; + struct clk *iahb_clk; + + struct hdmi_data_info hdmi_data; + int vic; + + u8 edid[HDMI_EDID_LEN]; + bool cable_plugin; + + bool phy_enabled; + struct drm_display_mode previous_mode; + + struct regmap *regmap; + struct i2c_adapter *ddc; + void __iomem *regs; + + unsigned long pixel_clk_rate; + unsigned int sample_rate; + int ratio; +}; + +static void imx_hdmi_set_ipu_di_mux(struct imx_hdmi *hdmi, int ipu_di) +{ + regmap_update_bits(hdmi->regmap, IOMUXC_GPR3, + IMX6Q_GPR3_HDMI_MUX_CTL_MASK, + ipu_di << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT); +} + +static inline void hdmi_writeb(struct imx_hdmi *hdmi, u8 val, int offset) +{ + writeb(val, hdmi->regs + offset); +} + +static inline u8 hdmi_readb(struct imx_hdmi *hdmi, int offset) +{ + return readb(hdmi->regs + offset); +} + +static void hdmi_mask_writeb(struct imx_hdmi *hdmi, u8 data, unsigned int reg, + u8 shift, u8 mask) +{ + u8 value = hdmi_readb(hdmi, reg) & ~mask; + value |= (data << shift) & mask; + hdmi_writeb(hdmi, value, reg); +} + +static void hdmi_set_clock_regenerator_n(struct imx_hdmi *hdmi, + 
unsigned int value) +{ + u8 val; + + hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1); + hdmi_writeb(hdmi, (value >> 8) & 0xff, HDMI_AUD_N2); + hdmi_writeb(hdmi, (value >> 16) & 0x0f, HDMI_AUD_N3); + + /* nshift factor = 0 */ + val = hdmi_readb(hdmi, HDMI_AUD_CTS3); + val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK; + hdmi_writeb(hdmi, val, HDMI_AUD_CTS3); +} + +static void hdmi_regenerate_cts(struct imx_hdmi *hdmi, unsigned int cts) +{ + u8 val; + + /* Must be set/cleared first */ + val = hdmi_readb(hdmi, HDMI_AUD_CTS3); + val &= ~HDMI_AUD_CTS3_CTS_MANUAL; + hdmi_writeb(hdmi, val, HDMI_AUD_CTS3); + + hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1); + hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2); + hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) | + HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3); +} + +static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk, + unsigned int ratio) +{ + unsigned int n = (128 * freq) / 1000; + + switch (freq) { + case 32000: + if (pixel_clk == 25170000) + n = (ratio == 150) ? 9152 : 4576; + else if (pixel_clk == 27020000) + n = (ratio == 150) ? 8192 : 4096; + else if (pixel_clk == 74170000 || pixel_clk == 148350000) + n = 11648; + else + n = 4096; + break; + + case 44100: + if (pixel_clk == 25170000) + n = 7007; + else if (pixel_clk == 74170000) + n = 17836; + else if (pixel_clk == 148350000) + n = (ratio == 150) ? 17836 : 8918; + else + n = 6272; + break; + + case 48000: + if (pixel_clk == 25170000) + n = (ratio == 150) ? 9152 : 6864; + else if (pixel_clk == 27020000) + n = (ratio == 150) ? 8192 : 6144; + else if (pixel_clk == 74170000) + n = 11648; + else if (pixel_clk == 148350000) + n = (ratio == 150) ? 11648 : 5824; + else + n = 6144; + break; + + case 88200: + n = hdmi_compute_n(44100, pixel_clk, ratio) * 2; + break; + + case 96000: + n = hdmi_compute_n(48000, pixel_clk, ratio) * 2; + break; + + case 176400: + n = hdmi_compute_n(44100, pixel_clk, ratio) * 4; + break; + + case 192000: + n = hdmi_compute_n(48000, pixel_clk, ratio) * 4; + break; + + default: + break; + } + + return n; +} + +static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk, + unsigned int ratio) +{ + unsigned int cts = 0; + + pr_debug("%s: freq: %d pixel_clk: %ld ratio: %d\n", __func__, freq, + pixel_clk, ratio); + + switch (freq) { + case 32000: + if (pixel_clk == 297000000) { + cts = 222750; + break; + } + case 48000: + case 96000: + case 192000: + switch (pixel_clk) { + case 25200000: + case 27000000: + case 54000000: + case 74250000: + case 148500000: + cts = pixel_clk / 1000; + break; + case 297000000: + cts = 247500; + break; + /* + * All other TMDS clocks are not supported by + * DWC_hdmi_tx. The TMDS clocks divided or + * multiplied by 1,001 coefficients are not + * supported. 
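+			 *
+			 * As a cross-check, assuming the usual HDMI audio clock
+			 * regeneration relation 128 * fs = f_TMDS * N / CTS, i.e.
+			 * CTS = (f_TMDS * N) / (128 * fs): for fs = 48 kHz,
+			 * f_TMDS = 74.25 MHz and N = 6144 this gives
+			 * CTS = 74250000 * 6144 / (128 * 48000) = 74250,
+			 * matching the pixel_clk / 1000 case above.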
+ */ + default: + break; + } + break; + case 44100: + case 88200: + case 176400: + switch (pixel_clk) { + case 25200000: + cts = 28000; + break; + case 27000000: + cts = 30000; + break; + case 54000000: + cts = 60000; + break; + case 74250000: + cts = 82500; + break; + case 148500000: + cts = 165000; + break; + case 297000000: + cts = 247500; + break; + default: + break; + } + break; + default: + break; + } + if (ratio == 100) + return cts; + else + return (cts * ratio) / 100; +} + +static void hdmi_get_pixel_clk(struct imx_hdmi *hdmi) +{ + unsigned long rate; + + rate = 65000000; /* FIXME */ + + if (rate) + hdmi->pixel_clk_rate = rate; +} + +static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi) +{ + unsigned int clk_n, clk_cts; + + clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate, + hdmi->ratio); + clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate, + hdmi->ratio); + + if (!clk_cts) { + dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n", + __func__, hdmi->pixel_clk_rate); + return; + } + + dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n", + __func__, hdmi->sample_rate, hdmi->ratio, + hdmi->pixel_clk_rate, clk_n, clk_cts); + + hdmi_set_clock_regenerator_n(hdmi, clk_n); + hdmi_regenerate_cts(hdmi, clk_cts); +} + +static void hdmi_init_clk_regenerator(struct imx_hdmi *hdmi) +{ + unsigned int clk_n, clk_cts; + + clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate, + hdmi->ratio); + clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate, + hdmi->ratio); + + if (!clk_cts) { + dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n", + __func__, hdmi->pixel_clk_rate); + return; + } + + dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n", + __func__, hdmi->sample_rate, hdmi->ratio, + hdmi->pixel_clk_rate, clk_n, clk_cts); + + hdmi_set_clock_regenerator_n(hdmi, clk_n); + hdmi_regenerate_cts(hdmi, clk_cts); +} + +static void hdmi_clk_regenerator_update_pixel_clock(struct imx_hdmi *hdmi) +{ + /* Get pixel clock from ipu */ + hdmi_get_pixel_clk(hdmi); + hdmi_set_clk_regenerator(hdmi); +} + +/* + * this submodule is responsible for the video data synchronization. 
+ * for example, for RGB 4:4:4 input, the data map is defined as + * pin{47~40} <==> R[7:0] + * pin{31~24} <==> G[7:0] + * pin{15~8} <==> B[7:0] + */ +static void hdmi_video_sample(struct imx_hdmi *hdmi) +{ + int color_format = 0; + u8 val; + + if (hdmi->hdmi_data.enc_in_format == RGB) { + if (hdmi->hdmi_data.enc_color_depth == 8) + color_format = 0x01; + else if (hdmi->hdmi_data.enc_color_depth == 10) + color_format = 0x03; + else if (hdmi->hdmi_data.enc_color_depth == 12) + color_format = 0x05; + else if (hdmi->hdmi_data.enc_color_depth == 16) + color_format = 0x07; + else + return; + } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) { + if (hdmi->hdmi_data.enc_color_depth == 8) + color_format = 0x09; + else if (hdmi->hdmi_data.enc_color_depth == 10) + color_format = 0x0B; + else if (hdmi->hdmi_data.enc_color_depth == 12) + color_format = 0x0D; + else if (hdmi->hdmi_data.enc_color_depth == 16) + color_format = 0x0F; + else + return; + } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) { + if (hdmi->hdmi_data.enc_color_depth == 8) + color_format = 0x16; + else if (hdmi->hdmi_data.enc_color_depth == 10) + color_format = 0x14; + else if (hdmi->hdmi_data.enc_color_depth == 12) + color_format = 0x12; + else + return; + } + + val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE | + ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) & + HDMI_TX_INVID0_VIDEO_MAPPING_MASK); + hdmi_writeb(hdmi, val, HDMI_TX_INVID0); + + /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */ + val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE | + HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE | + HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE; + hdmi_writeb(hdmi, val, HDMI_TX_INSTUFFING); + hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA0); + hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA1); + hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA0); + hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA1); + hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA0); + hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA1); +} + +static int is_color_space_conversion(struct imx_hdmi *hdmi) +{ + return (hdmi->hdmi_data.enc_in_format != + hdmi->hdmi_data.enc_out_format); +} + +static int is_color_space_decimation(struct imx_hdmi *hdmi) +{ + return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) && + (hdmi->hdmi_data.enc_in_format == RGB || + hdmi->hdmi_data.enc_in_format == YCBCR444)); +} + +static int is_color_space_interpolation(struct imx_hdmi *hdmi) +{ + return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) && + (hdmi->hdmi_data.enc_out_format == RGB || + hdmi->hdmi_data.enc_out_format == YCBCR444)); +} + +static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi) +{ + const u16 (*csc_coeff)[3][4] = &csc_coeff_default; + u32 csc_scale = 1; + u8 val; + + if (is_color_space_conversion(hdmi)) { + if (hdmi->hdmi_data.enc_out_format == RGB) { + if (hdmi->hdmi_data.colorimetry == ITU601) + csc_coeff = &csc_coeff_rgb_out_eitu601; + else + csc_coeff = &csc_coeff_rgb_out_eitu709; + } else if (hdmi->hdmi_data.enc_in_format == RGB) { + if (hdmi->hdmi_data.colorimetry == ITU601) + csc_coeff = &csc_coeff_rgb_in_eitu601; + else + csc_coeff = &csc_coeff_rgb_in_eitu709; + csc_scale = 0; + } + } + + hdmi_writeb(hdmi, ((*csc_coeff)[0][0] & 0xff), HDMI_CSC_COEF_A1_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[0][0] >> 8), HDMI_CSC_COEF_A1_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[0][1] & 0xff), HDMI_CSC_COEF_A2_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[0][1] >> 8), HDMI_CSC_COEF_A2_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[0][2] & 0xff), HDMI_CSC_COEF_A3_LSB); + hdmi_writeb(hdmi, 
((*csc_coeff)[0][2] >> 8), HDMI_CSC_COEF_A3_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[0][3] & 0xff), HDMI_CSC_COEF_A4_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[0][3] >> 8), HDMI_CSC_COEF_A4_MSB); + + hdmi_writeb(hdmi, ((*csc_coeff)[1][0] & 0xff), HDMI_CSC_COEF_B1_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[1][0] >> 8), HDMI_CSC_COEF_B1_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[1][1] & 0xff), HDMI_CSC_COEF_B2_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[1][1] >> 8), HDMI_CSC_COEF_B2_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[1][2] & 0xff), HDMI_CSC_COEF_B3_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[1][2] >> 8), HDMI_CSC_COEF_B3_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[1][3] & 0xff), HDMI_CSC_COEF_B4_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[1][3] >> 8), HDMI_CSC_COEF_B4_MSB); + + hdmi_writeb(hdmi, ((*csc_coeff)[2][0] & 0xff), HDMI_CSC_COEF_C1_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[2][0] >> 8), HDMI_CSC_COEF_C1_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[2][1] & 0xff), HDMI_CSC_COEF_C2_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[2][1] >> 8), HDMI_CSC_COEF_C2_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[2][2] & 0xff), HDMI_CSC_COEF_C3_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[2][2] >> 8), HDMI_CSC_COEF_C3_MSB); + hdmi_writeb(hdmi, ((*csc_coeff)[2][3] & 0xff), HDMI_CSC_COEF_C4_LSB); + hdmi_writeb(hdmi, ((*csc_coeff)[2][3] >> 8), HDMI_CSC_COEF_C4_MSB); + + val = hdmi_readb(hdmi, HDMI_CSC_SCALE); + val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK; + val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK; + hdmi_writeb(hdmi, val, HDMI_CSC_SCALE); +} + +static void hdmi_video_csc(struct imx_hdmi *hdmi) +{ + int color_depth = 0; + int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE; + int decimation = 0; + u8 val; + + /* YCC422 interpolation to 444 mode */ + if (is_color_space_interpolation(hdmi)) + interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1; + else if (is_color_space_decimation(hdmi)) + decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3; + + if (hdmi->hdmi_data.enc_color_depth == 8) + color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP; + else if (hdmi->hdmi_data.enc_color_depth == 10) + color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP; + else if (hdmi->hdmi_data.enc_color_depth == 12) + color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP; + else if (hdmi->hdmi_data.enc_color_depth == 16) + color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP; + else + return; + + /* Configure the CSC registers */ + hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG); + val = hdmi_readb(hdmi, HDMI_CSC_SCALE); + val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK; + val |= color_depth; + hdmi_writeb(hdmi, val, HDMI_CSC_SCALE); + + imx_hdmi_update_csc_coeffs(hdmi); +} + +/* + * HDMI video packetizer is used to packetize the data. + * for example, if input is YCC422 mode or repeater is used, + * data should be repacked this module can be bypassed. 
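+ * In the code below: RGB and YCC444 output at 8 bits per component
+ * select the bypass path, YCC422 output goes through the remap
+ * stage, and only the deeper colour depths use the pixel-packing
+ * (PP) output selector.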
+ */ +static void hdmi_video_packetize(struct imx_hdmi *hdmi) +{ + unsigned int color_depth = 0; + unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit; + unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP; + struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; + u8 val; + + if (hdmi_data->enc_out_format == RGB + || hdmi_data->enc_out_format == YCBCR444) { + if (!hdmi_data->enc_color_depth) + output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; + else if (hdmi_data->enc_color_depth == 8) { + color_depth = 4; + output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; + } else if (hdmi_data->enc_color_depth == 10) + color_depth = 5; + else if (hdmi_data->enc_color_depth == 12) + color_depth = 6; + else if (hdmi_data->enc_color_depth == 16) + color_depth = 7; + else + return; + } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) { + if (!hdmi_data->enc_color_depth || + hdmi_data->enc_color_depth == 8) + remap_size = HDMI_VP_REMAP_YCC422_16bit; + else if (hdmi_data->enc_color_depth == 10) + remap_size = HDMI_VP_REMAP_YCC422_20bit; + else if (hdmi_data->enc_color_depth == 12) + remap_size = HDMI_VP_REMAP_YCC422_24bit; + else + return; + output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422; + } else + return; + + /* set the packetizer registers */ + val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) & + HDMI_VP_PR_CD_COLOR_DEPTH_MASK) | + ((hdmi_data->pix_repet_factor << + HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) & + HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK); + hdmi_writeb(hdmi, val, HDMI_VP_PR_CD); + + val = hdmi_readb(hdmi, HDMI_VP_STUFF); + val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK; + val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE; + hdmi_writeb(hdmi, val, HDMI_VP_STUFF); + + /* Data from pixel repeater block */ + if (hdmi_data->pix_repet_factor > 1) { + val = hdmi_readb(hdmi, HDMI_VP_CONF); + val &= ~(HDMI_VP_CONF_PR_EN_MASK | + HDMI_VP_CONF_BYPASS_SELECT_MASK); + val |= HDMI_VP_CONF_PR_EN_ENABLE | + HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER; + hdmi_writeb(hdmi, val, HDMI_VP_CONF); + } else { /* data from packetizer block */ + val = hdmi_readb(hdmi, HDMI_VP_CONF); + val &= ~(HDMI_VP_CONF_PR_EN_MASK | + HDMI_VP_CONF_BYPASS_SELECT_MASK); + val |= HDMI_VP_CONF_PR_EN_DISABLE | + HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER; + hdmi_writeb(hdmi, val, HDMI_VP_CONF); + } + + val = hdmi_readb(hdmi, HDMI_VP_STUFF); + val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK; + val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET; + hdmi_writeb(hdmi, val, HDMI_VP_STUFF); + + hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP); + + if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) { + val = hdmi_readb(hdmi, HDMI_VP_CONF); + val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK | + HDMI_VP_CONF_PP_EN_ENMASK | + HDMI_VP_CONF_YCC422_EN_MASK); + val |= HDMI_VP_CONF_BYPASS_EN_DISABLE | + HDMI_VP_CONF_PP_EN_ENABLE | + HDMI_VP_CONF_YCC422_EN_DISABLE; + hdmi_writeb(hdmi, val, HDMI_VP_CONF); + } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) { + val = hdmi_readb(hdmi, HDMI_VP_CONF); + val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK | + HDMI_VP_CONF_PP_EN_ENMASK | + HDMI_VP_CONF_YCC422_EN_MASK); + val |= HDMI_VP_CONF_BYPASS_EN_DISABLE | + HDMI_VP_CONF_PP_EN_DISABLE | + HDMI_VP_CONF_YCC422_EN_ENABLE; + hdmi_writeb(hdmi, val, HDMI_VP_CONF); + } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) { + val = hdmi_readb(hdmi, HDMI_VP_CONF); + val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK | + HDMI_VP_CONF_PP_EN_ENMASK | + HDMI_VP_CONF_YCC422_EN_MASK); + val |= HDMI_VP_CONF_BYPASS_EN_ENABLE | + HDMI_VP_CONF_PP_EN_DISABLE | + 
HDMI_VP_CONF_YCC422_EN_DISABLE; + hdmi_writeb(hdmi, val, HDMI_VP_CONF); + } else { + return; + } + + val = hdmi_readb(hdmi, HDMI_VP_STUFF); + val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK | + HDMI_VP_STUFF_YCC422_STUFFING_MASK); + val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE | + HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE; + hdmi_writeb(hdmi, val, HDMI_VP_STUFF); + + val = hdmi_readb(hdmi, HDMI_VP_CONF); + val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK; + val |= output_select; + hdmi_writeb(hdmi, val, HDMI_VP_CONF); +} + +static inline void hdmi_phy_test_clear(struct imx_hdmi *hdmi, + unsigned char bit) +{ + u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0); + val &= ~HDMI_PHY_TST0_TSTCLR_MASK; + val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) & + HDMI_PHY_TST0_TSTCLR_MASK; + hdmi_writeb(hdmi, val, HDMI_PHY_TST0); +} + +static inline void hdmi_phy_test_enable(struct imx_hdmi *hdmi, + unsigned char bit) +{ + u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0); + val &= ~HDMI_PHY_TST0_TSTEN_MASK; + val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) & + HDMI_PHY_TST0_TSTEN_MASK; + hdmi_writeb(hdmi, val, HDMI_PHY_TST0); +} + +static inline void hdmi_phy_test_clock(struct imx_hdmi *hdmi, + unsigned char bit) +{ + u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0); + val &= ~HDMI_PHY_TST0_TSTCLK_MASK; + val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) & + HDMI_PHY_TST0_TSTCLK_MASK; + hdmi_writeb(hdmi, val, HDMI_PHY_TST0); +} + +static inline void hdmi_phy_test_din(struct imx_hdmi *hdmi, + unsigned char bit) +{ + hdmi_writeb(hdmi, bit, HDMI_PHY_TST1); +} + +static inline void hdmi_phy_test_dout(struct imx_hdmi *hdmi, + unsigned char bit) +{ + hdmi_writeb(hdmi, bit, HDMI_PHY_TST2); +} + +static bool hdmi_phy_wait_i2c_done(struct imx_hdmi *hdmi, int msec) +{ + unsigned char val = 0; + val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3; + while (!val) { + udelay(1000); + if (msec-- == 0) + return false; + val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3; + } + return true; +} + +static void __hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data, + unsigned char addr) +{ + hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0); + hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR); + hdmi_writeb(hdmi, (unsigned char)(data >> 8), + HDMI_PHY_I2CM_DATAO_1_ADDR); + hdmi_writeb(hdmi, (unsigned char)(data >> 0), + HDMI_PHY_I2CM_DATAO_0_ADDR); + hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE, + HDMI_PHY_I2CM_OPERATION_ADDR); + hdmi_phy_wait_i2c_done(hdmi, 1000); +} + +static int hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data, + unsigned char addr) +{ + __hdmi_phy_i2c_write(hdmi, data, addr); + return 0; +} + +static void imx_hdmi_phy_enable_power(struct imx_hdmi *hdmi, u8 enable) +{ + hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, + HDMI_PHY_CONF0_PDZ_OFFSET, + HDMI_PHY_CONF0_PDZ_MASK); +} + +static void imx_hdmi_phy_enable_tmds(struct imx_hdmi *hdmi, u8 enable) +{ + hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, + HDMI_PHY_CONF0_ENTMDS_OFFSET, + HDMI_PHY_CONF0_ENTMDS_MASK); +} + +static void imx_hdmi_phy_gen2_pddq(struct imx_hdmi *hdmi, u8 enable) +{ + hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, + HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET, + HDMI_PHY_CONF0_GEN2_PDDQ_MASK); +} + +static void imx_hdmi_phy_gen2_txpwron(struct imx_hdmi *hdmi, u8 enable) +{ + hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, + HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET, + HDMI_PHY_CONF0_GEN2_TXPWRON_MASK); +} + +static void imx_hdmi_phy_sel_data_en_pol(struct imx_hdmi *hdmi, u8 enable) +{ + hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, + HDMI_PHY_CONF0_SELDATAENPOL_OFFSET, 
+ HDMI_PHY_CONF0_SELDATAENPOL_MASK); +} + +static void imx_hdmi_phy_sel_interface_control(struct imx_hdmi *hdmi, u8 enable) +{ + hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, + HDMI_PHY_CONF0_SELDIPIF_OFFSET, + HDMI_PHY_CONF0_SELDIPIF_MASK); +} + +static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep, + unsigned char res, int cscon) +{ + u8 val, msec; + + /* color resolution 0 is 8 bit colour depth */ + if (!res) + res = 8; + + if (prep) + return -EINVAL; + else if (res != 8 && res != 12) + return -EINVAL; + + /* Enable csc path */ + if (cscon) + val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH; + else + val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS; + + hdmi_writeb(hdmi, val, HDMI_MC_FLOWCTRL); + + /* gen2 tx power off */ + imx_hdmi_phy_gen2_txpwron(hdmi, 0); + + /* gen2 pddq */ + imx_hdmi_phy_gen2_pddq(hdmi, 1); + + /* PHY reset */ + hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ); + hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ); + + hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST); + + hdmi_phy_test_clear(hdmi, 1); + hdmi_writeb(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2, + HDMI_PHY_I2CM_SLAVE_ADDR); + hdmi_phy_test_clear(hdmi, 0); + + if (hdmi->hdmi_data.video_mode.mpixelclock <= 45250000) { + switch (res) { + case 8: + /* PLL/MPLL Cfg */ + hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06); + hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */ + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06); + hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06); + hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); + break; + default: + return -EINVAL; + } + } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 92500000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x0140, 0x06); + hdmi_phy_i2c_write(hdmi, 0x0005, 0x15); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x2141, 0x06); + hdmi_phy_i2c_write(hdmi, 0x0005, 0x15); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x4142, 0x06); + hdmi_phy_i2c_write(hdmi, 0x0005, 0x15); + default: + return -EINVAL; + } + } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 148500000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06); + hdmi_phy_i2c_write(hdmi, 0x000a, 0x15); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06); + hdmi_phy_i2c_write(hdmi, 0x000a, 0x15); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06); + hdmi_phy_i2c_write(hdmi, 0x000a, 0x15); + default: + return -EINVAL; + } + } else { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06); + hdmi_phy_i2c_write(hdmi, 0x000a, 0x15); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x2001, 0x06); + hdmi_phy_i2c_write(hdmi, 0x000f, 0x15); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x4002, 0x06); + hdmi_phy_i2c_write(hdmi, 0x000f, 0x15); + default: + return -EINVAL; + } + } + + if (hdmi->hdmi_data.video_mode.mpixelclock <= 54000000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */ + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + default: + return -EINVAL; + } + } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 58400000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + default: + return -EINVAL; + } + } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 
72000000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); + break; + default: + return -EINVAL; + } + } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 74250000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); + break; + default: + return -EINVAL; + } + } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 118800000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + default: + return -EINVAL; + } + } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 216000000) { + switch (res) { + case 8: + hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10); + break; + case 10: + hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10); + break; + case 12: + hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); + break; + default: + return -EINVAL; + } + } else { + dev_err(hdmi->dev, + "Pixel clock %d - unsupported by HDMI\n", + hdmi->hdmi_data.video_mode.mpixelclock); + return -EINVAL; + } + + hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */ + hdmi_phy_i2c_write(hdmi, 0x0006, 0x17); + /* RESISTANCE TERM 133Ohm Cfg */ + hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */ + /* PREEMP Cgf 0.00 */ + hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */ + /* TX/CK LVL 10 */ + hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */ + /* REMOVE CLK TERM */ + hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */ + + imx_hdmi_phy_enable_power(hdmi, 1); + + /* toggle TMDS enable */ + imx_hdmi_phy_enable_tmds(hdmi, 0); + imx_hdmi_phy_enable_tmds(hdmi, 1); + + /* gen2 tx power on */ + imx_hdmi_phy_gen2_txpwron(hdmi, 1); + imx_hdmi_phy_gen2_pddq(hdmi, 0); + + /*Wait for PHY PLL lock */ + msec = 5; + do { + val = hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK; + if (!val) + break; + + if (msec == 0) { + dev_err(hdmi->dev, "PHY PLL not locked\n"); + return -ETIMEDOUT; + } + + udelay(1000); + msec--; + } while (1); + + return 0; +} + +static int imx_hdmi_phy_init(struct imx_hdmi *hdmi) +{ + int i, ret; + bool cscon = false; + + /*check csc whether needed activated in HDMI mode */ + cscon = (is_color_space_conversion(hdmi) && + !hdmi->hdmi_data.video_mode.mdvi); + + /* HDMI Phy spec says to do the phy initialization sequence twice */ + for (i = 0; i < 2; i++) { + imx_hdmi_phy_sel_data_en_pol(hdmi, 1); + imx_hdmi_phy_sel_interface_control(hdmi, 0); + imx_hdmi_phy_enable_tmds(hdmi, 0); + imx_hdmi_phy_enable_power(hdmi, 0); + + /* Enable CSC */ + ret = hdmi_phy_configure(hdmi, 0, 8, cscon); + if (ret) + return ret; + } + + hdmi->phy_enabled = true; + return 0; +} + +static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi) +{ + u8 de, val; + + if (hdmi->hdmi_data.video_mode.mdataenablepolarity) + de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH; + else + de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW; + + /* disable rx detect */ + val = hdmi_readb(hdmi, HDMI_A_HDCPCFG0); + val &= HDMI_A_HDCPCFG0_RXDETECT_MASK; + val |= HDMI_A_HDCPCFG0_RXDETECT_DISABLE; + hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG0); + + val = hdmi_readb(hdmi, HDMI_A_VIDPOLCFG); + val &= HDMI_A_VIDPOLCFG_DATAENPOL_MASK; + val |= de; + hdmi_writeb(hdmi, val, HDMI_A_VIDPOLCFG); + + val = hdmi_readb(hdmi, HDMI_A_HDCPCFG1); + val &= 
HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK; + val |= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE; + hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG1); +} + +static void hdmi_config_AVI(struct imx_hdmi *hdmi) +{ + u8 val, pix_fmt, under_scan; + u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry; + bool aspect_16_9; + + aspect_16_9 = false; /* FIXME */ + + /* AVI Data Byte 1 */ + if (hdmi->hdmi_data.enc_out_format == YCBCR444) + pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444; + else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) + pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422; + else + pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB; + + under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA; + + /* + * Active format identification data is present in the AVI InfoFrame. + * Under scan info, no bar data + */ + val = pix_fmt | under_scan | + HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT | + HDMI_FC_AVICONF0_BAR_DATA_NO_DATA; + + hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0); + + /* AVI Data Byte 2 -Set the Aspect Ratio */ + if (aspect_16_9) { + act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9; + coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9; + } else { + act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3; + coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3; + } + + /* Set up colorimetry */ + if (hdmi->hdmi_data.enc_out_format == XVYCC444) { + colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; + if (hdmi->hdmi_data.colorimetry == ITU601) + ext_colorimetry = + HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; + else /* hdmi->hdmi_data.colorimetry == ITU709 */ + ext_colorimetry = + HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; + } else if (hdmi->hdmi_data.enc_out_format != RGB) { + if (hdmi->hdmi_data.colorimetry == ITU601) + colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; + else /* hdmi->hdmi_data.colorimetry == ITU709 */ + colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR; + ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; + } else { /* Carries no data */ + colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA; + ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; + } + + val = colorimetry | coded_ratio | act_ratio; + hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1); + + /* AVI Data Byte 3 */ + val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry | + HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT | + HDMI_FC_AVICONF2_SCALING_NONE; + hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2); + + /* AVI Data Byte 4 */ + hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID); + + /* AVI Data Byte 5- set up input and output pixel repetition */ + val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) << + HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) & + HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) | + ((hdmi->hdmi_data.video_mode.mpixelrepetitionoutput << + HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) & + HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK); + hdmi_writeb(hdmi, val, HDMI_FC_PRCONF); + + /* IT Content and quantization range = don't care */ + val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS | + HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED; + hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3); + + /* AVI Data Bytes 6-13 */ + hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0); + hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1); + hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0); + hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1); + hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0); + hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1); + hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0); + hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1); +} + +static void hdmi_av_composer(struct imx_hdmi *hdmi, + const struct drm_display_mode *mode) +{ + u8 inv_val; + struct 
hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode; + int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len; + + vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC); + vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC); + vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + vmode->mpixelclock = mode->clock * 1000; + + dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock); + + /* Set up HDMI_FC_INVIDCONF */ + inv_val = (hdmi->hdmi_data.hdcp_enable ? + HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : + HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE); + + inv_val |= (vmode->mvsyncpolarity ? + HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH : + HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW); + + inv_val |= (vmode->mhsyncpolarity ? + HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH : + HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW); + + inv_val |= (vmode->mdataenablepolarity ? + HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH : + HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW); + + if (hdmi->vic == 39) + inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH; + else + inv_val |= (vmode->minterlaced ? + HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH : + HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW); + + inv_val |= (vmode->minterlaced ? + HDMI_FC_INVIDCONF_IN_I_P_INTERLACED : + HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE); + + inv_val |= (vmode->mdvi ? + HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE : + HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE); + + hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF); + + /* Set up horizontal active pixel width */ + hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1); + hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0); + + /* Set up vertical active lines */ + hdmi_writeb(hdmi, mode->vdisplay >> 8, HDMI_FC_INVACTV1); + hdmi_writeb(hdmi, mode->vdisplay, HDMI_FC_INVACTV0); + + /* Set up horizontal blanking pixel region width */ + hblank = mode->htotal - mode->hdisplay; + hdmi_writeb(hdmi, hblank >> 8, HDMI_FC_INHBLANK1); + hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0); + + /* Set up vertical blanking pixel region width */ + vblank = mode->vtotal - mode->vdisplay; + hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK); + + /* Set up HSYNC active edge delay width (in pixel clks) */ + h_de_hs = mode->hsync_start - mode->hdisplay; + hdmi_writeb(hdmi, h_de_hs >> 8, HDMI_FC_HSYNCINDELAY1); + hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0); + + /* Set up VSYNC active edge delay (in lines) */ + v_de_vs = mode->vsync_start - mode->vdisplay; + hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY); + + /* Set up HSYNC active pulse width (in pixel clks) */ + hsync_len = mode->hsync_end - mode->hsync_start; + hdmi_writeb(hdmi, hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1); + hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0); + + /* Set up VSYNC active edge delay (in lines) */ + vsync_len = mode->vsync_end - mode->vsync_start; + hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH); +} + +static void imx_hdmi_phy_disable(struct imx_hdmi *hdmi) +{ + if (!hdmi->phy_enabled) + return; + + imx_hdmi_phy_enable_tmds(hdmi, 0); + imx_hdmi_phy_enable_power(hdmi, 0); + + hdmi->phy_enabled = false; +} + +/* HDMI Initialization Step B.4 */ +static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi) +{ + u8 clkdis; + + /* control period minimum duration */ + hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR); + hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR); + hdmi_writeb(hdmi, 1, HDMI_FC_EXCTRLSPAC); + + /* Set to fill TMDS data channels */ + hdmi_writeb(hdmi, 0x0B, HDMI_FC_CH0PREAM); + hdmi_writeb(hdmi, 0x16, 
HDMI_FC_CH1PREAM); + hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM); + + /* Enable pixel clock and tmds data path */ + clkdis = 0x7F; + clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE; + hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS); + + clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE; + hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS); + + /* Enable csc path */ + if (is_color_space_conversion(hdmi)) { + clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE; + hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS); + } +} + +static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi) +{ + u8 clkdis; + + clkdis = hdmi_readb(hdmi, HDMI_MC_CLKDIS); + clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE; + hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS); +} + +/* Workaround to clear the overflow condition */ +static void imx_hdmi_clear_overflow(struct imx_hdmi *hdmi) +{ + int count; + u8 val; + + /* TMDS software reset */ + hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ); + + val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF); + if (hdmi->dev_type == IMX6DL_HDMI) { + hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF); + return; + } + + for (count = 0; count < 4; count++) + hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF); +} + +static void hdmi_enable_overflow_interrupts(struct imx_hdmi *hdmi) +{ + hdmi_writeb(hdmi, 0, HDMI_FC_MASK2); + hdmi_writeb(hdmi, 0, HDMI_IH_MUTE_FC_STAT2); +} + +static void hdmi_disable_overflow_interrupts(struct imx_hdmi *hdmi) +{ + hdmi_writeb(hdmi, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK, + HDMI_IH_MUTE_FC_STAT2); +} + +static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode) +{ + int ret; + + hdmi_disable_overflow_interrupts(hdmi); + + hdmi->vic = drm_match_cea_mode(mode); + + if (!hdmi->vic) { + dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n"); + hdmi->hdmi_data.video_mode.mdvi = true; + } else { + dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic); + hdmi->hdmi_data.video_mode.mdvi = false; + } + + if ((hdmi->vic == 6) || (hdmi->vic == 7) || + (hdmi->vic == 21) || (hdmi->vic == 22) || + (hdmi->vic == 2) || (hdmi->vic == 3) || + (hdmi->vic == 17) || (hdmi->vic == 18)) + hdmi->hdmi_data.colorimetry = ITU601; + else + hdmi->hdmi_data.colorimetry = ITU709; + + if ((hdmi->vic == 10) || (hdmi->vic == 11) || + (hdmi->vic == 12) || (hdmi->vic == 13) || + (hdmi->vic == 14) || (hdmi->vic == 15) || + (hdmi->vic == 25) || (hdmi->vic == 26) || + (hdmi->vic == 27) || (hdmi->vic == 28) || + (hdmi->vic == 29) || (hdmi->vic == 30) || + (hdmi->vic == 35) || (hdmi->vic == 36) || + (hdmi->vic == 37) || (hdmi->vic == 38)) + hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1; + else + hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; + + hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0; + + /* TODO: Get input format from IPU (via FB driver interface) */ + hdmi->hdmi_data.enc_in_format = RGB; + + hdmi->hdmi_data.enc_out_format = RGB; + + hdmi->hdmi_data.enc_color_depth = 8; + hdmi->hdmi_data.pix_repet_factor = 0; + hdmi->hdmi_data.hdcp_enable = 0; + hdmi->hdmi_data.video_mode.mdataenablepolarity = true; + + /* HDMI Initialization Step B.1 */ + hdmi_av_composer(hdmi, mode); + + /* HDMI Initializateion Step B.2 */ + ret = imx_hdmi_phy_init(hdmi); + if (ret) + return ret; + + /* HDMI Initialization Step B.3 */ + imx_hdmi_enable_video_path(hdmi); + + /* not for DVI mode */ + if (hdmi->hdmi_data.video_mode.mdvi) + dev_dbg(hdmi->dev, "%s DVI mode\n", __func__); + else { + dev_dbg(hdmi->dev, "%s CEA mode\n", __func__); + + /* HDMI Initialization Step E - Configure audio */ + hdmi_clk_regenerator_update_pixel_clock(hdmi); + 
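+		/*
+		 * N/CTS were just refreshed for the pixel clock (which is
+		 * currently hard-coded in hdmi_get_pixel_clk(), see the
+		 * FIXME there) before the audio clock is ungated below.
+		 */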
hdmi_enable_audio_clk(hdmi); + + /* HDMI Initialization Step F - Configure AVI InfoFrame */ + hdmi_config_AVI(hdmi); + } + + hdmi_video_packetize(hdmi); + hdmi_video_csc(hdmi); + hdmi_video_sample(hdmi); + hdmi_tx_hdcp_config(hdmi); + + imx_hdmi_clear_overflow(hdmi); + if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi) + hdmi_enable_overflow_interrupts(hdmi); + + return 0; +} + +/* Wait until we are registered to enable interrupts */ +static int imx_hdmi_fb_registered(struct imx_hdmi *hdmi) +{ + hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL, + HDMI_PHY_I2CM_INT_ADDR); + + hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL | + HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL, + HDMI_PHY_I2CM_CTLINT_ADDR); + + /* enable cable hot plug irq */ + hdmi_writeb(hdmi, (u8)~HDMI_PHY_HPD, HDMI_PHY_MASK0); + + /* Clear Hotplug interrupts */ + hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0); + + /* Unmute interrupts */ + hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0); + + return 0; +} + +static void initialize_hdmi_ih_mutes(struct imx_hdmi *hdmi) +{ + u8 ih_mute; + + /* + * Boot up defaults are: + * HDMI_IH_MUTE = 0x03 (disabled) + * HDMI_IH_MUTE_* = 0x00 (enabled) + * + * Disable top level interrupt bits in HDMI block + */ + ih_mute = hdmi_readb(hdmi, HDMI_IH_MUTE) | + HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT | + HDMI_IH_MUTE_MUTE_ALL_INTERRUPT; + + hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE); + + /* by default mask all interrupts */ + hdmi_writeb(hdmi, 0xff, HDMI_VP_MASK); + hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK0); + hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK1); + hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK2); + hdmi_writeb(hdmi, 0xff, HDMI_PHY_MASK0); + hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_INT_ADDR); + hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_CTLINT_ADDR); + hdmi_writeb(hdmi, 0xff, HDMI_AUD_INT); + hdmi_writeb(hdmi, 0xff, HDMI_AUD_SPDIFINT); + hdmi_writeb(hdmi, 0xff, HDMI_AUD_HBR_MASK); + hdmi_writeb(hdmi, 0xff, HDMI_GP_MASK); + hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK); + hdmi_writeb(hdmi, 0xff, HDMI_CEC_MASK); + hdmi_writeb(hdmi, 0xff, HDMI_I2CM_INT); + hdmi_writeb(hdmi, 0xff, HDMI_I2CM_CTLINT); + + /* Disable interrupts in the IH_MUTE_* registers */ + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT0); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT1); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT2); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AS_STAT0); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_PHY_STAT0); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CM_STAT0); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_CEC_STAT0); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_VP_STAT0); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CMPHY_STAT0); + hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0); + + /* Enable top level interrupt bits in HDMI block */ + ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT | + HDMI_IH_MUTE_MUTE_ALL_INTERRUPT); + hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE); +} + +static void imx_hdmi_poweron(struct imx_hdmi *hdmi) +{ + imx_hdmi_setup(hdmi, &hdmi->previous_mode); +} + +static void imx_hdmi_poweroff(struct imx_hdmi *hdmi) +{ + imx_hdmi_phy_disable(hdmi); +} + +static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector + *connector, bool force) +{ + /* FIXME */ + return connector_status_connected; +} + +static void imx_hdmi_connector_destroy(struct drm_connector *connector) +{ +} + +static int imx_hdmi_connector_get_modes(struct drm_connector *connector) +{ + struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi, + connector); + struct edid *edid; + int ret; + + if 
(!hdmi->ddc) + return 0; + + edid = drm_get_edid(connector, hdmi->ddc); + if (edid) { + dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n", + edid->width_cm, edid->height_cm); + + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + } else { + dev_dbg(hdmi->dev, "failed to get edid\n"); + } + + return 0; +} + +static int imx_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + + return MODE_OK; +} + +static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector + *connector) +{ + struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi, + connector); + + return &hdmi->encoder; +} + +static void imx_hdmi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder); + + imx_hdmi_setup(hdmi, mode); + + /* Store the display mode for plugin/DKMS poweron events */ + memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode)); +} + +static bool imx_hdmi_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void imx_hdmi_encoder_disable(struct drm_encoder *encoder) +{ +} + +static void imx_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder); + + if (mode) + imx_hdmi_poweroff(hdmi); + else + imx_hdmi_poweron(hdmi); +} + +static void imx_hdmi_encoder_prepare(struct drm_encoder *encoder) +{ + struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder); + + imx_hdmi_poweroff(hdmi); + imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_NONE, + V4L2_PIX_FMT_RGB24); +} + +static void imx_hdmi_encoder_commit(struct drm_encoder *encoder) +{ + struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder); + int mux = imx_drm_encoder_get_mux_id(hdmi->imx_drm_encoder, + encoder->crtc); + + imx_hdmi_set_ipu_di_mux(hdmi, mux); + + imx_hdmi_poweron(hdmi); +} + +static void imx_hdmi_encoder_destroy(struct drm_encoder *encoder) +{ + return; +} + +static struct drm_encoder_funcs imx_hdmi_encoder_funcs = { + .destroy = imx_hdmi_encoder_destroy, +}; + +static struct drm_encoder_helper_funcs imx_hdmi_encoder_helper_funcs = { + .dpms = imx_hdmi_encoder_dpms, + .prepare = imx_hdmi_encoder_prepare, + .commit = imx_hdmi_encoder_commit, + .mode_set = imx_hdmi_encoder_mode_set, + .mode_fixup = imx_hdmi_encoder_mode_fixup, + .disable = imx_hdmi_encoder_disable, +}; + +static struct drm_connector_funcs imx_hdmi_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = imx_hdmi_connector_detect, + .destroy = imx_hdmi_connector_destroy, +}; + +static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = { + .get_modes = imx_hdmi_connector_get_modes, + .mode_valid = imx_hdmi_connector_mode_valid, + .best_encoder = imx_hdmi_connector_best_encoder, +}; + +static irqreturn_t imx_hdmi_irq(int irq, void *dev_id) +{ + struct imx_hdmi *hdmi = dev_id; + u8 intr_stat; + u8 phy_int_pol; + u8 val; + + intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); + + phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0); + + if (intr_stat & HDMI_IH_PHY_STAT0_HPD) { + if (phy_int_pol & HDMI_PHY_HPD) { + dev_dbg(hdmi->dev, "EVENT=plugin\n"); + + val = hdmi_readb(hdmi, HDMI_PHY_POL0); + val &= 
~HDMI_PHY_HPD; + hdmi_writeb(hdmi, val, HDMI_PHY_POL0); + + imx_hdmi_poweron(hdmi); + } else { + dev_dbg(hdmi->dev, "EVENT=plugout\n"); + + val = hdmi_readb(hdmi, HDMI_PHY_POL0); + val |= HDMI_PHY_HPD; + hdmi_writeb(hdmi, val, HDMI_PHY_POL0); + + imx_hdmi_poweroff(hdmi); + } + } + + hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); + + return IRQ_HANDLED; +} + +static int imx_hdmi_register(struct imx_hdmi *hdmi) +{ + int ret; + + hdmi->connector.funcs = &imx_hdmi_connector_funcs; + hdmi->encoder.funcs = &imx_hdmi_encoder_funcs; + + hdmi->encoder.encoder_type = DRM_MODE_ENCODER_TMDS; + hdmi->connector.connector_type = DRM_MODE_CONNECTOR_HDMIA; + + drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs); + ret = imx_drm_add_encoder(&hdmi->encoder, &hdmi->imx_drm_encoder, + THIS_MODULE); + if (ret) { + dev_err(hdmi->dev, "adding encoder failed: %d\n", ret); + return ret; + } + + drm_connector_helper_add(&hdmi->connector, + &imx_hdmi_connector_helper_funcs); + + ret = imx_drm_add_connector(&hdmi->connector, + &hdmi->imx_drm_connector, THIS_MODULE); + if (ret) { + imx_drm_remove_encoder(hdmi->imx_drm_encoder); + dev_err(hdmi->dev, "adding connector failed: %d\n", ret); + return ret; + } + + hdmi->connector.encoder = &hdmi->encoder; + + drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder); + + return 0; +} + +static struct platform_device_id imx_hdmi_devtype[] = { + { + .name = "imx6q-hdmi", + .driver_data = IMX6Q_HDMI, + }, { + .name = "imx6dl-hdmi", + .driver_data = IMX6DL_HDMI, + }, { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype); + +static const struct of_device_id imx_hdmi_dt_ids[] = { +{ .compatible = "fsl,imx6q-hdmi", .data = &imx_hdmi_devtype[IMX6Q_HDMI], }, +{ .compatible = "fsl,imx6dl-hdmi", .data = &imx_hdmi_devtype[IMX6DL_HDMI], }, +{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids); + +static int imx_hdmi_platform_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id = + of_match_device(imx_hdmi_dt_ids, &pdev->dev); + struct device_node *np = pdev->dev.of_node; + struct device_node *ddc_node; + struct imx_hdmi *hdmi; + struct resource *iores; + int ret, irq; + + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + hdmi->dev = &pdev->dev; + + if (of_id) { + const struct platform_device_id *device_id = of_id->data; + hdmi->dev_type = device_id->driver_data; + } + + ddc_node = of_parse_phandle(np, "ddc", 0); + if (ddc_node) { + hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); + if (!hdmi->ddc) + dev_dbg(hdmi->dev, "failed to read ddc node\n"); + + of_node_put(ddc_node); + } else { + dev_dbg(hdmi->dev, "no ddc property found\n"); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + ret = devm_request_irq(&pdev->dev, irq, imx_hdmi_irq, 0, + dev_name(&pdev->dev), hdmi); + if (ret) + return ret; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdmi->regs = devm_ioremap_resource(&pdev->dev, iores); + if (IS_ERR(hdmi->regs)) + return PTR_ERR(hdmi->regs); + + hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); + if (IS_ERR(hdmi->regmap)) + return PTR_ERR(hdmi->regmap); + + hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr"); + if (IS_ERR(hdmi->isfr_clk)) { + ret = PTR_ERR(hdmi->isfr_clk); + dev_err(hdmi->dev, + "Unable to get HDMI isfr clk: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(hdmi->isfr_clk); + if (ret) { + dev_err(hdmi->dev, + "Cannot enable HDMI isfr clock: %d\n", ret); + return ret; + } + + 
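+	/*
+	 * A second HDMI clock ("iahb") is requested and enabled below;
+	 * the err_iahb/err_isfr unwind labels at the end of probe
+	 * release the clocks in the reverse order of enabling.
+	 */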
hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb"); + if (IS_ERR(hdmi->iahb_clk)) { + ret = PTR_ERR(hdmi->iahb_clk); + dev_err(hdmi->dev, + "Unable to get HDMI iahb clk: %d\n", ret); + goto err_isfr; + } + + ret = clk_prepare_enable(hdmi->iahb_clk); + if (ret) { + dev_err(hdmi->dev, + "Cannot enable HDMI iahb clock: %d\n", ret); + goto err_isfr; + } + + /* Product and revision IDs */ + dev_info(&pdev->dev, + "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n", + hdmi_readb(hdmi, HDMI_DESIGN_ID), + hdmi_readb(hdmi, HDMI_REVISION_ID), + hdmi_readb(hdmi, HDMI_PRODUCT_ID0), + hdmi_readb(hdmi, HDMI_PRODUCT_ID1)); + + initialize_hdmi_ih_mutes(hdmi); + + /* + * To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator + * N and cts values before enabling phy + */ + hdmi_init_clk_regenerator(hdmi); + + /* + * Configure registers related to HDMI interrupt + * generation before registering IRQ. + */ + hdmi_writeb(hdmi, HDMI_PHY_HPD, HDMI_PHY_POL0); + + /* Clear Hotplug interrupts */ + hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0); + + ret = imx_hdmi_fb_registered(hdmi); + if (ret) + goto err_iahb; + + ret = imx_hdmi_register(hdmi); + if (ret) + goto err_iahb; + + imx_drm_encoder_add_possible_crtcs(hdmi->imx_drm_encoder, np); + + platform_set_drvdata(pdev, hdmi); + + return 0; + +err_iahb: + clk_disable_unprepare(hdmi->iahb_clk); +err_isfr: + clk_disable_unprepare(hdmi->isfr_clk); + + return ret; +} + +static int imx_hdmi_platform_remove(struct platform_device *pdev) +{ + struct imx_hdmi *hdmi = platform_get_drvdata(pdev); + struct drm_connector *connector = &hdmi->connector; + struct drm_encoder *encoder = &hdmi->encoder; + + drm_mode_connector_detach_encoder(connector, encoder); + imx_drm_remove_connector(hdmi->imx_drm_connector); + imx_drm_remove_encoder(hdmi->imx_drm_encoder); + + clk_disable_unprepare(hdmi->iahb_clk); + clk_disable_unprepare(hdmi->isfr_clk); + i2c_put_adapter(hdmi->ddc); + + return 0; +} + +static struct platform_driver imx_hdmi_driver = { + .probe = imx_hdmi_platform_probe, + .remove = imx_hdmi_platform_remove, + .driver = { + .name = "imx-hdmi", + .owner = THIS_MODULE, + .of_match_table = imx_hdmi_dt_ids, + }, +}; + +module_platform_driver(imx_hdmi_driver); + +MODULE_AUTHOR("Sascha Hauer "); +MODULE_DESCRIPTION("i.MX6 HDMI transmitter driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:imx-hdmi"); diff --git a/drivers/staging/imx-drm/imx-hdmi.h b/drivers/staging/imx-drm/imx-hdmi.h new file mode 100644 index 00000000000000..39b677689db615 --- /dev/null +++ b/drivers/staging/imx-drm/imx-hdmi.h @@ -0,0 +1,1032 @@ +/* + * Copyright (C) 2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __IMX_HDMI_H__ +#define __IMX_HDMI_H__ + +/* Identification Registers */ +#define HDMI_DESIGN_ID 0x0000 +#define HDMI_REVISION_ID 0x0001 +#define HDMI_PRODUCT_ID0 0x0002 +#define HDMI_PRODUCT_ID1 0x0003 +#define HDMI_CONFIG0_ID 0x0004 +#define HDMI_CONFIG1_ID 0x0005 +#define HDMI_CONFIG2_ID 0x0006 +#define HDMI_CONFIG3_ID 0x0007 + +/* Interrupt Registers */ +#define HDMI_IH_FC_STAT0 0x0100 +#define HDMI_IH_FC_STAT1 0x0101 +#define HDMI_IH_FC_STAT2 0x0102 +#define HDMI_IH_AS_STAT0 0x0103 +#define HDMI_IH_PHY_STAT0 0x0104 +#define HDMI_IH_I2CM_STAT0 0x0105 +#define HDMI_IH_CEC_STAT0 0x0106 +#define HDMI_IH_VP_STAT0 0x0107 +#define HDMI_IH_I2CMPHY_STAT0 0x0108 +#define HDMI_IH_AHBDMAAUD_STAT0 0x0109 + +#define HDMI_IH_MUTE_FC_STAT0 0x0180 +#define HDMI_IH_MUTE_FC_STAT1 0x0181 +#define HDMI_IH_MUTE_FC_STAT2 0x0182 +#define HDMI_IH_MUTE_AS_STAT0 0x0183 +#define HDMI_IH_MUTE_PHY_STAT0 0x0184 +#define HDMI_IH_MUTE_I2CM_STAT0 0x0185 +#define HDMI_IH_MUTE_CEC_STAT0 0x0186 +#define HDMI_IH_MUTE_VP_STAT0 0x0187 +#define HDMI_IH_MUTE_I2CMPHY_STAT0 0x0188 +#define HDMI_IH_MUTE_AHBDMAAUD_STAT0 0x0189 +#define HDMI_IH_MUTE 0x01FF + +/* Video Sample Registers */ +#define HDMI_TX_INVID0 0x0200 +#define HDMI_TX_INSTUFFING 0x0201 +#define HDMI_TX_GYDATA0 0x0202 +#define HDMI_TX_GYDATA1 0x0203 +#define HDMI_TX_RCRDATA0 0x0204 +#define HDMI_TX_RCRDATA1 0x0205 +#define HDMI_TX_BCBDATA0 0x0206 +#define HDMI_TX_BCBDATA1 0x0207 + +/* Video Packetizer Registers */ +#define HDMI_VP_STATUS 0x0800 +#define HDMI_VP_PR_CD 0x0801 +#define HDMI_VP_STUFF 0x0802 +#define HDMI_VP_REMAP 0x0803 +#define HDMI_VP_CONF 0x0804 +#define HDMI_VP_STAT 0x0805 +#define HDMI_VP_INT 0x0806 +#define HDMI_VP_MASK 0x0807 +#define HDMI_VP_POL 0x0808 + +/* Frame Composer Registers */ +#define HDMI_FC_INVIDCONF 0x1000 +#define HDMI_FC_INHACTV0 0x1001 +#define HDMI_FC_INHACTV1 0x1002 +#define HDMI_FC_INHBLANK0 0x1003 +#define HDMI_FC_INHBLANK1 0x1004 +#define HDMI_FC_INVACTV0 0x1005 +#define HDMI_FC_INVACTV1 0x1006 +#define HDMI_FC_INVBLANK 0x1007 +#define HDMI_FC_HSYNCINDELAY0 0x1008 +#define HDMI_FC_HSYNCINDELAY1 0x1009 +#define HDMI_FC_HSYNCINWIDTH0 0x100A +#define HDMI_FC_HSYNCINWIDTH1 0x100B +#define HDMI_FC_VSYNCINDELAY 0x100C +#define HDMI_FC_VSYNCINWIDTH 0x100D +#define HDMI_FC_INFREQ0 0x100E +#define HDMI_FC_INFREQ1 0x100F +#define HDMI_FC_INFREQ2 0x1010 +#define HDMI_FC_CTRLDUR 0x1011 +#define HDMI_FC_EXCTRLDUR 0x1012 +#define HDMI_FC_EXCTRLSPAC 0x1013 +#define HDMI_FC_CH0PREAM 0x1014 +#define HDMI_FC_CH1PREAM 0x1015 +#define HDMI_FC_CH2PREAM 0x1016 +#define HDMI_FC_AVICONF3 0x1017 +#define HDMI_FC_GCP 0x1018 +#define HDMI_FC_AVICONF0 0x1019 +#define HDMI_FC_AVICONF1 0x101A +#define HDMI_FC_AVICONF2 0x101B +#define HDMI_FC_AVIVID 0x101C +#define HDMI_FC_AVIETB0 0x101D +#define HDMI_FC_AVIETB1 0x101E +#define HDMI_FC_AVISBB0 0x101F +#define HDMI_FC_AVISBB1 0x1020 +#define HDMI_FC_AVIELB0 0x1021 +#define HDMI_FC_AVIELB1 0x1022 +#define HDMI_FC_AVISRB0 0x1023 +#define HDMI_FC_AVISRB1 0x1024 +#define HDMI_FC_AUDICONF0 0x1025 +#define HDMI_FC_AUDICONF1 0x1026 +#define HDMI_FC_AUDICONF2 0x1027 +#define HDMI_FC_AUDICONF3 0x1028 +#define HDMI_FC_VSDIEEEID0 0x1029 +#define HDMI_FC_VSDSIZE 0x102A +#define HDMI_FC_VSDIEEEID1 0x1030 +#define HDMI_FC_VSDIEEEID2 0x1031 +#define HDMI_FC_VSDPAYLOAD0 0x1032 +#define HDMI_FC_VSDPAYLOAD1 0x1033 +#define HDMI_FC_VSDPAYLOAD2 0x1034 +#define HDMI_FC_VSDPAYLOAD3 0x1035 +#define HDMI_FC_VSDPAYLOAD4 0x1036 +#define HDMI_FC_VSDPAYLOAD5 0x1037 +#define HDMI_FC_VSDPAYLOAD6 0x1038 +#define 
HDMI_FC_VSDPAYLOAD7 0x1039 +#define HDMI_FC_VSDPAYLOAD8 0x103A +#define HDMI_FC_VSDPAYLOAD9 0x103B +#define HDMI_FC_VSDPAYLOAD10 0x103C +#define HDMI_FC_VSDPAYLOAD11 0x103D +#define HDMI_FC_VSDPAYLOAD12 0x103E +#define HDMI_FC_VSDPAYLOAD13 0x103F +#define HDMI_FC_VSDPAYLOAD14 0x1040 +#define HDMI_FC_VSDPAYLOAD15 0x1041 +#define HDMI_FC_VSDPAYLOAD16 0x1042 +#define HDMI_FC_VSDPAYLOAD17 0x1043 +#define HDMI_FC_VSDPAYLOAD18 0x1044 +#define HDMI_FC_VSDPAYLOAD19 0x1045 +#define HDMI_FC_VSDPAYLOAD20 0x1046 +#define HDMI_FC_VSDPAYLOAD21 0x1047 +#define HDMI_FC_VSDPAYLOAD22 0x1048 +#define HDMI_FC_VSDPAYLOAD23 0x1049 +#define HDMI_FC_SPDVENDORNAME0 0x104A +#define HDMI_FC_SPDVENDORNAME1 0x104B +#define HDMI_FC_SPDVENDORNAME2 0x104C +#define HDMI_FC_SPDVENDORNAME3 0x104D +#define HDMI_FC_SPDVENDORNAME4 0x104E +#define HDMI_FC_SPDVENDORNAME5 0x104F +#define HDMI_FC_SPDVENDORNAME6 0x1050 +#define HDMI_FC_SPDVENDORNAME7 0x1051 +#define HDMI_FC_SDPPRODUCTNAME0 0x1052 +#define HDMI_FC_SDPPRODUCTNAME1 0x1053 +#define HDMI_FC_SDPPRODUCTNAME2 0x1054 +#define HDMI_FC_SDPPRODUCTNAME3 0x1055 +#define HDMI_FC_SDPPRODUCTNAME4 0x1056 +#define HDMI_FC_SDPPRODUCTNAME5 0x1057 +#define HDMI_FC_SDPPRODUCTNAME6 0x1058 +#define HDMI_FC_SDPPRODUCTNAME7 0x1059 +#define HDMI_FC_SDPPRODUCTNAME8 0x105A +#define HDMI_FC_SDPPRODUCTNAME9 0x105B +#define HDMI_FC_SDPPRODUCTNAME10 0x105C +#define HDMI_FC_SDPPRODUCTNAME11 0x105D +#define HDMI_FC_SDPPRODUCTNAME12 0x105E +#define HDMI_FC_SDPPRODUCTNAME13 0x105F +#define HDMI_FC_SDPPRODUCTNAME14 0x1060 +#define HDMI_FC_SPDPRODUCTNAME15 0x1061 +#define HDMI_FC_SPDDEVICEINF 0x1062 +#define HDMI_FC_AUDSCONF 0x1063 +#define HDMI_FC_AUDSSTAT 0x1064 +#define HDMI_FC_DATACH0FILL 0x1070 +#define HDMI_FC_DATACH1FILL 0x1071 +#define HDMI_FC_DATACH2FILL 0x1072 +#define HDMI_FC_CTRLQHIGH 0x1073 +#define HDMI_FC_CTRLQLOW 0x1074 +#define HDMI_FC_ACP0 0x1075 +#define HDMI_FC_ACP28 0x1076 +#define HDMI_FC_ACP27 0x1077 +#define HDMI_FC_ACP26 0x1078 +#define HDMI_FC_ACP25 0x1079 +#define HDMI_FC_ACP24 0x107A +#define HDMI_FC_ACP23 0x107B +#define HDMI_FC_ACP22 0x107C +#define HDMI_FC_ACP21 0x107D +#define HDMI_FC_ACP20 0x107E +#define HDMI_FC_ACP19 0x107F +#define HDMI_FC_ACP18 0x1080 +#define HDMI_FC_ACP17 0x1081 +#define HDMI_FC_ACP16 0x1082 +#define HDMI_FC_ACP15 0x1083 +#define HDMI_FC_ACP14 0x1084 +#define HDMI_FC_ACP13 0x1085 +#define HDMI_FC_ACP12 0x1086 +#define HDMI_FC_ACP11 0x1087 +#define HDMI_FC_ACP10 0x1088 +#define HDMI_FC_ACP9 0x1089 +#define HDMI_FC_ACP8 0x108A +#define HDMI_FC_ACP7 0x108B +#define HDMI_FC_ACP6 0x108C +#define HDMI_FC_ACP5 0x108D +#define HDMI_FC_ACP4 0x108E +#define HDMI_FC_ACP3 0x108F +#define HDMI_FC_ACP2 0x1090 +#define HDMI_FC_ACP1 0x1091 +#define HDMI_FC_ISCR1_0 0x1092 +#define HDMI_FC_ISCR1_16 0x1093 +#define HDMI_FC_ISCR1_15 0x1094 +#define HDMI_FC_ISCR1_14 0x1095 +#define HDMI_FC_ISCR1_13 0x1096 +#define HDMI_FC_ISCR1_12 0x1097 +#define HDMI_FC_ISCR1_11 0x1098 +#define HDMI_FC_ISCR1_10 0x1099 +#define HDMI_FC_ISCR1_9 0x109A +#define HDMI_FC_ISCR1_8 0x109B +#define HDMI_FC_ISCR1_7 0x109C +#define HDMI_FC_ISCR1_6 0x109D +#define HDMI_FC_ISCR1_5 0x109E +#define HDMI_FC_ISCR1_4 0x109F +#define HDMI_FC_ISCR1_3 0x10A0 +#define HDMI_FC_ISCR1_2 0x10A1 +#define HDMI_FC_ISCR1_1 0x10A2 +#define HDMI_FC_ISCR2_15 0x10A3 +#define HDMI_FC_ISCR2_14 0x10A4 +#define HDMI_FC_ISCR2_13 0x10A5 +#define HDMI_FC_ISCR2_12 0x10A6 +#define HDMI_FC_ISCR2_11 0x10A7 +#define HDMI_FC_ISCR2_10 0x10A8 +#define HDMI_FC_ISCR2_9 0x10A9 +#define HDMI_FC_ISCR2_8 0x10AA +#define HDMI_FC_ISCR2_7 
0x10AB +#define HDMI_FC_ISCR2_6 0x10AC +#define HDMI_FC_ISCR2_5 0x10AD +#define HDMI_FC_ISCR2_4 0x10AE +#define HDMI_FC_ISCR2_3 0x10AF +#define HDMI_FC_ISCR2_2 0x10B0 +#define HDMI_FC_ISCR2_1 0x10B1 +#define HDMI_FC_ISCR2_0 0x10B2 +#define HDMI_FC_DATAUTO0 0x10B3 +#define HDMI_FC_DATAUTO1 0x10B4 +#define HDMI_FC_DATAUTO2 0x10B5 +#define HDMI_FC_DATMAN 0x10B6 +#define HDMI_FC_DATAUTO3 0x10B7 +#define HDMI_FC_RDRB0 0x10B8 +#define HDMI_FC_RDRB1 0x10B9 +#define HDMI_FC_RDRB2 0x10BA +#define HDMI_FC_RDRB3 0x10BB +#define HDMI_FC_RDRB4 0x10BC +#define HDMI_FC_RDRB5 0x10BD +#define HDMI_FC_RDRB6 0x10BE +#define HDMI_FC_RDRB7 0x10BF +#define HDMI_FC_STAT0 0x10D0 +#define HDMI_FC_INT0 0x10D1 +#define HDMI_FC_MASK0 0x10D2 +#define HDMI_FC_POL0 0x10D3 +#define HDMI_FC_STAT1 0x10D4 +#define HDMI_FC_INT1 0x10D5 +#define HDMI_FC_MASK1 0x10D6 +#define HDMI_FC_POL1 0x10D7 +#define HDMI_FC_STAT2 0x10D8 +#define HDMI_FC_INT2 0x10D9 +#define HDMI_FC_MASK2 0x10DA +#define HDMI_FC_POL2 0x10DB +#define HDMI_FC_PRCONF 0x10E0 + +#define HDMI_FC_GMD_STAT 0x1100 +#define HDMI_FC_GMD_EN 0x1101 +#define HDMI_FC_GMD_UP 0x1102 +#define HDMI_FC_GMD_CONF 0x1103 +#define HDMI_FC_GMD_HB 0x1104 +#define HDMI_FC_GMD_PB0 0x1105 +#define HDMI_FC_GMD_PB1 0x1106 +#define HDMI_FC_GMD_PB2 0x1107 +#define HDMI_FC_GMD_PB3 0x1108 +#define HDMI_FC_GMD_PB4 0x1109 +#define HDMI_FC_GMD_PB5 0x110A +#define HDMI_FC_GMD_PB6 0x110B +#define HDMI_FC_GMD_PB7 0x110C +#define HDMI_FC_GMD_PB8 0x110D +#define HDMI_FC_GMD_PB9 0x110E +#define HDMI_FC_GMD_PB10 0x110F +#define HDMI_FC_GMD_PB11 0x1110 +#define HDMI_FC_GMD_PB12 0x1111 +#define HDMI_FC_GMD_PB13 0x1112 +#define HDMI_FC_GMD_PB14 0x1113 +#define HDMI_FC_GMD_PB15 0x1114 +#define HDMI_FC_GMD_PB16 0x1115 +#define HDMI_FC_GMD_PB17 0x1116 +#define HDMI_FC_GMD_PB18 0x1117 +#define HDMI_FC_GMD_PB19 0x1118 +#define HDMI_FC_GMD_PB20 0x1119 +#define HDMI_FC_GMD_PB21 0x111A +#define HDMI_FC_GMD_PB22 0x111B +#define HDMI_FC_GMD_PB23 0x111C +#define HDMI_FC_GMD_PB24 0x111D +#define HDMI_FC_GMD_PB25 0x111E +#define HDMI_FC_GMD_PB26 0x111F +#define HDMI_FC_GMD_PB27 0x1120 + +#define HDMI_FC_DBGFORCE 0x1200 +#define HDMI_FC_DBGAUD0CH0 0x1201 +#define HDMI_FC_DBGAUD1CH0 0x1202 +#define HDMI_FC_DBGAUD2CH0 0x1203 +#define HDMI_FC_DBGAUD0CH1 0x1204 +#define HDMI_FC_DBGAUD1CH1 0x1205 +#define HDMI_FC_DBGAUD2CH1 0x1206 +#define HDMI_FC_DBGAUD0CH2 0x1207 +#define HDMI_FC_DBGAUD1CH2 0x1208 +#define HDMI_FC_DBGAUD2CH2 0x1209 +#define HDMI_FC_DBGAUD0CH3 0x120A +#define HDMI_FC_DBGAUD1CH3 0x120B +#define HDMI_FC_DBGAUD2CH3 0x120C +#define HDMI_FC_DBGAUD0CH4 0x120D +#define HDMI_FC_DBGAUD1CH4 0x120E +#define HDMI_FC_DBGAUD2CH4 0x120F +#define HDMI_FC_DBGAUD0CH5 0x1210 +#define HDMI_FC_DBGAUD1CH5 0x1211 +#define HDMI_FC_DBGAUD2CH5 0x1212 +#define HDMI_FC_DBGAUD0CH6 0x1213 +#define HDMI_FC_DBGAUD1CH6 0x1214 +#define HDMI_FC_DBGAUD2CH6 0x1215 +#define HDMI_FC_DBGAUD0CH7 0x1216 +#define HDMI_FC_DBGAUD1CH7 0x1217 +#define HDMI_FC_DBGAUD2CH7 0x1218 +#define HDMI_FC_DBGTMDS0 0x1219 +#define HDMI_FC_DBGTMDS1 0x121A +#define HDMI_FC_DBGTMDS2 0x121B + +/* HDMI Source PHY Registers */ +#define HDMI_PHY_CONF0 0x3000 +#define HDMI_PHY_TST0 0x3001 +#define HDMI_PHY_TST1 0x3002 +#define HDMI_PHY_TST2 0x3003 +#define HDMI_PHY_STAT0 0x3004 +#define HDMI_PHY_INT0 0x3005 +#define HDMI_PHY_MASK0 0x3006 +#define HDMI_PHY_POL0 0x3007 + +/* HDMI Master PHY Registers */ +#define HDMI_PHY_I2CM_SLAVE_ADDR 0x3020 +#define HDMI_PHY_I2CM_ADDRESS_ADDR 0x3021 +#define HDMI_PHY_I2CM_DATAO_1_ADDR 0x3022 +#define HDMI_PHY_I2CM_DATAO_0_ADDR 0x3023 
+#define HDMI_PHY_I2CM_DATAI_1_ADDR 0x3024 +#define HDMI_PHY_I2CM_DATAI_0_ADDR 0x3025 +#define HDMI_PHY_I2CM_OPERATION_ADDR 0x3026 +#define HDMI_PHY_I2CM_INT_ADDR 0x3027 +#define HDMI_PHY_I2CM_CTLINT_ADDR 0x3028 +#define HDMI_PHY_I2CM_DIV_ADDR 0x3029 +#define HDMI_PHY_I2CM_SOFTRSTZ_ADDR 0x302a +#define HDMI_PHY_I2CM_SS_SCL_HCNT_1_ADDR 0x302b +#define HDMI_PHY_I2CM_SS_SCL_HCNT_0_ADDR 0x302c +#define HDMI_PHY_I2CM_SS_SCL_LCNT_1_ADDR 0x302d +#define HDMI_PHY_I2CM_SS_SCL_LCNT_0_ADDR 0x302e +#define HDMI_PHY_I2CM_FS_SCL_HCNT_1_ADDR 0x302f +#define HDMI_PHY_I2CM_FS_SCL_HCNT_0_ADDR 0x3030 +#define HDMI_PHY_I2CM_FS_SCL_LCNT_1_ADDR 0x3031 +#define HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR 0x3032 + +/* Audio Sampler Registers */ +#define HDMI_AUD_CONF0 0x3100 +#define HDMI_AUD_CONF1 0x3101 +#define HDMI_AUD_INT 0x3102 +#define HDMI_AUD_CONF2 0x3103 +#define HDMI_AUD_N1 0x3200 +#define HDMI_AUD_N2 0x3201 +#define HDMI_AUD_N3 0x3202 +#define HDMI_AUD_CTS1 0x3203 +#define HDMI_AUD_CTS2 0x3204 +#define HDMI_AUD_CTS3 0x3205 +#define HDMI_AUD_INPUTCLKFS 0x3206 +#define HDMI_AUD_SPDIFINT 0x3302 +#define HDMI_AUD_CONF0_HBR 0x3400 +#define HDMI_AUD_HBR_STATUS 0x3401 +#define HDMI_AUD_HBR_INT 0x3402 +#define HDMI_AUD_HBR_POL 0x3403 +#define HDMI_AUD_HBR_MASK 0x3404 + +/* + * Generic Parallel Audio Interface Registers + * Not used as GPAUD interface is not enabled in hw + */ +#define HDMI_GP_CONF0 0x3500 +#define HDMI_GP_CONF1 0x3501 +#define HDMI_GP_CONF2 0x3502 +#define HDMI_GP_STAT 0x3503 +#define HDMI_GP_INT 0x3504 +#define HDMI_GP_MASK 0x3505 +#define HDMI_GP_POL 0x3506 + +/* Audio DMA Registers */ +#define HDMI_AHB_DMA_CONF0 0x3600 +#define HDMI_AHB_DMA_START 0x3601 +#define HDMI_AHB_DMA_STOP 0x3602 +#define HDMI_AHB_DMA_THRSLD 0x3603 +#define HDMI_AHB_DMA_STRADDR0 0x3604 +#define HDMI_AHB_DMA_STRADDR1 0x3605 +#define HDMI_AHB_DMA_STRADDR2 0x3606 +#define HDMI_AHB_DMA_STRADDR3 0x3607 +#define HDMI_AHB_DMA_STPADDR0 0x3608 +#define HDMI_AHB_DMA_STPADDR1 0x3609 +#define HDMI_AHB_DMA_STPADDR2 0x360a +#define HDMI_AHB_DMA_STPADDR3 0x360b +#define HDMI_AHB_DMA_BSTADDR0 0x360c +#define HDMI_AHB_DMA_BSTADDR1 0x360d +#define HDMI_AHB_DMA_BSTADDR2 0x360e +#define HDMI_AHB_DMA_BSTADDR3 0x360f +#define HDMI_AHB_DMA_MBLENGTH0 0x3610 +#define HDMI_AHB_DMA_MBLENGTH1 0x3611 +#define HDMI_AHB_DMA_STAT 0x3612 +#define HDMI_AHB_DMA_INT 0x3613 +#define HDMI_AHB_DMA_MASK 0x3614 +#define HDMI_AHB_DMA_POL 0x3615 +#define HDMI_AHB_DMA_CONF1 0x3616 +#define HDMI_AHB_DMA_BUFFSTAT 0x3617 +#define HDMI_AHB_DMA_BUFFINT 0x3618 +#define HDMI_AHB_DMA_BUFFMASK 0x3619 +#define HDMI_AHB_DMA_BUFFPOL 0x361a + +/* Main Controller Registers */ +#define HDMI_MC_SFRDIV 0x4000 +#define HDMI_MC_CLKDIS 0x4001 +#define HDMI_MC_SWRSTZ 0x4002 +#define HDMI_MC_OPCTRL 0x4003 +#define HDMI_MC_FLOWCTRL 0x4004 +#define HDMI_MC_PHYRSTZ 0x4005 +#define HDMI_MC_LOCKONCLOCK 0x4006 +#define HDMI_MC_HEACPHY_RST 0x4007 + +/* Color Space Converter Registers */ +#define HDMI_CSC_CFG 0x4100 +#define HDMI_CSC_SCALE 0x4101 +#define HDMI_CSC_COEF_A1_MSB 0x4102 +#define HDMI_CSC_COEF_A1_LSB 0x4103 +#define HDMI_CSC_COEF_A2_MSB 0x4104 +#define HDMI_CSC_COEF_A2_LSB 0x4105 +#define HDMI_CSC_COEF_A3_MSB 0x4106 +#define HDMI_CSC_COEF_A3_LSB 0x4107 +#define HDMI_CSC_COEF_A4_MSB 0x4108 +#define HDMI_CSC_COEF_A4_LSB 0x4109 +#define HDMI_CSC_COEF_B1_MSB 0x410A +#define HDMI_CSC_COEF_B1_LSB 0x410B +#define HDMI_CSC_COEF_B2_MSB 0x410C +#define HDMI_CSC_COEF_B2_LSB 0x410D +#define HDMI_CSC_COEF_B3_MSB 0x410E +#define HDMI_CSC_COEF_B3_LSB 0x410F +#define HDMI_CSC_COEF_B4_MSB 0x4110 +#define 
HDMI_CSC_COEF_B4_LSB 0x4111 +#define HDMI_CSC_COEF_C1_MSB 0x4112 +#define HDMI_CSC_COEF_C1_LSB 0x4113 +#define HDMI_CSC_COEF_C2_MSB 0x4114 +#define HDMI_CSC_COEF_C2_LSB 0x4115 +#define HDMI_CSC_COEF_C3_MSB 0x4116 +#define HDMI_CSC_COEF_C3_LSB 0x4117 +#define HDMI_CSC_COEF_C4_MSB 0x4118 +#define HDMI_CSC_COEF_C4_LSB 0x4119 + +/* HDCP Encryption Engine Registers */ +#define HDMI_A_HDCPCFG0 0x5000 +#define HDMI_A_HDCPCFG1 0x5001 +#define HDMI_A_HDCPOBS0 0x5002 +#define HDMI_A_HDCPOBS1 0x5003 +#define HDMI_A_HDCPOBS2 0x5004 +#define HDMI_A_HDCPOBS3 0x5005 +#define HDMI_A_APIINTCLR 0x5006 +#define HDMI_A_APIINTSTAT 0x5007 +#define HDMI_A_APIINTMSK 0x5008 +#define HDMI_A_VIDPOLCFG 0x5009 +#define HDMI_A_OESSWCFG 0x500A +#define HDMI_A_TIMER1SETUP0 0x500B +#define HDMI_A_TIMER1SETUP1 0x500C +#define HDMI_A_TIMER2SETUP0 0x500D +#define HDMI_A_TIMER2SETUP1 0x500E +#define HDMI_A_100MSCFG 0x500F +#define HDMI_A_2SCFG0 0x5010 +#define HDMI_A_2SCFG1 0x5011 +#define HDMI_A_5SCFG0 0x5012 +#define HDMI_A_5SCFG1 0x5013 +#define HDMI_A_SRMVERLSB 0x5014 +#define HDMI_A_SRMVERMSB 0x5015 +#define HDMI_A_SRMCTRL 0x5016 +#define HDMI_A_SFRSETUP 0x5017 +#define HDMI_A_I2CHSETUP 0x5018 +#define HDMI_A_INTSETUP 0x5019 +#define HDMI_A_PRESETUP 0x501A +#define HDMI_A_SRM_BASE 0x5020 + +/* CEC Engine Registers */ +#define HDMI_CEC_CTRL 0x7D00 +#define HDMI_CEC_STAT 0x7D01 +#define HDMI_CEC_MASK 0x7D02 +#define HDMI_CEC_POLARITY 0x7D03 +#define HDMI_CEC_INT 0x7D04 +#define HDMI_CEC_ADDR_L 0x7D05 +#define HDMI_CEC_ADDR_H 0x7D06 +#define HDMI_CEC_TX_CNT 0x7D07 +#define HDMI_CEC_RX_CNT 0x7D08 +#define HDMI_CEC_TX_DATA0 0x7D10 +#define HDMI_CEC_TX_DATA1 0x7D11 +#define HDMI_CEC_TX_DATA2 0x7D12 +#define HDMI_CEC_TX_DATA3 0x7D13 +#define HDMI_CEC_TX_DATA4 0x7D14 +#define HDMI_CEC_TX_DATA5 0x7D15 +#define HDMI_CEC_TX_DATA6 0x7D16 +#define HDMI_CEC_TX_DATA7 0x7D17 +#define HDMI_CEC_TX_DATA8 0x7D18 +#define HDMI_CEC_TX_DATA9 0x7D19 +#define HDMI_CEC_TX_DATA10 0x7D1a +#define HDMI_CEC_TX_DATA11 0x7D1b +#define HDMI_CEC_TX_DATA12 0x7D1c +#define HDMI_CEC_TX_DATA13 0x7D1d +#define HDMI_CEC_TX_DATA14 0x7D1e +#define HDMI_CEC_TX_DATA15 0x7D1f +#define HDMI_CEC_RX_DATA0 0x7D20 +#define HDMI_CEC_RX_DATA1 0x7D21 +#define HDMI_CEC_RX_DATA2 0x7D22 +#define HDMI_CEC_RX_DATA3 0x7D23 +#define HDMI_CEC_RX_DATA4 0x7D24 +#define HDMI_CEC_RX_DATA5 0x7D25 +#define HDMI_CEC_RX_DATA6 0x7D26 +#define HDMI_CEC_RX_DATA7 0x7D27 +#define HDMI_CEC_RX_DATA8 0x7D28 +#define HDMI_CEC_RX_DATA9 0x7D29 +#define HDMI_CEC_RX_DATA10 0x7D2a +#define HDMI_CEC_RX_DATA11 0x7D2b +#define HDMI_CEC_RX_DATA12 0x7D2c +#define HDMI_CEC_RX_DATA13 0x7D2d +#define HDMI_CEC_RX_DATA14 0x7D2e +#define HDMI_CEC_RX_DATA15 0x7D2f +#define HDMI_CEC_LOCK 0x7D30 +#define HDMI_CEC_WKUPCTRL 0x7D31 + +/* I2C Master Registers (E-DDC) */ +#define HDMI_I2CM_SLAVE 0x7E00 +#define HDMI_I2CMESS 0x7E01 +#define HDMI_I2CM_DATAO 0x7E02 +#define HDMI_I2CM_DATAI 0x7E03 +#define HDMI_I2CM_OPERATION 0x7E04 +#define HDMI_I2CM_INT 0x7E05 +#define HDMI_I2CM_CTLINT 0x7E06 +#define HDMI_I2CM_DIV 0x7E07 +#define HDMI_I2CM_SEGADDR 0x7E08 +#define HDMI_I2CM_SOFTRSTZ 0x7E09 +#define HDMI_I2CM_SEGPTR 0x7E0A +#define HDMI_I2CM_SS_SCL_HCNT_1_ADDR 0x7E0B +#define HDMI_I2CM_SS_SCL_HCNT_0_ADDR 0x7E0C +#define HDMI_I2CM_SS_SCL_LCNT_1_ADDR 0x7E0D +#define HDMI_I2CM_SS_SCL_LCNT_0_ADDR 0x7E0E +#define HDMI_I2CM_FS_SCL_HCNT_1_ADDR 0x7E0F +#define HDMI_I2CM_FS_SCL_HCNT_0_ADDR 0x7E10 +#define HDMI_I2CM_FS_SCL_LCNT_1_ADDR 0x7E11 +#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12 + +enum { +/* IH_FC_INT2 field values */ + 
HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03, + HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02, + HDMI_IH_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01, + +/* IH_FC_STAT2 field values */ + HDMI_IH_FC_STAT2_OVERFLOW_MASK = 0x03, + HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02, + HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01, + +/* IH_PHY_STAT0 field values */ + HDMI_IH_PHY_STAT0_RX_SENSE3 = 0x20, + HDMI_IH_PHY_STAT0_RX_SENSE2 = 0x10, + HDMI_IH_PHY_STAT0_RX_SENSE1 = 0x8, + HDMI_IH_PHY_STAT0_RX_SENSE0 = 0x4, + HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2, + HDMI_IH_PHY_STAT0_HPD = 0x1, + +/* IH_MUTE_I2CMPHY_STAT0 field values */ + HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2, + HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1, + +/* IH_AHBDMAAUD_STAT0 field values */ + HDMI_IH_AHBDMAAUD_STAT0_ERROR = 0x20, + HDMI_IH_AHBDMAAUD_STAT0_LOST = 0x10, + HDMI_IH_AHBDMAAUD_STAT0_RETRY = 0x08, + HDMI_IH_AHBDMAAUD_STAT0_DONE = 0x04, + HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = 0x02, + HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01, + +/* IH_MUTE_FC_STAT2 field values */ + HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK = 0x03, + HDMI_IH_MUTE_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02, + HDMI_IH_MUTE_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01, + +/* IH_MUTE_AHBDMAAUD_STAT0 field values */ + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = 0x20, + HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = 0x10, + HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = 0x08, + HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = 0x04, + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = 0x02, + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01, + +/* IH_MUTE field values */ + HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT = 0x2, + HDMI_IH_MUTE_MUTE_ALL_INTERRUPT = 0x1, + +/* TX_INVID0 field values */ + HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_MASK = 0x80, + HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_ENABLE = 0x80, + HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE = 0x00, + HDMI_TX_INVID0_VIDEO_MAPPING_MASK = 0x1F, + HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET = 0, + +/* TX_INSTUFFING field values */ + HDMI_TX_INSTUFFING_BDBDATA_STUFFING_MASK = 0x4, + HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE = 0x4, + HDMI_TX_INSTUFFING_BDBDATA_STUFFING_DISABLE = 0x0, + HDMI_TX_INSTUFFING_RCRDATA_STUFFING_MASK = 0x2, + HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE = 0x2, + HDMI_TX_INSTUFFING_RCRDATA_STUFFING_DISABLE = 0x0, + HDMI_TX_INSTUFFING_GYDATA_STUFFING_MASK = 0x1, + HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE = 0x1, + HDMI_TX_INSTUFFING_GYDATA_STUFFING_DISABLE = 0x0, + +/* VP_PR_CD field values */ + HDMI_VP_PR_CD_COLOR_DEPTH_MASK = 0xF0, + HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET = 4, + HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK = 0x0F, + HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET = 0, + +/* VP_STUFF field values */ + HDMI_VP_STUFF_IDEFAULT_PHASE_MASK = 0x20, + HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET = 5, + HDMI_VP_STUFF_IFIX_PP_TO_LAST_MASK = 0x10, + HDMI_VP_STUFF_IFIX_PP_TO_LAST_OFFSET = 4, + HDMI_VP_STUFF_ICX_GOTO_P0_ST_MASK = 0x8, + HDMI_VP_STUFF_ICX_GOTO_P0_ST_OFFSET = 3, + HDMI_VP_STUFF_YCC422_STUFFING_MASK = 0x4, + HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE = 0x4, + HDMI_VP_STUFF_YCC422_STUFFING_DIRECT_MODE = 0x0, + HDMI_VP_STUFF_PP_STUFFING_MASK = 0x2, + HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE = 0x2, + HDMI_VP_STUFF_PP_STUFFING_DIRECT_MODE = 0x0, + HDMI_VP_STUFF_PR_STUFFING_MASK = 0x1, + HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE = 0x1, + HDMI_VP_STUFF_PR_STUFFING_DIRECT_MODE = 0x0, + +/* VP_CONF field values */ + HDMI_VP_CONF_BYPASS_EN_MASK = 0x40, + HDMI_VP_CONF_BYPASS_EN_ENABLE = 0x40, + HDMI_VP_CONF_BYPASS_EN_DISABLE = 0x00, + HDMI_VP_CONF_PP_EN_ENMASK = 0x20, + HDMI_VP_CONF_PP_EN_ENABLE = 0x20, + 
HDMI_VP_CONF_PP_EN_DISABLE = 0x00, + HDMI_VP_CONF_PR_EN_MASK = 0x10, + HDMI_VP_CONF_PR_EN_ENABLE = 0x10, + HDMI_VP_CONF_PR_EN_DISABLE = 0x00, + HDMI_VP_CONF_YCC422_EN_MASK = 0x8, + HDMI_VP_CONF_YCC422_EN_ENABLE = 0x8, + HDMI_VP_CONF_YCC422_EN_DISABLE = 0x0, + HDMI_VP_CONF_BYPASS_SELECT_MASK = 0x4, + HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER = 0x4, + HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER = 0x0, + HDMI_VP_CONF_OUTPUT_SELECTOR_MASK = 0x3, + HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS = 0x3, + HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422 = 0x1, + HDMI_VP_CONF_OUTPUT_SELECTOR_PP = 0x0, + +/* VP_REMAP field values */ + HDMI_VP_REMAP_MASK = 0x3, + HDMI_VP_REMAP_YCC422_24bit = 0x2, + HDMI_VP_REMAP_YCC422_20bit = 0x1, + HDMI_VP_REMAP_YCC422_16bit = 0x0, + +/* FC_INVIDCONF field values */ + HDMI_FC_INVIDCONF_HDCP_KEEPOUT_MASK = 0x80, + HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE = 0x80, + HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE = 0x00, + HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK = 0x40, + HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH = 0x40, + HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW = 0x00, + HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK = 0x20, + HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH = 0x20, + HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW = 0x00, + HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK = 0x10, + HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH = 0x10, + HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW = 0x00, + HDMI_FC_INVIDCONF_DVI_MODEZ_MASK = 0x8, + HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE = 0x8, + HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE = 0x0, + HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK = 0x2, + HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH = 0x2, + HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW = 0x0, + HDMI_FC_INVIDCONF_IN_I_P_MASK = 0x1, + HDMI_FC_INVIDCONF_IN_I_P_INTERLACED = 0x1, + HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE = 0x0, + +/* FC_AUDICONF0 field values */ + HDMI_FC_AUDICONF0_CC_OFFSET = 4, + HDMI_FC_AUDICONF0_CC_MASK = 0x70, + HDMI_FC_AUDICONF0_CT_OFFSET = 0, + HDMI_FC_AUDICONF0_CT_MASK = 0xF, + +/* FC_AUDICONF1 field values */ + HDMI_FC_AUDICONF1_SS_OFFSET = 3, + HDMI_FC_AUDICONF1_SS_MASK = 0x18, + HDMI_FC_AUDICONF1_SF_OFFSET = 0, + HDMI_FC_AUDICONF1_SF_MASK = 0x7, + +/* FC_AUDICONF3 field values */ + HDMI_FC_AUDICONF3_LFEPBL_OFFSET = 5, + HDMI_FC_AUDICONF3_LFEPBL_MASK = 0x60, + HDMI_FC_AUDICONF3_DM_INH_OFFSET = 4, + HDMI_FC_AUDICONF3_DM_INH_MASK = 0x10, + HDMI_FC_AUDICONF3_LSV_OFFSET = 0, + HDMI_FC_AUDICONF3_LSV_MASK = 0xF, + +/* FC_AUDSCHNLS0 field values */ + HDMI_FC_AUDSCHNLS0_CGMSA_OFFSET = 4, + HDMI_FC_AUDSCHNLS0_CGMSA_MASK = 0x30, + HDMI_FC_AUDSCHNLS0_COPYRIGHT_OFFSET = 0, + HDMI_FC_AUDSCHNLS0_COPYRIGHT_MASK = 0x01, + +/* FC_AUDSCHNLS3-6 field values */ + HDMI_FC_AUDSCHNLS3_OIEC_CH0_OFFSET = 0, + HDMI_FC_AUDSCHNLS3_OIEC_CH0_MASK = 0x0f, + HDMI_FC_AUDSCHNLS3_OIEC_CH1_OFFSET = 4, + HDMI_FC_AUDSCHNLS3_OIEC_CH1_MASK = 0xf0, + HDMI_FC_AUDSCHNLS4_OIEC_CH2_OFFSET = 0, + HDMI_FC_AUDSCHNLS4_OIEC_CH2_MASK = 0x0f, + HDMI_FC_AUDSCHNLS4_OIEC_CH3_OFFSET = 4, + HDMI_FC_AUDSCHNLS4_OIEC_CH3_MASK = 0xf0, + + HDMI_FC_AUDSCHNLS5_OIEC_CH0_OFFSET = 0, + HDMI_FC_AUDSCHNLS5_OIEC_CH0_MASK = 0x0f, + HDMI_FC_AUDSCHNLS5_OIEC_CH1_OFFSET = 4, + HDMI_FC_AUDSCHNLS5_OIEC_CH1_MASK = 0xf0, + HDMI_FC_AUDSCHNLS6_OIEC_CH2_OFFSET = 0, + HDMI_FC_AUDSCHNLS6_OIEC_CH2_MASK = 0x0f, + HDMI_FC_AUDSCHNLS6_OIEC_CH3_OFFSET = 4, + HDMI_FC_AUDSCHNLS6_OIEC_CH3_MASK = 0xf0, + +/* HDMI_FC_AUDSCHNLS7 field values */ + HDMI_FC_AUDSCHNLS7_ACCURACY_OFFSET = 4, + HDMI_FC_AUDSCHNLS7_ACCURACY_MASK = 0x30, + +/* HDMI_FC_AUDSCHNLS8 field values */ + 
HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_MASK = 0xf0, + HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_OFFSET = 4, + HDMI_FC_AUDSCHNLS8_WORDLEGNTH_MASK = 0x0f, + HDMI_FC_AUDSCHNLS8_WORDLEGNTH_OFFSET = 0, + +/* FC_AUDSCONF field values */ + HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_MASK = 0xF0, + HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_OFFSET = 4, + HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK = 0x1, + HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_OFFSET = 0, + HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1 = 0x1, + HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0 = 0x0, + +/* FC_STAT2 field values */ + HDMI_FC_STAT2_OVERFLOW_MASK = 0x03, + HDMI_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02, + HDMI_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01, + +/* FC_INT2 field values */ + HDMI_FC_INT2_OVERFLOW_MASK = 0x03, + HDMI_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02, + HDMI_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01, + +/* FC_MASK2 field values */ + HDMI_FC_MASK2_OVERFLOW_MASK = 0x03, + HDMI_FC_MASK2_LOW_PRIORITY_OVERFLOW = 0x02, + HDMI_FC_MASK2_HIGH_PRIORITY_OVERFLOW = 0x01, + +/* FC_PRCONF field values */ + HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK = 0xF0, + HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET = 4, + HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F, + HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0, + +/* FC_AVICONF0-FC_AVICONF3 field values */ + HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03, + HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00, + HDMI_FC_AVICONF0_PIX_FMT_YCBCR422 = 0x01, + HDMI_FC_AVICONF0_PIX_FMT_YCBCR444 = 0x02, + HDMI_FC_AVICONF0_ACTIVE_FMT_MASK = 0x40, + HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT = 0x40, + HDMI_FC_AVICONF0_ACTIVE_FMT_NO_INFO = 0x00, + HDMI_FC_AVICONF0_BAR_DATA_MASK = 0x0C, + HDMI_FC_AVICONF0_BAR_DATA_NO_DATA = 0x00, + HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR = 0x04, + HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR = 0x08, + HDMI_FC_AVICONF0_BAR_DATA_VERT_HORIZ_BAR = 0x0C, + HDMI_FC_AVICONF0_SCAN_INFO_MASK = 0x30, + HDMI_FC_AVICONF0_SCAN_INFO_OVERSCAN = 0x10, + HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN = 0x20, + HDMI_FC_AVICONF0_SCAN_INFO_NODATA = 0x00, + + HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK = 0x0F, + HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_USE_CODED = 0x08, + HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3 = 0x09, + HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9 = 0x0A, + HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_14_9 = 0x0B, + HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_MASK = 0x30, + HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_NO_DATA = 0x00, + HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3 = 0x10, + HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9 = 0x20, + HDMI_FC_AVICONF1_COLORIMETRY_MASK = 0xC0, + HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA = 0x00, + HDMI_FC_AVICONF1_COLORIMETRY_SMPTE = 0x40, + HDMI_FC_AVICONF1_COLORIMETRY_ITUR = 0x80, + HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO = 0xC0, + + HDMI_FC_AVICONF2_SCALING_MASK = 0x03, + HDMI_FC_AVICONF2_SCALING_NONE = 0x00, + HDMI_FC_AVICONF2_SCALING_HORIZ = 0x01, + HDMI_FC_AVICONF2_SCALING_VERT = 0x02, + HDMI_FC_AVICONF2_SCALING_HORIZ_VERT = 0x03, + HDMI_FC_AVICONF2_RGB_QUANT_MASK = 0x0C, + HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT = 0x00, + HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE = 0x04, + HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE = 0x08, + HDMI_FC_AVICONF2_EXT_COLORIMETRY_MASK = 0x70, + HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601 = 0x00, + HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709 = 0x10, + HDMI_FC_AVICONF2_EXT_COLORIMETRY_SYCC601 = 0x20, + HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_YCC601 = 0x30, + HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_RGB = 0x40, + HDMI_FC_AVICONF2_IT_CONTENT_MASK = 0x80, + HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA = 0x00, + HDMI_FC_AVICONF2_IT_CONTENT_VALID = 0x80, + + 
HDMI_FC_AVICONF3_IT_CONTENT_TYPE_MASK = 0x03, + HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS = 0x00, + HDMI_FC_AVICONF3_IT_CONTENT_TYPE_PHOTO = 0x01, + HDMI_FC_AVICONF3_IT_CONTENT_TYPE_CINEMA = 0x02, + HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GAME = 0x03, + HDMI_FC_AVICONF3_QUANT_RANGE_MASK = 0x0C, + HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00, + HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04, + +/* FC_DBGFORCE field values */ + HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10, + HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1, + +/* PHY_CONF0 field values */ + HDMI_PHY_CONF0_PDZ_MASK = 0x80, + HDMI_PHY_CONF0_PDZ_OFFSET = 7, + HDMI_PHY_CONF0_ENTMDS_MASK = 0x40, + HDMI_PHY_CONF0_ENTMDS_OFFSET = 6, + HDMI_PHY_CONF0_SPARECTRL = 0x20, + HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10, + HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4, + HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8, + HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET = 3, + HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK = 0x4, + HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET = 2, + HDMI_PHY_CONF0_SELDATAENPOL_MASK = 0x2, + HDMI_PHY_CONF0_SELDATAENPOL_OFFSET = 1, + HDMI_PHY_CONF0_SELDIPIF_MASK = 0x1, + HDMI_PHY_CONF0_SELDIPIF_OFFSET = 0, + +/* PHY_TST0 field values */ + HDMI_PHY_TST0_TSTCLR_MASK = 0x20, + HDMI_PHY_TST0_TSTCLR_OFFSET = 5, + HDMI_PHY_TST0_TSTEN_MASK = 0x10, + HDMI_PHY_TST0_TSTEN_OFFSET = 4, + HDMI_PHY_TST0_TSTCLK_MASK = 0x1, + HDMI_PHY_TST0_TSTCLK_OFFSET = 0, + +/* PHY_STAT0 field values */ + HDMI_PHY_RX_SENSE3 = 0x80, + HDMI_PHY_RX_SENSE2 = 0x40, + HDMI_PHY_RX_SENSE1 = 0x20, + HDMI_PHY_RX_SENSE0 = 0x10, + HDMI_PHY_HPD = 0x02, + HDMI_PHY_TX_PHY_LOCK = 0x01, + +/* PHY_I2CM_SLAVE_ADDR field values */ + HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2 = 0x69, + HDMI_PHY_I2CM_SLAVE_ADDR_HEAC_PHY = 0x49, + +/* PHY_I2CM_OPERATION_ADDR field values */ + HDMI_PHY_I2CM_OPERATION_ADDR_WRITE = 0x10, + HDMI_PHY_I2CM_OPERATION_ADDR_READ = 0x1, + +/* HDMI_PHY_I2CM_INT_ADDR */ + HDMI_PHY_I2CM_INT_ADDR_DONE_POL = 0x08, + HDMI_PHY_I2CM_INT_ADDR_DONE_MASK = 0x04, + +/* HDMI_PHY_I2CM_CTLINT_ADDR */ + HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL = 0x80, + HDMI_PHY_I2CM_CTLINT_ADDR_NAC_MASK = 0x40, + HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08, + HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04, + +/* AUD_CTS3 field values */ + HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5, + HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0, + HDMI_AUD_CTS3_N_SHIFT_1 = 0, + HDMI_AUD_CTS3_N_SHIFT_16 = 0x20, + HDMI_AUD_CTS3_N_SHIFT_32 = 0x40, + HDMI_AUD_CTS3_N_SHIFT_64 = 0x60, + HDMI_AUD_CTS3_N_SHIFT_128 = 0x80, + HDMI_AUD_CTS3_N_SHIFT_256 = 0xa0, + /* note that the CTS3 MANUAL bit has been removed + from our part. Can't set it, will read as 0. 
*/ + HDMI_AUD_CTS3_CTS_MANUAL = 0x10, + HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f, + +/* AHB_DMA_CONF0 field values */ + HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7, + HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80, + HDMI_AHB_DMA_CONF0_HBR = 0x10, + HDMI_AHB_DMA_CONF0_EN_HLOCK_OFFSET = 3, + HDMI_AHB_DMA_CONF0_EN_HLOCK_MASK = 0x08, + HDMI_AHB_DMA_CONF0_INCR_TYPE_OFFSET = 1, + HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK = 0x06, + HDMI_AHB_DMA_CONF0_INCR4 = 0x0, + HDMI_AHB_DMA_CONF0_INCR8 = 0x2, + HDMI_AHB_DMA_CONF0_INCR16 = 0x4, + HDMI_AHB_DMA_CONF0_BURST_MODE = 0x1, + +/* HDMI_AHB_DMA_START field values */ + HDMI_AHB_DMA_START_START_OFFSET = 0, + HDMI_AHB_DMA_START_START_MASK = 0x01, + +/* HDMI_AHB_DMA_STOP field values */ + HDMI_AHB_DMA_STOP_STOP_OFFSET = 0, + HDMI_AHB_DMA_STOP_STOP_MASK = 0x01, + +/* AHB_DMA_STAT, AHB_DMA_INT, AHB_DMA_MASK, AHB_DMA_POL field values */ + HDMI_AHB_DMA_DONE = 0x80, + HDMI_AHB_DMA_RETRY_SPLIT = 0x40, + HDMI_AHB_DMA_LOSTOWNERSHIP = 0x20, + HDMI_AHB_DMA_ERROR = 0x10, + HDMI_AHB_DMA_FIFO_THREMPTY = 0x04, + HDMI_AHB_DMA_FIFO_FULL = 0x02, + HDMI_AHB_DMA_FIFO_EMPTY = 0x01, + +/* AHB_DMA_BUFFSTAT, AHB_DMA_BUFFINT,AHB_DMA_BUFFMASK,AHB_DMA_BUFFPOL values */ + HDMI_AHB_DMA_BUFFSTAT_FULL = 0x02, + HDMI_AHB_DMA_BUFFSTAT_EMPTY = 0x01, + +/* MC_CLKDIS field values */ + HDMI_MC_CLKDIS_HDCPCLK_DISABLE = 0x40, + HDMI_MC_CLKDIS_CECCLK_DISABLE = 0x20, + HDMI_MC_CLKDIS_CSCCLK_DISABLE = 0x10, + HDMI_MC_CLKDIS_AUDCLK_DISABLE = 0x8, + HDMI_MC_CLKDIS_PREPCLK_DISABLE = 0x4, + HDMI_MC_CLKDIS_TMDSCLK_DISABLE = 0x2, + HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1, + +/* MC_SWRSTZ field values */ + HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02, + +/* MC_FLOWCTRL field values */ + HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_MASK = 0x1, + HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH = 0x1, + HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0, + +/* MC_PHYRSTZ field values */ + HDMI_MC_PHYRSTZ_ASSERT = 0x0, + HDMI_MC_PHYRSTZ_DEASSERT = 0x1, + +/* MC_HEACPHY_RST field values */ + HDMI_MC_HEACPHY_RST_ASSERT = 0x1, + HDMI_MC_HEACPHY_RST_DEASSERT = 0x0, + +/* CSC_CFG field values */ + HDMI_CSC_CFG_INTMODE_MASK = 0x30, + HDMI_CSC_CFG_INTMODE_OFFSET = 4, + HDMI_CSC_CFG_INTMODE_DISABLE = 0x00, + HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1 = 0x10, + HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA2 = 0x20, + HDMI_CSC_CFG_DECMODE_MASK = 0x3, + HDMI_CSC_CFG_DECMODE_OFFSET = 0, + HDMI_CSC_CFG_DECMODE_DISABLE = 0x0, + HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1 = 0x1, + HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA2 = 0x2, + HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3 = 0x3, + +/* CSC_SCALE field values */ + HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK = 0xF0, + HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP = 0x00, + HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP = 0x50, + HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP = 0x60, + HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP = 0x70, + HDMI_CSC_SCALE_CSCSCALE_MASK = 0x03, + +/* A_HDCPCFG0 field values */ + HDMI_A_HDCPCFG0_ELVENA_MASK = 0x80, + HDMI_A_HDCPCFG0_ELVENA_ENABLE = 0x80, + HDMI_A_HDCPCFG0_ELVENA_DISABLE = 0x00, + HDMI_A_HDCPCFG0_I2CFASTMODE_MASK = 0x40, + HDMI_A_HDCPCFG0_I2CFASTMODE_ENABLE = 0x40, + HDMI_A_HDCPCFG0_I2CFASTMODE_DISABLE = 0x00, + HDMI_A_HDCPCFG0_BYPENCRYPTION_MASK = 0x20, + HDMI_A_HDCPCFG0_BYPENCRYPTION_ENABLE = 0x20, + HDMI_A_HDCPCFG0_BYPENCRYPTION_DISABLE = 0x00, + HDMI_A_HDCPCFG0_SYNCRICHECK_MASK = 0x10, + HDMI_A_HDCPCFG0_SYNCRICHECK_ENABLE = 0x10, + HDMI_A_HDCPCFG0_SYNCRICHECK_DISABLE = 0x00, + HDMI_A_HDCPCFG0_AVMUTE_MASK = 0x8, + HDMI_A_HDCPCFG0_AVMUTE_ENABLE = 0x8, + HDMI_A_HDCPCFG0_AVMUTE_DISABLE = 0x0, + HDMI_A_HDCPCFG0_RXDETECT_MASK = 
0x4, + HDMI_A_HDCPCFG0_RXDETECT_ENABLE = 0x4, + HDMI_A_HDCPCFG0_RXDETECT_DISABLE = 0x0, + HDMI_A_HDCPCFG0_EN11FEATURE_MASK = 0x2, + HDMI_A_HDCPCFG0_EN11FEATURE_ENABLE = 0x2, + HDMI_A_HDCPCFG0_EN11FEATURE_DISABLE = 0x0, + HDMI_A_HDCPCFG0_HDMIDVI_MASK = 0x1, + HDMI_A_HDCPCFG0_HDMIDVI_HDMI = 0x1, + HDMI_A_HDCPCFG0_HDMIDVI_DVI = 0x0, + +/* A_HDCPCFG1 field values */ + HDMI_A_HDCPCFG1_DISSHA1CHECK_MASK = 0x8, + HDMI_A_HDCPCFG1_DISSHA1CHECK_DISABLE = 0x8, + HDMI_A_HDCPCFG1_DISSHA1CHECK_ENABLE = 0x0, + HDMI_A_HDCPCFG1_PH2UPSHFTENC_MASK = 0x4, + HDMI_A_HDCPCFG1_PH2UPSHFTENC_ENABLE = 0x4, + HDMI_A_HDCPCFG1_PH2UPSHFTENC_DISABLE = 0x0, + HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK = 0x2, + HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE = 0x2, + HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_ENABLE = 0x0, + HDMI_A_HDCPCFG1_SWRESET_MASK = 0x1, + HDMI_A_HDCPCFG1_SWRESET_ASSERT = 0x0, + +/* A_VIDPOLCFG field values */ + HDMI_A_VIDPOLCFG_UNENCRYPTCONF_MASK = 0x60, + HDMI_A_VIDPOLCFG_UNENCRYPTCONF_OFFSET = 5, + HDMI_A_VIDPOLCFG_DATAENPOL_MASK = 0x10, + HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH = 0x10, + HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW = 0x0, + HDMI_A_VIDPOLCFG_VSYNCPOL_MASK = 0x8, + HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_HIGH = 0x8, + HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_LOW = 0x0, + HDMI_A_VIDPOLCFG_HSYNCPOL_MASK = 0x2, + HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2, + HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0, +}; +#endif /* __IMX_HDMI_H__ */ diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c index 654bf03e05ff8b..7e593296ac4728 100644 --- a/drivers/staging/imx-drm/imx-ldb.c +++ b/drivers/staging/imx-drm/imx-ldb.c @@ -167,9 +167,8 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, /* set display clock mux to LDB input clock */ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]); - if (ret) { + if (ret) dev_err(ldb->dev, "unable to set di%d parent clock to ldb_di%d\n", mux, chno); - } } static void imx_ldb_encoder_prepare(struct drm_encoder *encoder) @@ -414,7 +413,7 @@ enum { LVDS_BIT_MAP_JEIDA }; -static const char *imx_ldb_bit_mappings[] = { +static const char * const imx_ldb_bit_mappings[] = { [LVDS_BIT_MAP_SPWG] = "spwg", [LVDS_BIT_MAP_JEIDA] = "jeida", }; diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 2c44fef8d58b32..9abc7ca8b6cf78 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c @@ -560,7 +560,7 @@ static const char *imx_tve_modes[] = { [TVE_MODE_VGA] = "vga", }; -const int of_get_tve_mode(struct device_node *np) +static const int of_get_tve_mode(struct device_node *np) { const char *bm; int ret, i; diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c index 97ca6924dbb3fc..ca85d3d70ae3f4 100644 --- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c +++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c index ce6ba987ec91b0..22be104fbda975 100644 --- a/drivers/staging/imx-drm/ipuv3-crtc.c +++ b/drivers/staging/imx-drm/ipuv3-crtc.c @@ -218,7 +218,8 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id) if (ipu_crtc->newfb) { ipu_crtc->newfb = NULL; - ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb, 0, 0); + ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb, + ipu_crtc->plane[0]->x, ipu_crtc->plane[0]->y); ipu_crtc_handle_pageflip(ipu_crtc); } diff --git 
a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c index d97454a0dffd94..34b642a12f8bb2 100644 --- a/drivers/staging/imx-drm/ipuv3-plane.c +++ b/drivers/staging/imx-drm/ipuv3-plane.c @@ -64,6 +64,7 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, { struct ipu_ch_param __iomem *cpmem; struct drm_gem_cma_object *cma_obj; + unsigned long eba; cma_obj = drm_fb_cma_get_gem_obj(fb, 0); if (!cma_obj) { @@ -76,8 +77,15 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, cpmem = ipu_get_cpmem(ipu_plane->ipu_ch); ipu_cpmem_set_stride(cpmem, fb->pitches[0]); - ipu_cpmem_set_buffer(cpmem, 0, cma_obj->paddr + fb->offsets[0] + - fb->pitches[0] * y + x); + + eba = cma_obj->paddr + fb->offsets[0] + + fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; + ipu_cpmem_set_buffer(cpmem, 0, eba); + ipu_cpmem_set_buffer(cpmem, 1, eba); + + /* cache offsets for subsequent pageflips */ + ipu_plane->x = x; + ipu_plane->y = y; return 0; } diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c index 24aa9beedcfb6c..351d61dede0024 100644 --- a/drivers/staging/imx-drm/parallel-display.c +++ b/drivers/staging/imx-drm/parallel-display.c @@ -23,6 +23,7 @@ #include #include #include +#include
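The ipuv3-plane.c hunk above caches the requested x/y panning offset in the plane and folds it into the channel start address (EBA), so the page-flip path changed in ipuv3-crtc.c can reprogram the buffer at the same position instead of falling back to (0, 0). A minimal sketch of that address arithmetic, assuming a packed single-plane format whose bits_per_pixel is a byte multiple; plane_eba() and the values in main() are hypothetical illustrations, not part of the driver:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Mirrors the eba calculation in the hunk above: start of the visible
	 * window = buffer physical address + plane offset + y scanlines of
	 * pitch + x pixels of (bits_per_pixel / 8) bytes.
	 */
	static uint32_t plane_eba(uint32_t paddr, uint32_t offset, uint32_t pitch,
				  uint32_t bits_per_pixel, uint32_t x, uint32_t y)
	{
		return paddr + offset + pitch * y + (bits_per_pixel >> 3) * x;
	}

	int main(void)
	{
		/* XRGB8888 scanout, 1920 pixels per line, panned to (16, 8) */
		uint32_t eba = plane_eba(0x10000000, 0, 1920 * 4, 32, 16, 8);

		printf("EBA = 0x%08x\n", eba);	/* 0x10000000 + 8*7680 + 16*4 */
		return 0;
	}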